sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
marimo-team/marimo:tests/_pyodide/test_pyodide_session.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import asyncio
import json
from textwrap import dedent
from typing import TYPE_CHECKING
from unittest.mock import MagicMock, Mock
import msgspec
import pytest
from marimo._ast.app_config import _AppConfig
from marimo._config.config import DEFAULT_CONFIG
from marimo._dependencies.dependencies import DependencyManager
from marimo._messaging.types import KernelMessage
from marimo._pyodide.pyodide_session import (
AsyncQueueManager,
PyodideBridge,
PyodideSession,
parse_command,
)
from marimo._runtime.commands import (
AppMetadata,
ClearCacheCommand,
CreateNotebookCommand,
DebugCellCommand,
DeleteCellCommand,
ExecuteCellsCommand,
ExecuteScratchpadCommand,
ExecuteStaleCellsCommand,
GetCacheInfoCommand,
InstallPackagesCommand,
InvokeFunctionCommand,
ListDataSourceConnectionCommand,
ListSecretKeysCommand,
ListSQLTablesCommand,
ModelCommand,
PreviewDatasetColumnCommand,
PreviewSQLTableCommand,
RefreshSecretsCommand,
RenameNotebookCommand,
StopKernelCommand,
SyncGraphCommand,
UpdateCellConfigCommand,
UpdateUIElementCommand,
ValidateSQLCommand,
)
from marimo._runtime.context.types import teardown_context
from marimo._session.model import SessionMode
from marimo._session.notebook import AppFileManager
from marimo._types.ids import CellId_t, UIElementId
if TYPE_CHECKING:
from collections.abc import AsyncGenerator, Generator
from pathlib import Path
@pytest.fixture
def pyodide_app_file(tmp_path: Path) -> Path:
filename = tmp_path / "test.py"
filename.write_text(
"""
import marimo
app = marimo.App()
@app.cell
def _():
"Hello, world!"
return
"""
)
return filename
@pytest.fixture
def mock_pyodide_http() -> Generator[MagicMock, None, None]:
import sys
mock = MagicMock()
sys.modules["pyodide_http"] = mock
mock.patch_all.return_value = None
yield mock
del sys.modules["pyodide_http"]
@pytest.fixture
def mock_pyodide() -> Generator[MagicMock, None, None]:
import sys
from types import ModuleType
# Create a proper module structure for pyodide
mock = MagicMock(spec=ModuleType)
mock.code = MagicMock(spec=ModuleType)
mock.code.find_imports = MagicMock(
return_value=["numpy", "pandas", "sklearn", "matplotlib.pyplot"]
)
# Save original module if it exists
original_module = sys.modules.get("pyodide", None)
# Install our mock
sys.modules["pyodide"] = mock
sys.modules["pyodide.code"] = mock
yield mock
# Restore original state
if original_module is not None:
sys.modules["pyodide"] = original_module
else:
del sys.modules["pyodide"]
@pytest.fixture
async def pyodide_session(
pyodide_app_file: Path,
) -> AsyncGenerator[PyodideSession, None]:
def _on_write(msg: KernelMessage) -> None:
pass
session = PyodideSession(
app=AppFileManager(filename=str(pyodide_app_file)),
mode=SessionMode.EDIT,
on_write=_on_write,
app_metadata=AppMetadata(
query_params={},
cli_args={},
app_config=_AppConfig(),
filename=str(pyodide_app_file),
),
user_config=DEFAULT_CONFIG,
)
yield session
teardown_context()
async def test_async_queue_manager() -> None:
async_queue_manager = AsyncQueueManager()
# Test putting and getting from queues
stop_request = StopKernelCommand()
set_ui_element_request = UpdateUIElementCommand(
object_ids=[UIElementId("test")], values=["test"]
)
async_queue_manager.control_queue.put_nowait(stop_request)
async_queue_manager.set_ui_element_queue.put_nowait(set_ui_element_request)
assert await async_queue_manager.control_queue.get() == stop_request
assert (
await async_queue_manager.set_ui_element_queue.get()
== set_ui_element_request
)
async def test_pyodide_session_start(
pyodide_session: PyodideSession,
mock_pyodide_http: MagicMock,
) -> None:
# Test starting the session
start_task = asyncio.create_task(pyodide_session.start())
await asyncio.sleep(0) # Let the task start
assert pyodide_session.kernel_task is not None
mock_pyodide_http.patch_all.assert_called_once()
pyodide_session.kernel_task.stop()
start_task.cancel()
try:
await start_task
except asyncio.CancelledError:
pass
async def test_pyodide_session_put_control_request(
pyodide_session: PyodideSession,
) -> None:
# Test putting control requests
execution_request = ExecuteCellsCommand(
cell_ids=[CellId_t("test")],
codes=["test"],
)
set_ui_element_request = UpdateUIElementCommand(
object_ids=[UIElementId("test")], values=["test"]
)
pyodide_session.put_control_request(execution_request)
pyodide_session.put_control_request(set_ui_element_request)
assert not pyodide_session._queue_manager.control_queue.empty()
assert not pyodide_session._queue_manager.set_ui_element_queue.empty()
async def test_pyodide_session_find_packages(
pyodide_session: PyodideSession,
mock_pyodide: MagicMock,
) -> None:
# We don't use `pyodide.code.find_imports`
# because this returns imports not in the lockfile (and potentially not trusted).
code = dedent(
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
"""
)
packages = pyodide_session.find_packages(code)
assert sorted(packages) == sorted([])
mock_pyodide.code.find_imports.assert_not_called()
async def test_pyodide_session_find_packages_with_script_metadata(
pyodide_session: PyodideSession,
mock_pyodide: MagicMock,
) -> None:
# Test finding packages in code
code = dedent(
"""
# /// script
# dependencies = [
# "foo",
# "bar==1.0.0",
# "baz>=2.0.0",
# ]
# ///
import numpy as np
import pandas as pd
"""
)
packages = pyodide_session.find_packages(code)
assert sorted(packages) == sorted(["foo", "bar", "baz"])
mock_pyodide.code.find_imports.assert_not_called()
async def test_complex_find_packages(pyodide_session: PyodideSession) -> None:
code = dedent(
"""
# /// script
# requires-python = ">=3.12"
# dependencies = [
# "plotly[express]==6.5.0",
# "polars==1.36.1",
# ]
# ///
import marimo
app = marimo.App()
@app.cell
def _():
import marimo as mo
return (mo,)
"""
)
packages = pyodide_session.find_packages(code)
assert sorted(packages) == sorted(["plotly[express]", "polars"])
async def test_strip_version_resilience(
pyodide_session: PyodideSession,
) -> None:
"""Test strip_version function handles various PEP 440 version specifiers and edge cases."""
code_templates = [
# Basic version specifiers
('dependencies = ["package==1.0.0"]', ["package"]),
('dependencies = ["package>=1.0.0"]', ["package"]),
('dependencies = ["package<=2.0.0"]', ["package"]),
('dependencies = ["package~=1.4"]', ["package"]),
('dependencies = ["package!=1.5.0"]', ["package"]),
('dependencies = ["package>1.0"]', ["package"]),
('dependencies = ["package<2.0"]', ["package"]),
('dependencies = ["package===1.0.0"]', ["package"]),
# With extras
('dependencies = ["package[extra]==1.0.0"]', ["package[extra]"]),
(
'dependencies = ["package[extra1,extra2]>=1.0.0"]',
["package[extra1,extra2]"],
),
# With whitespace
('dependencies = ["package >= 1.0.0"]', ["package"]),
('dependencies = [" package==1.0.0 "]', ["package"]),
# With environment markers
(
'dependencies = ["package>=1.0.0; python_version>=\\"3.8\\""]',
["package"],
),
(
'dependencies = ["package[extra]>=1.0; sys_platform==\\"linux\\""]',
["package[extra]"],
),
# URL dependencies - left as-is
(
'dependencies = ["package @ https://github.com/user/repo.git"]',
["package @ https://github.com/user/repo.git"],
),
(
'dependencies = ["package @ git+https://github.com/user/repo.git"]',
["package @ git+https://github.com/user/repo.git"],
),
(
'dependencies = ["package @ git+ssh://git@github.com/user/repo.git"]',
["package @ git+ssh://git@github.com/user/repo.git"],
),
(
'dependencies = ["package @ file:///path/to/local/package"]',
["package @ file:///path/to/local/package"],
),
# Multiple packages with various specifiers
(
'dependencies = ["foo==1.0", "bar>=2.0", "baz~=3.0", "qux[extra]>=4.0"]',
["foo", "bar", "baz", "qux[extra]"],
),
# No version specifier
('dependencies = ["package"]', ["package"]),
]
for deps_str, expected in code_templates:
code = dedent(
f"""
# /// script
# {deps_str}
# ///
import marimo
"""
)
packages = pyodide_session.find_packages(code)
assert sorted(packages) == sorted(expected), (
f"Failed for {deps_str}: expected {expected}, got {packages}"
)
async def test_pyodide_session_put_input(
pyodide_session: PyodideSession,
) -> None:
# Test putting input
input_text = "test input"
pyodide_session.put_input(input_text)
assert not pyodide_session._queue_manager.input_queue.empty()
assert await pyodide_session._queue_manager.input_queue.get() == input_text
@pytest.mark.parametrize(
("json_payload", "expected_type"),
[
# Notebook operations
(
'{"type": "create-notebook", "executionRequests": [{"cellId": "cell-1", "code": "print(1)", "type": "execute-cell"}], '
'"setUiElementValueRequest": {"objectIds": [], "values": [], "type": "update-ui-element"}, '
'"autoRun": true}',
CreateNotebookCommand,
),
(
'{"type": "rename-notebook", "filename": "test.py"}',
RenameNotebookCommand,
),
# Cell execution and management
(
'{"type": "execute-cells", "cellIds": ["cell-1"], "codes": ["print(1)"]}',
ExecuteCellsCommand,
),
(
'{"type": "execute-cells", "cellIds": ["cell-1", "cell-2"], "codes": ["x=1", "y=2"]}',
ExecuteCellsCommand,
),
(
'{"type": "execute-scratchpad", "code": "print(1)"}',
ExecuteScratchpadCommand,
),
('{"type": "execute-stale-cells"}', ExecuteStaleCellsCommand),
('{"type": "debug-cell", "cellId": "cell-1"}', DebugCellCommand),
('{"type": "delete-cell", "cellId": "cell-1"}', DeleteCellCommand),
(
'{"type": "sync-graph", "cells": {"cell-1": "x=1"}, "runIds": ["cell-1"], "deleteIds": []}',
SyncGraphCommand,
),
(
'{"type": "update-cell-config", "configs": {"cell-1": {"hide_code": true}}}',
UpdateCellConfigCommand,
),
# Package management
(
'{"type": "install-packages", "manager": "pip", "versions": {}}',
InstallPackagesCommand,
),
(
'{"type": "install-packages", "manager": "pip", "versions": {"numpy": "1.24.0"}}',
InstallPackagesCommand,
),
# UI element and widget model operations
(
'{"type": "update-ui-element", "objectIds": ["test-1"], "values": [42], "token": "test-token"}',
UpdateUIElementCommand,
),
(
'{"type": "update-ui-element", "objectIds": ["test-1"], "values": [42]}',
UpdateUIElementCommand,
),
(
'{"type": "update-ui-element", "objectIds": [], "values": []}',
UpdateUIElementCommand,
),
(
'{"type": "model", "modelId": "model-1", "message": {"method": "update", "state": {}, "bufferPaths": []}, "buffers": []}',
ModelCommand,
),
(
'{"type": "invoke-function", "functionCallId": "fc-1", "namespace": "test", "functionName": "foo", "args": {}}',
InvokeFunctionCommand,
),
# Data/SQL operations
(
'{"type": "preview-dataset-column", "sourceType": "duckdb", "source": "test.db", "tableName": "users", "columnName": "id"}',
PreviewDatasetColumnCommand,
),
(
'{"type": "preview-sql-table", "requestId": "req-1", "engine": "duckdb", "database": "test.db", '
'"schema": "main", "tableName": "users"}',
PreviewSQLTableCommand,
),
(
'{"type": "list-sql-tables", "requestId": "req-2", "engine": "duckdb", "database": "test.db", "schema": "main"}',
ListSQLTablesCommand,
),
(
'{"type": "validate-sql", "requestId": "req-3", "query": "SELECT * FROM users", "onlyParse": false}',
ValidateSQLCommand,
),
(
'{"type": "validate-sql", "requestId": "req-4", "query": "SELECT * FROM users", "onlyParse": true, "dialect": "postgres"}',
ValidateSQLCommand,
),
(
'{"type": "list-data-source-connection", "engine": "duckdb"}',
ListDataSourceConnectionCommand,
),
# Secrets management
(
'{"type": "list-secret-keys", "requestId": "req-1"}',
ListSecretKeysCommand,
),
('{"type": "refresh-secrets"}', RefreshSecretsCommand),
# Cache management
('{"type": "clear-cache"}', ClearCacheCommand),
('{"type": "get-cache-info"}', GetCacheInfoCommand),
# Kernel operations
('{"type": "stop-kernel"}', StopKernelCommand),
],
)
def test_command_parsing_with_discriminator(
json_payload: str, expected_type: type
) -> None:
"""Test that Command types are parsed correctly using the type discriminator."""
parsed = parse_command(json_payload)
assert type(parsed) is expected_type, (
f"Expected {expected_type.__name__} but got {type(parsed).__name__} "
f"for payload: {json_payload}"
)
def test_control_request_parsing_invalid() -> None:
"""Test that invalid JSON raises DecodeError."""
with pytest.raises(msgspec.DecodeError):
parse_command("invalid json")
async def test_async_queue_manager_close() -> None:
"""Test closing queues puts a StopRequest."""
manager = AsyncQueueManager()
manager.close_queues()
request = await manager.control_queue.get()
assert isinstance(request, StopKernelCommand)
async def test_pyodide_session_on_message(
pyodide_session: PyodideSession,
) -> None:
"""Test that _on_message calls all consumers."""
mock_consumer1 = Mock()
mock_consumer2 = Mock()
pyodide_session.consumers = [mock_consumer1, mock_consumer2]
test_msg: KernelMessage = {
"op": "completed-run",
"cell_id": CellId_t("test"),
}
pyodide_session._on_message(test_msg)
mock_consumer1.assert_called_once_with(test_msg)
mock_consumer2.assert_called_once_with(test_msg)
async def test_pyodide_session_put_completion_request(
pyodide_session: PyodideSession,
) -> None:
"""Test putting completion requests."""
from marimo._runtime.commands import CodeCompletionCommand
completion_request = CodeCompletionCommand(
id="test",
document="test code",
cell_id=CellId_t("test"),
)
pyodide_session.put_completion_request(completion_request)
assert not pyodide_session._queue_manager.completion_queue.empty()
result = await pyodide_session._queue_manager.completion_queue.get()
assert result == completion_request
# ===== PyodideBridge Tests =====
@pytest.fixture
def pyodide_bridge(
pyodide_session: PyodideSession,
) -> PyodideBridge:
"""Create a PyodideBridge instance for testing."""
return PyodideBridge(session=pyodide_session)
def test_pyodide_bridge_init(pyodide_bridge: PyodideBridge) -> None:
"""Test PyodideBridge initialization."""
assert pyodide_bridge.session is not None
assert pyodide_bridge.file_system is not None
def test_pyodide_bridge_put_control_request(
pyodide_bridge: PyodideBridge,
) -> None:
"""Test putting control requests through the bridge."""
request_json = '{"type": "execute-cells", "cellIds": ["cell-1"], "codes": ["print(1)"]}'
pyodide_bridge.put_control_request(request_json)
assert not pyodide_bridge.session._queue_manager.control_queue.empty()
def test_pyodide_bridge_put_input(pyodide_bridge: PyodideBridge) -> None:
"""Test putting input through the bridge."""
test_input = "test input"
pyodide_bridge.put_input(test_input)
assert not pyodide_bridge.session._queue_manager.input_queue.empty()
def test_pyodide_bridge_code_complete(pyodide_bridge: PyodideBridge) -> None:
"""Test code completion through the bridge."""
request_json = '{"id": "test", "document": "test code", "cellId": "test"}'
pyodide_bridge.code_complete(request_json)
assert not pyodide_bridge.session._queue_manager.completion_queue.empty()
def test_pyodide_bridge_read_code(
pyodide_bridge: PyodideBridge,
pyodide_app_file: Path,
) -> None:
"""Test reading code through the bridge."""
del pyodide_app_file
result = pyodide_bridge.read_code()
response = json.loads(result)
assert "contents" in response
assert "marimo.App()" in response["contents"]
@pytest.mark.skipif(
not (DependencyManager.ruff.has() or DependencyManager.black.has()),
reason="ruff or black not installed",
)
async def test_pyodide_bridge_format(pyodide_bridge: PyodideBridge) -> None:
"""Test formatting code through the bridge."""
request_json = json.dumps(
{
"codes": {"cell-1": "x=1+2"},
"lineLength": 79,
}
)
result = await pyodide_bridge.format(request_json)
response = json.loads(result)
assert "codes" in response
assert "cell-1" in response["codes"]
def test_pyodide_bridge_save(
pyodide_bridge: PyodideBridge,
pyodide_app_file: Path,
) -> None:
"""Test saving notebook through the bridge."""
request_json = json.dumps(
{
"cellIds": ["test"],
"codes": ["# Updated code"],
"names": ["_"],
"configs": [{}], # Must match length of cell_ids
"filename": str(pyodide_app_file),
}
)
pyodide_bridge.save(request_json)
def test_pyodide_bridge_save_app_config(
pyodide_bridge: PyodideBridge,
pyodide_app_file: Path,
) -> None:
del pyodide_app_file
"""Test saving app config through the bridge."""
request_json = json.dumps(
{
"config": {
"width": "full",
}
}
)
# Should not raise
pyodide_bridge.save_app_config(request_json)
def test_pyodide_bridge_save_user_config(
pyodide_bridge: PyodideBridge,
) -> None:
"""Test saving user config through the bridge."""
request_json = json.dumps(
{
"config": {
"completion": {"activate_on_typing": True},
}
}
)
pyodide_bridge.save_user_config(request_json)
# Should have put a UpdateUserConfigRequest in the queue
assert not pyodide_bridge.session._queue_manager.control_queue.empty()
def test_pyodide_bridge_rename_file(
pyodide_bridge: PyodideBridge,
tmp_path: Path,
) -> None:
"""Test renaming file through the bridge."""
new_filename = str(tmp_path / "renamed.py")
pyodide_bridge.rename_file(new_filename)
assert pyodide_bridge.session.app_manager.filename == new_filename
def test_pyodide_bridge_list_files(
pyodide_bridge: PyodideBridge,
tmp_path: Path,
) -> None:
"""Test listing files through the bridge."""
# Create some test files
(tmp_path / "test1.py").write_text("# test1")
(tmp_path / "test2.py").write_text("# test2")
request_json = json.dumps({"path": str(tmp_path)})
result = pyodide_bridge.list_files(request_json)
response = json.loads(result)
assert "files" in response
assert "root" in response
assert response["root"] == str(tmp_path)
def test_pyodide_bridge_file_details(
pyodide_bridge: PyodideBridge,
tmp_path: Path,
) -> None:
"""Test getting file details through the bridge."""
test_file = tmp_path / "test.py"
test_file.write_text("# test")
request_json = json.dumps({"path": str(test_file)})
result = pyodide_bridge.file_details(request_json)
response = json.loads(result)
assert "file" in response
assert "contents" in response
assert response["file"]["path"] == str(test_file)
def test_pyodide_bridge_create_file(
pyodide_bridge: PyodideBridge,
tmp_path: Path,
) -> None:
"""Test creating file through the bridge."""
import base64
test_content = b"# new file"
encoded_content = base64.b64encode(test_content).decode()
request_json = json.dumps(
{
"path": str(tmp_path),
"type": "file",
"name": "new_file.py",
"contents": encoded_content,
}
)
result = pyodide_bridge.create_file_or_directory(request_json)
response = json.loads(result)
assert response["success"] is True
assert (tmp_path / "new_file.py").exists()
def test_pyodide_bridge_delete_file(
pyodide_bridge: PyodideBridge,
tmp_path: Path,
) -> None:
"""Test deleting file through the bridge."""
test_file = tmp_path / "to_delete.py"
test_file.write_text("# delete me")
request_json = json.dumps({"path": str(test_file)})
result = pyodide_bridge.delete_file_or_directory(request_json)
response = json.loads(result)
assert response["success"] is True
assert not test_file.exists()
def test_pyodide_bridge_move_file(
pyodide_bridge: PyodideBridge,
tmp_path: Path,
) -> None:
"""Test moving file through the bridge."""
test_file = tmp_path / "old.py"
test_file.write_text("# move me")
new_path = tmp_path / "new.py"
request_json = json.dumps(
{
"path": str(test_file),
"newPath": str(new_path),
}
)
result = pyodide_bridge.move_file_or_directory(request_json)
response = json.loads(result)
assert response["success"] is True
assert not test_file.exists()
assert new_path.exists()
def test_pyodide_bridge_update_file(
pyodide_bridge: PyodideBridge,
tmp_path: Path,
) -> None:
"""Test updating file through the bridge."""
test_file = tmp_path / "update.py"
test_file.write_text("# old content")
new_content = "# new content"
request_json = json.dumps(
{
"path": str(test_file),
"contents": new_content,
}
)
result = pyodide_bridge.update_file(request_json)
response = json.loads(result)
assert response["success"] is True
assert test_file.read_text() == new_content
def test_pyodide_bridge_export_html(
pyodide_bridge: PyodideBridge,
) -> None:
"""Test exporting HTML through the bridge."""
request_json = json.dumps(
{
"download": False,
"files": [],
"includeCode": True,
}
)
result = pyodide_bridge.export_html(request_json)
html = json.loads(result)
assert isinstance(html, str)
# HTML should contain marimo-related content
assert len(html) > 0
def test_pyodide_bridge_export_markdown(
pyodide_bridge: PyodideBridge,
) -> None:
"""Test exporting markdown through the bridge."""
result = pyodide_bridge.export_markdown("{}")
markdown = json.loads(result)
assert isinstance(markdown, str)
assert len(markdown) > 0
async def test_pyodide_bridge_read_snippets(
pyodide_bridge: PyodideBridge,
) -> None:
"""Test reading snippets through the bridge."""
result = await pyodide_bridge.read_snippets()
data = json.loads(result)
assert isinstance(data, dict)
assert "snippets" in data
assert isinstance(data["snippets"], list)
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_pyodide/test_pyodide_session.py",
"license": "Apache License 2.0",
"lines": 684,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_pyodide/test_pyodide_streams.py | import asyncio
import json
from unittest.mock import Mock
import pytest
from marimo._pyodide.streams import (
PyodideStderr,
PyodideStdin,
PyodideStdout,
PyodideStream,
)
from marimo._types.ids import CellId_t
cell_id = CellId_t("test-cell-id")
@pytest.fixture
def pyodide_pipe() -> Mock:
return Mock()
@pytest.fixture
async def pyodide_input_queue() -> asyncio.Queue[str]:
return asyncio.Queue()
@pytest.fixture
def pyodide_(
pyodide_pipe: Mock,
pyodide_input_queue: asyncio.Queue[str],
) -> PyodideStream:
return PyodideStream(pyodide_pipe, pyodide_input_queue, cell_id)
@pytest.fixture
def pyodide_stdout(pyodide_: PyodideStream) -> PyodideStdout:
return PyodideStdout(pyodide_)
@pytest.fixture
def pyodide_stderr(pyodide_: PyodideStream) -> PyodideStderr:
return PyodideStderr(pyodide_)
@pytest.fixture
def pyodide_stdin(pyodide_: PyodideStream) -> PyodideStdin:
stdin = PyodideStdin(pyodide_)
stdin._get_response = lambda: "test input\n"
return stdin
class TestPyodideStream:
def test_write(self, pyodide_: PyodideStream, pyodide_pipe: Mock) -> None:
data = {"key": "value"}
pyodide_.write(data)
pyodide_pipe.assert_called_once_with(data)
class TestPyodideStdout:
def test_writable(self, pyodide_stdout: PyodideStdout) -> None:
assert pyodide_stdout.writable() is True
def test_readable(self, pyodide_stdout: PyodideStdout) -> None:
assert pyodide_stdout.readable() is False
def test_seekable(self, pyodide_stdout: PyodideStdout) -> None:
assert pyodide_stdout.seekable() is False
def test_write(
self, pyodide_stdout: PyodideStdout, pyodide_pipe: Mock
) -> None:
data = "test output"
pyodide_stdout.write(data)
assert pyodide_pipe.call_count == 1
msg_bytes = pyodide_pipe.call_args[0][0]
msg = json.loads(msg_bytes)
assert msg["op"] == "cell-op"
assert msg["cell_id"] == pyodide_stdout.stream.cell_id
assert msg["console"]["mimetype"] == "text/plain"
assert msg["console"]["data"] == data
def test_writelines(
self, pyodide_stdout: PyodideStdout, pyodide_pipe: Mock
) -> None:
lines = ["line1\n", "line2\n", "line3\n"]
pyodide_stdout.writelines(lines)
assert pyodide_pipe.call_count == 3
class TestPyodideStderr:
def test_writable(self, pyodide_stderr: PyodideStderr) -> None:
assert pyodide_stderr.writable() is True
def test_readable(self, pyodide_stderr: PyodideStderr) -> None:
assert pyodide_stderr.readable() is False
def test_seekable(self, pyodide_stderr: PyodideStderr) -> None:
assert pyodide_stderr.seekable() is False
def test_write(
self, pyodide_stderr: PyodideStderr, pyodide_pipe: Mock
) -> None:
data = "test error"
pyodide_stderr.write(data)
assert pyodide_pipe.call_count == 1
msg_bytes = pyodide_pipe.call_args[0][0]
msg = json.loads(msg_bytes)
assert msg["op"] == "cell-op"
assert msg["cell_id"] == pyodide_stderr.stream.cell_id
assert msg["console"]["mimetype"] == "text/plain"
assert msg["console"]["data"] == data
def test_writelines(
self, pyodide_stderr: PyodideStderr, pyodide_pipe: Mock
) -> None:
lines = ["error1\n", "error2\n", "error3\n"]
pyodide_stderr.writelines(lines)
assert pyodide_pipe.call_count == 3
class TestPyodideStdin:
def test_writable(self, pyodide_stdin: PyodideStdin) -> None:
assert pyodide_stdin.writable() is False
def test_readable(self, pyodide_stdin: PyodideStdin) -> None:
assert pyodide_stdin.readable() is True
async def test_readline(
self,
pyodide_stdin: PyodideStdin,
pyodide_pipe: Mock,
pyodide_input_queue: asyncio.Queue[str],
) -> None:
# Queue up a response
await pyodide_input_queue.put("test input\n")
# Read the line
result = pyodide_stdin.readline()
assert result == "test input\n"
# Verify prompt was sent
assert pyodide_pipe.call_count == 1
msg_bytes = pyodide_pipe.call_args[0][0]
msg = json.loads(msg_bytes)
assert msg["op"] == "cell-op"
assert msg["cell_id"] == pyodide_stdin.stream.cell_id
assert msg["console"]["mimetype"] == "text/plain"
assert msg["console"]["data"] == ""
async def test_readline_with_prompt(
self,
pyodide_stdin: PyodideStdin,
pyodide_pipe: Mock,
pyodide_input_queue: asyncio.Queue[str],
) -> None:
# Queue up a response
await pyodide_input_queue.put("test input\n")
# Read the line with prompt
result = pyodide_stdin._readline_with_prompt("Enter: ")
assert result == "test input\n"
# Verify prompt was sent
assert pyodide_pipe.call_count == 1
msg_bytes = pyodide_pipe.call_args[0][0]
msg = json.loads(msg_bytes)
assert msg["op"] == "cell-op"
assert msg["cell_id"] == pyodide_stdin.stream.cell_id
assert msg["console"]["mimetype"] == "text/plain"
assert msg["console"]["data"] == "Enter: "
async def test_readlines(
self,
pyodide_stdin: PyodideStdin,
pyodide_pipe: Mock,
pyodide_input_queue: asyncio.Queue[str],
) -> None:
pyodide_stdin._get_response = Mock(
return_value="line1\nline2\nline3\n"
)
# Queue up a response
await pyodide_input_queue.put("line1\nline2\nline3\n")
# Read the lines
result = pyodide_stdin.readlines()
assert result == ["line1", "line2", "line3", ""]
# Verify prompt was sent
assert pyodide_pipe.call_count == 1
msg_bytes = pyodide_pipe.call_args[0][0]
msg = json.loads(msg_bytes)
assert msg["op"] == "cell-op"
assert msg["cell_id"] == pyodide_stdin.stream.cell_id
assert msg["console"]["mimetype"] == "text/plain"
assert msg["console"]["data"] == ""
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_pyodide/test_pyodide_streams.py",
"license": "Apache License 2.0",
"lines": 155,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_pyodide/test_restartable_task.py | import asyncio
import pytest
from marimo._pyodide.restartable_task import RestartableTask
async def test_restartable_task():
# Test basic start/stop
counter = 0
event = asyncio.Event()
async def increment_counter():
nonlocal counter
counter += 1
event.set() # Signal that we've incremented
await asyncio.sleep(0.1)
task = RestartableTask(increment_counter)
assert counter == 0
# Start task
start_task = asyncio.create_task(task.start())
await event.wait() # Wait for increment
assert counter == 1
event.clear()
# Stop task
task.stop()
await asyncio.sleep(0.1) # Let stop take effect
assert counter == 1 # Shouldn't increment after stop
# Cleanup
start_task.cancel()
try:
await start_task
except asyncio.CancelledError:
pass
async def test_restartable_task_restart():
# Test restart functionality
counter = 0
event = asyncio.Event()
async def increment_counter():
nonlocal counter
counter += 1
event.set() # Signal that we've incremented
await asyncio.sleep(0.1)
task = RestartableTask(increment_counter)
start_task = asyncio.create_task(task.start())
# Let it run once
await event.wait()
assert counter == 1
event.clear()
# Restart task
task.restart()
await event.wait()
assert counter == 2 # Should increment again after restart
event.clear()
# Stop task
task.stop()
await asyncio.sleep(0.1)
assert counter == 2 # Shouldn't increment after stop
# Cleanup
start_task.cancel()
try:
await start_task
except asyncio.CancelledError:
pass
async def test_restartable_task_cancellation():
# Test cancellation handling
counter = 0
event = asyncio.Event()
cancelled = False
async def increment_counter():
nonlocal counter, cancelled
try:
counter += 1
event.set() # Signal that we've incremented
await asyncio.sleep(0.1)
except asyncio.CancelledError:
cancelled = True
event.set() # Signal cancellation
raise
task = RestartableTask(increment_counter)
start_task = asyncio.create_task(task.start())
# Let it run once
await event.wait()
assert counter == 1
assert not cancelled
event.clear()
# Cancel task
task.restart() # This will cancel the current task
await event.wait()
assert cancelled # Should have been cancelled
event.clear()
# Let new task run
await event.wait()
assert counter == 2 # Should have restarted and incremented again
event.clear()
# Stop task
task.stop()
await asyncio.sleep(0.1)
assert counter == 2 # Shouldn't increment after stop
# Cleanup
start_task.cancel()
try:
await start_task
except asyncio.CancelledError:
pass
async def test_stop_before_start_assertion():
"""Test stopping a task before it's started should raise assertion error."""
async def dummy_coro():
await asyncio.sleep(0.1)
task = RestartableTask(dummy_coro)
# Stop before start should raise assertion error due to task being None
with pytest.raises(AssertionError):
task.stop()
async def test_restart_before_start_assertion():
"""Test restarting a task before it's started should raise assertion error."""
async def dummy_coro():
await asyncio.sleep(0.1)
task = RestartableTask(dummy_coro)
# Restart before start should raise assertion error
with pytest.raises(AssertionError):
task.restart()
async def test_exception_in_coro_stops_task():
"""Test that exceptions in the coroutine stop the task loop."""
counter = 0
exception_occurred = False
async def failing_coro():
nonlocal counter, exception_occurred
counter += 1
exception_occurred = True
raise ValueError("Test error")
task = RestartableTask(failing_coro)
start_task = asyncio.create_task(task.start())
# Let it fail
await asyncio.sleep(0.05)
# Should have tried once and then stopped due to exception
assert counter == 1
assert exception_occurred
assert start_task.done()
# Cleanup
start_task.cancel()
try:
await start_task
except (asyncio.CancelledError, ValueError):
pass
async def test_task_state_consistency():
"""Test that task state remains consistent through operations."""
call_count = 0
async def state_coro():
nonlocal call_count
call_count += 1
await asyncio.sleep(0.05)
task = RestartableTask(state_coro)
# Initial state
assert task.task is None
assert not task.stopped
# Start task
start_task = asyncio.create_task(task.start())
await asyncio.sleep(0.01)
# After start
assert task.task is not None
assert not task.stopped
assert not task.task.done()
# Restart
old_task = task.task
task.restart()
await asyncio.sleep(0.01)
# After restart
assert task.task is not None
assert task.task != old_task # New task created
assert not task.stopped
assert old_task.cancelled()
# Stop
task.stop()
await asyncio.sleep(0.01)
# After stop
assert task.stopped
assert task.task.cancelled()
# Cleanup
start_task.cancel()
try:
await start_task
except asyncio.CancelledError:
pass
async def test_zero_delay_coro():
    """Test with a coroutine that completes immediately."""
    completion_count = 0

    async def instant_coro():
        nonlocal completion_count
        completion_count += 1
        # Complete immediately, no await

    task = RestartableTask(instant_coro)
    start_task = asyncio.create_task(task.start())
    # Let it run for a bit - should complete multiple times rapidly
    await asyncio.sleep(0.01)
    # Should have completed multiple times
    initial_count = completion_count
    assert initial_count > 0
    # Stop and verify it stops
    task.stop()
    await asyncio.sleep(0.01)
    final_count = completion_count
    # Should have stopped running; >= (not ==) because one in-flight
    # completion may land between stop() and the snapshot.
    assert final_count >= initial_count
    # Cleanup
    start_task.cancel()
    try:
        await start_task
    except asyncio.CancelledError:
        pass
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_pyodide/test_restartable_task.py",
"license": "Apache License 2.0",
"lines": 204,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_utils/test_inline_script_metadata.py | from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from marimo._utils.inline_script_metadata import (
PyProjectReader,
_pyproject_toml_to_requirements_txt,
has_marimo_in_script_metadata,
is_marimo_dependency,
script_metadata_hash_from_filename,
)
from marimo._utils.platform import is_windows
from marimo._utils.scripts import read_pyproject_from_script
if TYPE_CHECKING:
from pathlib import Path
def test_get_dependencies():
    """Dependencies declared in PEP 723 inline metadata are parsed in order."""
    SCRIPT = """
# Copyright 2026 Marimo. All rights reserved.
# /// script
# requires-python = ">=3.11"
# dependencies = [
#     "polars",
#     "marimo>=0.8.0",
#     "quak",
#     "vega-datasets",
# ]
# ///

import marimo

__generated_with = "0.8.2"
app = marimo.App(width="medium")
"""
    # Order of the parsed list mirrors the order in the metadata block.
    assert PyProjectReader.from_script(SCRIPT).dependencies == [
        "polars",
        "marimo>=0.8.0",
        "quak",
        "vega-datasets",
    ]
def test_get_dependencies_github():
    """Dependencies can be read from a remote GitHub file URL (hits network)."""
    notebook_url = (
        "https://github.com/marimo-team/marimo/blob/"
        "a1e1be3190023a86650904249f911b2e6ffb8fac/"
        "examples/third_party/leafmap/leafmap_example.py"
    )
    remote_deps = PyProjectReader.from_filename(notebook_url).dependencies
    assert remote_deps == ["leafmap==0.41.0", "marimo"]
def test_no_dependencies():
    """A script without a metadata block yields an empty dependency list."""
    SCRIPT = """
import marimo
__generated_with = "0.8.2"
app = marimo.App(width="medium")
"""
    assert PyProjectReader.from_script(SCRIPT).dependencies == []
def test_windows_line_endings_from_url():
    """Test that script metadata from URL with Windows line endings is parsed correctly."""
    from unittest.mock import patch

    from marimo._utils.requests import Response

    # Script content as it would come from a Windows server with CRLF line endings
    SCRIPT_WITH_CRLF = b"""# /// script\r
# requires-python = ">=3.11"\r
# dependencies = [\r
#     "polars",\r
#     "marimo>=0.8.0",\r
# ]\r
# ///\r
\r
import marimo\r
\r
__generated_with = "0.8.2"\r
app = marimo.App(width="medium")\r
"""
    url = "https://example.com/notebook.py"
    # Patch the transport so no real HTTP request is made.
    with patch("marimo._utils.requests.get") as mock_get:
        # Mock the response to return content with Windows line endings
        mock_get.return_value = Response(
            200,
            SCRIPT_WITH_CRLF,
            {},
        )
        # This should now work correctly with the line ending normalization in response.text()
        reader = PyProjectReader.from_filename(url)
        assert reader.dependencies == ["polars", "marimo>=0.8.0"]
        assert reader.python_version == ">=3.11"
def test_pyproject_toml_to_requirements_txt_git_sources():
    """uv git sources (rev/branch) become PEP 508 direct references."""
    project_table = {
        "dependencies": [
            "marimo",
            "numpy",
            "polars",
            "altair",
        ],
        "tool": {
            "uv": {
                "sources": {
                    "marimo": {
                        "git": "https://github.com/marimo-team/marimo.git",
                        "rev": "main",
                    },
                    "numpy": {
                        "git": "https://github.com/numpy/numpy.git",
                        "branch": "main",
                    },
                    "polars": {
                        "git": "https://github.com/pola/polars.git",
                        "branch": "dev",
                    },
                }
            }
        },
    }
    # Dependencies without a source entry (altair) pass through untouched.
    expected = [
        "marimo @ git+https://github.com/marimo-team/marimo.git@main",
        "numpy @ git+https://github.com/numpy/numpy.git@main",
        "polars @ git+https://github.com/pola/polars.git@dev",
        "altair",
    ]
    assert _pyproject_toml_to_requirements_txt(project_table) == expected
def test_pyproject_toml_to_requirements_txt_with_marker():
    """An environment marker on a uv source survives the conversion."""
    project_table = {
        "dependencies": [
            "marimo",
            "polars",
        ],
        "tool": {
            "uv": {
                "sources": {
                    "marimo": {
                        "git": "https://github.com/marimo-team/marimo.git",
                        "tag": "0.1.0",
                        "marker": "python_version >= '3.12'",
                    }
                }
            }
        },
    }
    expected = [
        "marimo @ git+https://github.com/marimo-team/marimo.git@0.1.0; python_version >= '3.12'",  # noqa: E501
        "polars",
    ]
    assert _pyproject_toml_to_requirements_txt(project_table) == expected
def test_pyproject_toml_to_requirements_txt_with_url_sources():
    """A uv URL source becomes a direct-URL requirement."""
    project_table = {
        "dependencies": [
            "marimo",
            "polars",
        ],
        "tool": {
            "uv": {
                "sources": {
                    "marimo": {
                        "url": "https://github.com/marimo-team/marimo/archive/refs/heads/main.zip",
                    }
                }
            }
        },
    }
    expected = [
        "marimo @ https://github.com/marimo-team/marimo/archive/refs/heads/main.zip",  # noqa: E501
        "polars",
    ]
    assert _pyproject_toml_to_requirements_txt(project_table) == expected
@pytest.mark.skipif(is_windows(), reason="only testing posix paths")
def test_pyproject_toml_to_requirements_txt_with_local_path():
    """An absolute uv path source becomes a direct-reference requirement."""
    project_table = {
        "dependencies": [
            "marimo",
            "polars",
        ],
        "tool": {
            "uv": {
                "sources": {
                    "marimo": {
                        "path": "/Users/me/work/marimo",
                    }
                }
            }
        },
    }
    expected = [
        "marimo @ /Users/me/work/marimo",
        "polars",
    ]
    assert _pyproject_toml_to_requirements_txt(project_table) == expected
@pytest.mark.skipif(is_windows(), reason="only testing posix paths")
def test_pyproject_toml_to_requirements_txt_with_relative_path():
    """A relative uv path source is resolved against the config file's dir."""
    project_table = {
        "dependencies": [
            "marimo",
            "polars",
        ],
        "tool": {
            "uv": {
                "sources": {
                    "marimo": {
                        "path": "../local/marimo",
                    }
                }
            }
        },
    }
    # Passing the script's path verifies the ../ segment resolves correctly.
    script_path = "/Users/me/project/script.py"
    expected = [
        "marimo @ /Users/me/local/marimo",
        "polars",
    ]
    assert _pyproject_toml_to_requirements_txt(project_table, script_path) == expected
@pytest.mark.parametrize(
    "version_spec",
    [
        "marimo>=0.1.0",
        "marimo==0.1.0",
        "marimo<=0.1.0",
        "marimo>0.1.0",
        "marimo<0.1.0",
        "marimo~=0.1.0",
    ],
)
def test_pyproject_toml_to_requirements_txt_with_versioned_dependencies(
    version_spec: str,
):
    """A git source takes precedence over any version specifier."""
    project_table = {
        "dependencies": [
            version_spec,
        ],
        "tool": {
            "uv": {
                "sources": {
                    "marimo": {
                        "git": "https://github.com/marimo-team/marimo.git",
                        "rev": "main",
                    },
                }
            }
        },
    }
    expected = [
        "marimo @ git+https://github.com/marimo-team/marimo.git@main",
    ]
    assert _pyproject_toml_to_requirements_txt(project_table) == expected
def test_get_python_version_requirement():
    """python_version mirrors 'requires-python' and is None otherwise."""
    assert (
        PyProjectReader(
            {"requires-python": ">=3.11"}, config_path=None
        ).python_version
        == ">=3.11"
    )
    # Missing or empty metadata yields None rather than raising.
    assert (
        PyProjectReader(
            {"dependencies": ["polars"]}, config_path=None
        ).python_version
        is None
    )
    assert PyProjectReader({}, config_path=None).python_version is None
    # A non-string 'requires-python' value is ignored as well.
    assert (
        PyProjectReader(
            {"requires-python": {"invalid": "type"}}, config_path=None
        ).python_version
        is None
    )
def test_get_dependencies_with_python_version():
    """Dependencies and requires-python are parsed independently."""
    SCRIPT = """
# /// script
# requires-python = ">=3.11"
# dependencies = ["polars"]
# ///

import marimo
"""
    assert PyProjectReader.from_script(SCRIPT).dependencies == ["polars"]
    pyproject = read_pyproject_from_script(SCRIPT)
    assert pyproject is not None
    assert (
        PyProjectReader(pyproject, config_path=None).python_version == ">=3.11"
    )

    # Omitting requires-python leaves python_version as None but still
    # parses the dependency list.
    SCRIPT_NO_PYTHON = """
# /// script
# dependencies = ["polars"]
# ///

import marimo
"""
    pyproject_no_python = read_pyproject_from_script(SCRIPT_NO_PYTHON)
    assert pyproject_no_python is not None
    assert (
        PyProjectReader(pyproject_no_python, config_path=None).python_version
        is None
    )
    assert PyProjectReader.from_script(SCRIPT_NO_PYTHON).dependencies == [
        "polars"
    ]
def test_get_dependencies_with_nonexistent_file():
    """Unreadable or empty paths degrade to an empty dependency list."""
    missing_reader = PyProjectReader.from_filename("nonexistent_file.py")
    assert missing_reader.dependencies == []
    # An empty filename is treated the same way.
    assert PyProjectReader.from_filename("").dependencies == []
def test_is_marimo_dependency():
    """is_marimo_dependency matches the marimo package (with extras and
    version specifiers) but not similarly-named packages.

    Fix: the original asserted ``marimo[extras]>=0.1.0`` and
    ``marimo[extras]<=0.1.0`` twice each; the duplicates are removed.
    """
    assert is_marimo_dependency("marimo")
    assert is_marimo_dependency("marimo[extras]")
    assert not is_marimo_dependency("marimo-extras")
    assert not is_marimo_dependency("marimo-ai")

    # With version specifiers
    assert is_marimo_dependency("marimo==0.1.0")
    assert is_marimo_dependency("marimo[extras]>=0.1.0")
    assert is_marimo_dependency("marimo[extras]==0.1.0")
    assert is_marimo_dependency("marimo[extras]~=0.1.0")
    assert is_marimo_dependency("marimo[extras]<=0.1.0")

    # With other packages
    assert not is_marimo_dependency("numpy")
    assert not is_marimo_dependency("pandas")
    assert not is_marimo_dependency("marimo-ai")
    assert not is_marimo_dependency("marimo-ai==0.1.0")
def test_has_marimo_in_script_metadata(tmp_path):
    """has_marimo_in_script_metadata returns True/False/None as appropriate."""

    def write_notebook(name, body):
        # Helper: create a file under tmp_path and return its path string.
        target = tmp_path / name
        target.write_text(body)
        return str(target)

    # True: marimo is listed in the script metadata.
    path_with = write_notebook(
        "with_marimo.py",
        "# /// script\n# dependencies = ['marimo']\n# ///\n",
    )
    assert has_marimo_in_script_metadata(path_with) is True

    # False: metadata exists but marimo is absent.
    path_without = write_notebook(
        "without_marimo.py",
        "# /// script\n# dependencies = ['numpy']\n# ///\n",
    )
    assert has_marimo_in_script_metadata(path_without) is False

    # None: the file has no metadata block at all.
    path_plain = write_notebook("no_metadata.py", "import marimo\n")
    assert has_marimo_in_script_metadata(path_plain) is None

    # None: non-.py files are not inspected.
    assert has_marimo_in_script_metadata(str(tmp_path / "test.md")) is None
def test_script_metadata_hash_from_filename_none_without_metadata(
    tmp_path: Path,
) -> None:
    """A notebook without inline metadata has no metadata hash."""
    notebook = tmp_path / "no_metadata.py"
    notebook.write_text("import marimo\n", encoding="utf-8")
    metadata_hash = script_metadata_hash_from_filename(str(notebook))
    assert metadata_hash is None
def test_script_metadata_hash_from_filename_ignores_formatting(
    tmp_path: Path,
) -> None:
    """Semantically identical metadata hashes the same regardless of layout."""
    # Multi-line dependency list, dependencies before requires-python.
    first = tmp_path / "first.py"
    first.write_text(
        """
# /// script
# dependencies = [
#     "numpy",
#     "marimo>=0.20.0",
# ]
# requires-python = ">=3.11"
# ///
""",
        encoding="utf-8",
    )
    # Same content: single-line list, keys in the opposite order.
    second = tmp_path / "second.py"
    second.write_text(
        """
# /// script
# requires-python = ">=3.11"
# dependencies = ["numpy", "marimo>=0.20.0"]
# ///
""",
        encoding="utf-8",
    )
    assert script_metadata_hash_from_filename(
        str(first)
    ) == script_metadata_hash_from_filename(str(second))
def test_script_metadata_hash_from_filename_changes_with_dependencies(
    tmp_path: Path,
) -> None:
    """Changing the dependency list changes the metadata hash."""
    first = tmp_path / "first.py"
    first.write_text(
        """
# /// script
# dependencies = ["numpy"]
# ///
""",
        encoding="utf-8",
    )
    # Identical layout, different dependency.
    second = tmp_path / "second.py"
    second.write_text(
        """
# /// script
# dependencies = ["pandas"]
# ///
""",
        encoding="utf-8",
    )
    assert script_metadata_hash_from_filename(
        str(first)
    ) != script_metadata_hash_from_filename(str(second))
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_utils/test_inline_script_metadata.py",
"license": "Apache License 2.0",
"lines": 373,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_save/stores/rest.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import urllib.error
import urllib.request
from typing import Optional
from marimo import _loggers
from marimo._save.stores.store import Store
from marimo._version import __version__
LOGGER = _loggers.marimo_logger()
class RestStore(Store):
    """Cache store backed by a REST service.

    Keys map to ``{base_url}[/{project_id}]/{key}``; every request carries a
    bearer-token ``Authorization`` header. Network failures are logged and
    reported as cache misses/failures instead of raising, so a flaky remote
    never breaks the caller.
    """

    def __init__(
        self, *, base_url: str, api_key: str, project_id: Optional[str] = None
    ) -> None:
        super().__init__()
        assert api_key, "api_key is required"
        assert base_url, "base_url is required"
        self.base_url = base_url
        self.api_key = api_key
        self.project_id = project_id
        self.headers = {
            "Authorization": f"Bearer {self.api_key}",
            "User-Agent": f"marimo/{__version__}",
        }
        import ssl

        # One TLS context shared by every request so certificate validation
        # is configured in a single place.
        self.context = ssl.create_default_context()

    def get(self, key: str) -> Optional[bytes]:
        """Return the cached bytes for ``key``, or ``None`` on miss/error."""
        url = self._get_url(key)
        req = urllib.request.Request(url, headers=self.headers)
        try:
            with urllib.request.urlopen(req, context=self.context) as response:
                if response.status == 200:
                    LOGGER.debug(f"GET {url} - Status: {response.status}")
                    return response.read()  # type: ignore[no-any-return]
                if response.status >= 400 and response.status < 500:
                    # 400s are fine, they just mean the key doesn't exist
                    return None
                # Fix: a bare `raise` stood here with no active exception,
                # which produced a RuntimeError; log and treat as a miss.
                LOGGER.warning(f"GET {url} - Status: {response.status}")
                return None
        except urllib.error.HTTPError as e:
            if e.code >= 400 and e.code < 500:
                # 400s are fine, they just mean the key doesn't exist
                return None
            LOGGER.warning(f"GET {url} - Error: {e}")
        except Exception as e:
            LOGGER.warning(f"GET {url} - Error: {e}")
        return None

    def put(self, key: str, value: bytes) -> bool:
        """Store ``value`` under ``key``; return True on success."""
        url = self._get_url(key)
        req = urllib.request.Request(
            url,
            data=value,
            headers={
                **self.headers,
                "Content-Type": "application/octet-stream",
            },
            method="PUT",
        )
        try:
            with urllib.request.urlopen(req, context=self.context) as response:
                LOGGER.debug(f"PUT {url} - Status: {response.status}")
                return True
        except urllib.error.HTTPError as e:
            LOGGER.warning(
                f"PUT {url} - Status: {e.status} - Error: {e.reason}"
            )
        except Exception as e:
            LOGGER.warning(f"PUT {url} - Error: {e}")
        return False

    def hit(self, key: str) -> bool:
        """Return True if ``key`` exists, using a HEAD request."""
        url = self._get_url(key)
        req = urllib.request.Request(url, headers=self.headers, method="HEAD")
        try:
            # Fix: pass the shared SSL context, consistent with get/put.
            with urllib.request.urlopen(req, context=self.context) as response:
                LOGGER.debug(f"HEAD {url} - Status: {response.status}")
                return response.status == 200  # type: ignore[no-any-return]
        except urllib.error.HTTPError as e:
            LOGGER.warning(
                f"HEAD {url} - Status: {e.status} - Error: {e.reason}"
            )
            return False
        except Exception as e:
            LOGGER.warning(f"HEAD {url} - Error: {e}")
            return False

    def _get_url(self, key: str) -> str:
        """Build the request URL, inserting the project id when configured."""
        url = self.base_url
        if self.project_id:
            url = f"{url}/{self.project_id}"
        return f"{url}/{key}"
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_save/stores/rest.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_output/formatters/test_ipython_update.py | from __future__ import annotations
from unittest.mock import MagicMock, patch
import pytest
from marimo._dependencies.dependencies import DependencyManager
from marimo._output.formatters.ipython_formatters import (
IPythonFormatter,
)
HAS_DEPS = DependencyManager.ipython.has()
@pytest.mark.skipif(not HAS_DEPS, reason="IPython not installed")
@patch("marimo._runtime.output._output.replace")
@patch("marimo._runtime.output._output.append")
def test_display_update(mock_append: MagicMock, mock_replace: MagicMock):
    """Test that display with display_id returns a handle and update works."""
    # NOTE: @patch decorators apply bottom-up, so the innermost patch
    # (append) is the first mock argument.
    # Import IPython before patching to ensure we get the module
    import IPython.display

    # Apply our formatter patch
    unpatch = IPythonFormatter().register()
    try:
        # Now use the patched functions
        obj1 = IPython.display.HTML("<div>Initial Content</div>")
        handle = IPython.display.display(obj1, display_id="test-id")
        # Verify handle was returned and append was called
        assert handle is not None
        assert handle.display_id == "test-id"
        mock_append.assert_called_once_with(obj1)
        mock_append.reset_mock()
        # Test update_display
        obj2 = IPython.display.HTML("<div>Updated Content</div>")
        IPython.display.update_display(obj2, display_id="test-id")
        # Verify replace was called
        mock_replace.assert_called_once_with(obj2)
        # Test handle.update method
        mock_replace.reset_mock()
        obj3 = IPython.display.HTML("<div>Handle Updated Content</div>")
        handle.update(obj3)
        # Verify replace was called
        mock_replace.assert_called_once_with(obj3)
    finally:
        # Always restore IPython.display, even on assertion failure.
        unpatch()
@pytest.mark.skipif(not HAS_DEPS, reason="IPython not installed")
@patch("marimo._runtime.output._output.append")
def test_display_auto_id(mock_append: MagicMock):
    """Test that display with display_id=True auto-generates an ID."""
    # Import IPython before patching so the module object is available.
    import IPython.display

    # Install the marimo formatter patch; `restore` undoes it.
    restore = IPythonFormatter().register()
    try:
        # display_id=True should mint a fresh, non-empty string identifier.
        html_obj = IPython.display.HTML("<div>Auto ID Content</div>")
        display_handle = IPython.display.display(html_obj, display_id=True)

        assert display_handle is not None
        assert isinstance(display_handle.display_id, str)
        assert len(display_handle.display_id) > 0
        mock_append.assert_called_once_with(html_obj)
    finally:
        restore()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_output/formatters/test_ipython_update.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/issues/4746_altair_hstacks.py | import marimo
__generated_with = "0.15.5"
app = marimo.App(width="medium")
@app.cell
def _():
import altair as alt
import pandas as pd
import marimo as mo
from vega_datasets import data
source = data.cars()
plot_1 = mo.ui.altair_chart(
alt.Chart(source)
.mark_point()
.encode(
x="Horsepower",
y="Miles_per_Gallon",
)
)
plot_2 = mo.ui.altair_chart(
alt.Chart(source)
.mark_point()
.encode(
x="Year",
y="Horsepower",
)
)
mo.hstack([plot_1, plot_2])
return mo, plot_1, plot_2
@app.cell
def _(mo, plot_1, plot_2):
mo.hstack([plot_1, plot_2], justify="start")
return
@app.cell
def _(mo, plot_1, plot_2):
mo.hstack([plot_1, plot_2], justify="start", widths="equal")
return
@app.cell
def _(plot_1):
plot_1
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/issues/4746_altair_hstacks.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:examples/misc/strands_agent_aws_elasticache_neptune.py |
"""
GitHub Repository Research Agent with Persistent Memory
This example demonstrates how to build an AI agent with persistent memory using:
- Mem0 for memory orchestration and lifecycle management
- Amazon ElastiCache for Valkey for high-performance vector similarity search
- Amazon Neptune Analytics for graph-based relationship storage and traversal
- Strands Agents framework for agent orchestration and tool management
The agent can research GitHub repositories, store information in both vector and graph memory,
and retrieve relevant information for future queries with significant performance improvements.
For detailed explanation and architecture, see the blog posts:
- AWS Blog: https://aws.amazon.com/blogs/database/build-persistent-memory-for-agentic-ai-applications-with-mem0-open-source-amazon-elasticache-for-valkey-and-amazon-neptune-analytics/
- Mem0 Blog: https://mem0.ai/blog/build-persistent-memory-for-agentic-ai-applications-with-mem0-open-source-amazon-elasticache-for-valkey-and-amazon-neptune-analytics
Prerequisites:
1. ElastiCache cluster running Valkey 8.2+ with vector search support
2. Neptune Analytics graph with vector indexes and public access
3. AWS credentials with access to Bedrock, ElastiCache, and Neptune
Environment Variables:
- AWS_REGION=us-east-1
- AWS_ACCESS_KEY_ID=your_aws_access_key
- AWS_SECRET_ACCESS_KEY=your_aws_secret_key
- NEPTUNE_ENDPOINT=neptune-graph://your-graph-id (optional, defaults to g-6n3v83av7a)
- VALKEY_URL=valkey://your-cluster-endpoint:6379 (optional, defaults to localhost:6379)
Installation:
pip install strands-agents strands-agents-tools mem0ai streamlit
Usage:
streamlit run agent1.py
Example queries:
1. "What is the URL for the project mem0 and its most important metrics?"
2. "Find the top contributors for Mem0 and store this information in a graph"
3. "Who works in the core packages and the SDK updates?"
"""
import os
import streamlit as st
from strands import Agent, tool
from strands_tools import http_request
from mem0.memory.main import Memory
# Mem0 configuration: Bedrock models for embeddings and the LLM, ElastiCache
# for Valkey as the vector store, Neptune Analytics as the graph store.
config = {
    "embedder": {
        "provider": "aws_bedrock",
        "config": {
            "model": "amazon.titan-embed-text-v2:0"
        }
    },
    "llm": {
        "provider": "aws_bedrock",
        "config": {
            "model": "us.anthropic.claude-sonnet-4-20250514-v1:0",
            "max_tokens": 512,
            "temperature": 0.5
        }
    },
    "vector_store": {
        "provider": "valkey",
        "config": {
            "collection_name": "blogpost1",
            # Must match the embedder's output dimensionality.
            "embedding_model_dims": 1024,
            "valkey_url": os.getenv("VALKEY_URL", "valkey://localhost:6379"),
            # HNSW index tuning for approximate nearest-neighbor search.
            "index_type": "hnsw",
            "hnsw_m": 32,
            "hnsw_ef_construction": 400,
            "hnsw_ef_runtime": 40
        }
    }
    ,
    "graph_store": {
        "provider": "neptune",
        "config": {
            "endpoint": os.getenv("NEPTUNE_ENDPOINT", "neptune-graph://g-6n3v83av7a"),
        },
    }
}

# Single Memory instance shared by all tool functions below.
m = Memory.from_config(config)
def get_assistant_response(messages):
    """
    Send the entire conversation thread to the agent in the proper Strands message format.

    Args:
        messages: List of message dictionaries with 'role' and 'content' keys

    Returns:
        Agent response result
    """
    # Strands expects each message's content as a list of {"text": ...} parts.
    formatted_messages = [
        {"role": msg["role"], "content": [{"text": msg["content"]}]}
        for msg in messages
    ]
    # Send the properly formatted message list to the agent
    return agent(formatted_messages)
@tool
def store_memory_tool(information: str, user_id: str = "user", category: str = "conversation") -> str:
    """
    Store standalone facts, preferences, descriptions, or unstructured information in vector-based memory.

    Use this tool for:
    - User preferences ("User prefers dark mode", "Alice likes coffee")
    - Standalone facts ("The meeting was productive", "Project deadline is next Friday")
    - Descriptions ("Alice is a software engineer", "The office is located downtown")
    - General context that doesn't involve relationships between entities

    Do NOT use for relationship information - use store_graph_memory_tool instead.

    Args:
        information: The standalone information to store in vector memory
        user_id: User identifier for memory storage (default: "user")
        category: Category for organizing memories (e.g., "preferences", "projects", "facts")

    Returns:
        Confirmation message about memory storage
    """
    # NOTE: the docstring above is read by Strands at runtime as the tool
    # description, so it is kept verbatim.
    try:
        # mem0 expects a chat-style message list for vector storage.
        payload = [{"role": "user", "content": information}]
        m.add(
            payload,
            user_id=user_id,
            metadata={"category": category, "storage_type": "vector"},
        )
        return f"✅ Successfully stored information in vector memory: '{information[:100]}...'"
    except Exception as e:
        print(f"Error storing vector memory: {e}")
        return f"❌ Failed to store vector memory: {str(e)}"
@tool
def store_graph_memory_tool(information: str, user_id: str = "user", category: str = "relationships") -> str:
    """
    Store relationship-based information, connections, or structured data in graph-based memory.
    In memory we will keep the information about projects and repositories we've learned about, including its URL and key metrics

    Use this tool for:
    - Relationships between people ("John manages Sarah", "Alice works with Bob")
    - Entity connections ("Project A depends on Project B", "Alice is part of Team X")
    - Hierarchical information ("Sarah reports to John", "Department A contains Team B")
    - Network connections ("Alice knows Bob through work", "Company X partners with Company Y")
    - Temporal sequences ("Event A led to Event B", "Meeting A was scheduled after Meeting B")
    - Any information where entities are connected to each other

    Use this instead of store_memory_tool when the information describes relationships or connections.

    Args:
        information: The relationship or connection information to store in graph memory
        user_id: User identifier for memory storage (default: "user")
        category: Category for organizing memories (default: "relationships")

    Returns:
        Confirmation message about graph memory storage
    """
    try:
        # The "RELATIONSHIP:" prefix marks the entry so searches can
        # distinguish graph memories from plain vector memories.
        memory_message = [{"role": "user", "content": f"RELATIONSHIP: {information}"}]
        m.add(memory_message, user_id=user_id, metadata={"category": category, "storage_type": "graph"})
        return f"✅ Successfully stored relationship in graph memory: '{information[:100]}...'"
    except Exception as e:
        return f"❌ Failed to store graph memory: {str(e)}"
@tool
def search_memory_tool(query: str, user_id: str = "user") -> str:
    """
    Search through vector-based memories using semantic similarity to find relevant standalone information.
    In memory we will keep the information about projects and repositories we've learned about, including its URL and key metrics

    Use this tool for:
    - Finding similar concepts or topics ("What do we know about AI?")
    - Semantic searches ("Find information about preferences")
    - Content-based searches ("What was said about the project deadline?")
    - General information retrieval that doesn't involve relationships

    For relationship-based queries, use search_graph_memory_tool instead.

    Args:
        query: Search query to find semantically similar memories
        user_id: User identifier to search memories for (default: "user")

    Returns:
        Relevant vector memories found or message if none found
    """
    # NOTE: the docstring above is read by Strands at runtime as the tool
    # description, so it is kept verbatim.
    try:
        results = m.search(query, user_id=user_id)
        # Normalize the response into a list of hits; anything unexpected
        # is treated as "no results".
        if isinstance(results, dict) and 'results' in results:
            hits = results['results']
        else:
            hits = []
        if not hits:
            return f"🔍 No vector memories found for query: '{query}'"
        formatted = []
        for idx, hit in enumerate(hits, 1):
            text = hit.get('memory', 'No memory text available')
            meta = hit.get('metadata', {})
            if isinstance(meta, dict):
                category = meta.get('category', 'unknown')
                storage_type = meta.get('storage_type', 'unknown')
            else:
                category = 'unknown'
                storage_type = 'unknown'
            score = hit.get('score', 0)
            formatted.append(f"{idx}. [{category}|{storage_type}] {text} (score: {score:.3f})")
        return f"🔍 Found {len(hits)} relevant vector memories:\n" + "\n".join(formatted)
    except Exception as e:
        print(f"Error searching vector memories: {e}")
        return f"❌ Failed to search vector memories: {str(e)}"
@tool
def search_graph_memory_tool(query: str, user_id: str = "user") -> str:
    """
    Search through graph-based memories to find relationship and connection information.

    Use this tool for:
    - Finding connections between entities ("How is Alice related to the project?")
    - Discovering relationships ("Who works with whom?")
    - Path-based queries ("What connects concept A to concept B?")
    - Hierarchical questions ("Who reports to whom?")
    - Network analysis ("What are all the connections to this person/entity?")
    - Relationship-based searches ("Find all partnerships", "Show team structures")

    This searches specifically for relationship and connection information stored in the graph.

    Args:
        query: Search query focused on relationships and connections
        user_id: User identifier to search memories for (default: "user")

    Returns:
        Relevant graph memories and relationships found or message if none found
    """
    try:
        # Bias the semantic search toward relationship-style entries.
        graph_query = f"relationships connections {query}"
        results = m.search(graph_query, user_id=user_id)
        if isinstance(results, dict) and 'results' in results:
            memory_list = results['results']
            if memory_list:
                memory_texts = []
                relationship_count = 0
                for i, result in enumerate(memory_list, 1):
                    memory_text = result.get('memory', 'No memory text available')
                    metadata = result.get('metadata', {})
                    category = metadata.get('category', 'unknown') if isinstance(metadata, dict) else 'unknown'
                    storage_type = metadata.get('storage_type', 'unknown') if isinstance(metadata, dict) else 'unknown'
                    score = result.get('score', 0)
                    # Prioritize graph/relationship memories
                    if 'RELATIONSHIP:' in memory_text or storage_type == 'graph' or category == 'relationships':
                        relationship_count += 1
                        memory_texts.append(f"{i}. 🔗 [{category}|{storage_type}] {memory_text} (score: {score:.3f})")
                    else:
                        memory_texts.append(f"{i}. [{category}|{storage_type}] {memory_text} (score: {score:.3f})")
                result_summary = f"🔗 Found {len(memory_list)} relevant memories ({relationship_count} relationship-focused):\n"
                return result_summary + "\n".join(memory_texts)
            else:
                return f"🔗 No graph memories found for query: '{query}'"
        else:
            return f"🔗 No graph memories found for query: '{query}'"
    except Exception as e:
        print(f"Error searching graph memories: {e}")
        return f"Failed to search graph memories: {str(e)}"
@tool
def get_all_memories_tool(user_id: str = "user") -> str:
    """
    Retrieve all stored memories for a user to get comprehensive context.

    Use this tool when you need to understand the full history of what has been remembered
    about a user or when you need comprehensive context for decision making.

    Args:
        user_id: User identifier to get all memories for (default: "user")

    Returns:
        All memories for the user or message if none found
    """
    # NOTE: the docstring above is read by Strands at runtime as the tool
    # description, so it is kept verbatim.
    try:
        all_memories = m.get_all(user_id=user_id)
        if isinstance(all_memories, dict) and 'results' in all_memories:
            entries = all_memories['results']
        else:
            entries = []
        if not entries:
            return f"📚 No memories found for user: '{user_id}'"
        formatted = []
        for idx, entry in enumerate(entries, 1):
            text = entry.get('memory', 'No memory text available')
            meta = entry.get('metadata', {})
            if isinstance(meta, dict):
                category = meta.get('category', 'unknown')
            else:
                category = 'unknown'
            created_at = entry.get('created_at', 'unknown time')
            formatted.append(f"{idx}. [{category}] {text} (stored: {created_at})")
        return f"📚 Found {len(entries)} total memories:\n" + "\n".join(formatted)
    except Exception as e:
        print(f"Error retrieving all memories: {e}")
        return f"❌ Failed to retrieve memories: {str(e)}"
# Initialize agent with tools (must be after tool definitions)
# The agent autonomously decides when to call HTTP access and the
# vector/graph memory tools registered here.
agent = Agent(tools=[http_request, store_memory_tool, store_graph_memory_tool, search_memory_tool, search_graph_memory_tool, get_all_memories_tool])
def store_memory(messages, user_id="alice", category="conversation"):
    """
    Store the conversation thread in mem0 memory.

    Best-effort: any storage failure is swallowed and ``None`` is returned,
    so a memory-backend outage never interrupts the chat loop.

    Args:
        messages: List of message dictionaries with 'role' and 'content' keys
        user_id: User identifier for memory storage
        category: Category for organizing memories

    Returns:
        Memory storage result, or None if storage failed
    """
    try:
        # Fix: removed dead commented-out prints (one referenced an `e`
        # that was no longer bound) and the redundant local variable.
        return m.add(messages, user_id=user_id, metadata={"category": category})
    except Exception:
        # Deliberately best-effort; failures are ignored.
        return None
def get_agent_metrics(result):
    """Summarize an agent run's cycle count, token usage, and elapsed time.

    Args:
        result: Agent result object exposing a ``metrics`` attribute

    Returns:
        One-line human-readable summary (also printed to stdout)
    """
    cycles = result.metrics.cycle_count
    tokens = result.metrics.accumulated_usage['totalTokens']
    elapsed = sum(result.metrics.cycle_durations)
    summary = (
        f"I've used {cycles} cycle counts,"
        f" {tokens} tokens"
        f", and {elapsed:.2f} seconds finding that answer"
    )
    print(summary)
    return summary
# --- Streamlit chat UI (runs top-to-bottom on every interaction) ---
st.title("Repo Research Agent")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Create a container with the chat frame styling
with st.container():
    st.markdown('<div class="chat-frame">', unsafe_allow_html=True)
    # Display chat messages from history on app rerun
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
    st.markdown('</div>', unsafe_allow_html=True)

# React to user input
if prompt := st.chat_input("Send a message"):
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Let the agent decide autonomously when to store memories
    # Pass the entire conversation thread to the agent
    response = get_assistant_response(st.session_state.messages)
    # Extract the text content from the AgentResult
    response_text = str(response)
    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        st.markdown(response_text)
    # Add assistant response to chat history (store as string, not AgentResult)
    st.session_state.messages.append({"role": "assistant", "content": response_text})
    tokenusage = get_agent_metrics(response)
    # Add assistant token usage to chat history
    with st.chat_message("assistant"):
        st.markdown(tokenusage)
| {
"repo_id": "mem0ai/mem0",
"file_path": "examples/misc/strands_agent_aws_elasticache_neptune.py",
"license": "Apache License 2.0",
"lines": 314,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:mem0/configs/vector_stores/cassandra.py | from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field, model_validator
class CassandraConfig(BaseModel):
    """Configuration for Apache Cassandra vector database.

    Either ``contact_points`` (self-hosted cluster) or
    ``secure_connect_bundle`` (DataStax Astra DB) must be provided;
    ``username``/``password`` must be supplied together or not at all.
    """

    # Optional with a None default so Astra DB deployments can be configured
    # with only `secure_connect_bundle`; previously this field was required
    # (`...`), which made the bundle-only path promised by
    # check_connection_config unreachable.
    contact_points: Optional[List[str]] = Field(
        None,
        description="List of contact point addresses (e.g., ['127.0.0.1', '127.0.0.2'])"
    )
    port: int = Field(9042, description="Cassandra port")
    username: Optional[str] = Field(None, description="Database username")
    password: Optional[str] = Field(None, description="Database password")
    keyspace: str = Field("mem0", description="Keyspace name")
    collection_name: str = Field("memories", description="Table name")
    embedding_model_dims: int = Field(1536, description="Dimensions of the embedding model")
    secure_connect_bundle: Optional[str] = Field(
        None,
        description="Path to secure connect bundle for DataStax Astra DB"
    )
    protocol_version: int = Field(4, description="CQL protocol version")
    load_balancing_policy: Optional[Any] = Field(
        None,
        description="Custom load balancing policy object"
    )

    @model_validator(mode="before")
    @classmethod
    def check_auth(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Ensure username and password are provided together or not at all."""
        username = values.get("username")
        password = values.get("password")
        # XOR: exactly one of the two credentials present is invalid.
        if bool(username) != bool(password):
            raise ValueError(
                "Both 'username' and 'password' must be provided together for authentication"
            )
        return values

    @model_validator(mode="before")
    @classmethod
    def check_connection_config(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Require at least one way to reach the cluster."""
        if not values.get("secure_connect_bundle") and not values.get("contact_points"):
            raise ValueError(
                "Either 'contact_points' or 'secure_connect_bundle' must be provided"
            )
        return values

    @model_validator(mode="before")
    @classmethod
    def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Reject unknown keys with a message listing the valid field names."""
        allowed_fields = set(cls.model_fields.keys())
        extra_fields = set(values.keys()) - allowed_fields
        if extra_fields:
            raise ValueError(
                f"Extra fields not allowed: {', '.join(extra_fields)}. "
                f"Please input only the following fields: {', '.join(allowed_fields)}"
            )
        return values

    class Config:
        # Needed because load_balancing_policy holds a driver object, not a
        # pydantic-validatable type.
        arbitrary_types_allowed = True
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/vector_stores/cassandra.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:mem0/vector_stores/cassandra.py | import json
import logging
import uuid
from typing import Any, Dict, List, Optional
import numpy as np
from pydantic import BaseModel
try:
from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
except ImportError:
raise ImportError(
"Apache Cassandra vector store requires cassandra-driver. "
"Please install it using 'pip install cassandra-driver'"
)
from mem0.vector_stores.base import VectorStoreBase
logger = logging.getLogger(__name__)
class OutputData(BaseModel):
    """A single vector-store record as returned to callers.

    ``score`` is the cosine *distance* (lower is better) populated by
    ``search``; it is ``None`` for direct lookups via ``get``/``list``.
    ``payload`` is the JSON-decoded metadata stored alongside the vector.
    """

    id: Optional[str]
    score: Optional[float]
    payload: Optional[dict]
class CassandraDB(VectorStoreBase):
    """Vector store backed by Apache Cassandra (or DataStax Astra DB).

    Vectors are stored as ``list<float>`` columns and payloads as JSON text;
    similarity search is computed client-side with cosine distance because
    plain Cassandra has no native vector index.
    """

    def __init__(
        self,
        contact_points: List[str],
        port: int = 9042,
        username: Optional[str] = None,
        password: Optional[str] = None,
        keyspace: str = "mem0",
        collection_name: str = "memories",
        embedding_model_dims: int = 1536,
        secure_connect_bundle: Optional[str] = None,
        protocol_version: int = 4,
        load_balancing_policy: Optional[Any] = None,
    ):
        """
        Initialize the Apache Cassandra vector store.

        Args:
            contact_points (List[str]): List of contact point addresses (e.g., ['127.0.0.1'])
            port (int): Cassandra port (default: 9042)
            username (str, optional): Database username
            password (str, optional): Database password
            keyspace (str): Keyspace name (default: "mem0")
            collection_name (str): Table name (default: "memories")
            embedding_model_dims (int): Dimension of the embedding vector (default: 1536)
            secure_connect_bundle (str, optional): Path to secure connect bundle for Astra DB
            protocol_version (int): CQL protocol version (default: 4)
            load_balancing_policy (Any, optional): Custom load balancing policy
        """
        self.contact_points = contact_points
        self.port = port
        self.username = username
        self.password = password
        self.keyspace = keyspace
        self.collection_name = collection_name
        self.embedding_model_dims = embedding_model_dims
        self.secure_connect_bundle = secure_connect_bundle
        self.protocol_version = protocol_version
        self.load_balancing_policy = load_balancing_policy

        # Connect first; the DDL below requires a live session.
        self.cluster = None
        self.session = None
        self._setup_connection()

        # Create keyspace and table if they don't exist.
        self._create_keyspace()
        self._create_table()

    def _setup_connection(self):
        """Setup Cassandra cluster connection."""
        try:
            # Credentials are optional; check_auth-style pairing is assumed
            # to have been validated upstream.
            auth_provider = None
            if self.username and self.password:
                auth_provider = PlainTextAuthProvider(
                    username=self.username,
                    password=self.password
                )

            if self.secure_connect_bundle:
                # Astra DB: the bundle carries the endpoints, so no
                # contact_points/port are passed.
                self.cluster = Cluster(
                    cloud={'secure_connect_bundle': self.secure_connect_bundle},
                    auth_provider=auth_provider,
                    protocol_version=self.protocol_version
                )
            else:
                # Standard self-hosted cluster.
                cluster_kwargs = {
                    'contact_points': self.contact_points,
                    'port': self.port,
                    'protocol_version': self.protocol_version
                }
                if auth_provider:
                    cluster_kwargs['auth_provider'] = auth_provider
                if self.load_balancing_policy:
                    cluster_kwargs['load_balancing_policy'] = self.load_balancing_policy
                self.cluster = Cluster(**cluster_kwargs)

            self.session = self.cluster.connect()
            logger.info("Successfully connected to Cassandra cluster")
        except Exception as e:
            logger.error(f"Failed to connect to Cassandra: {e}")
            raise

    def _create_keyspace(self):
        """Create keyspace if it doesn't exist."""
        try:
            # SimpleStrategy/RF=1 suits single-DC dev setups; production
            # should use NetworkTopologyStrategy.
            query = f"""
                CREATE KEYSPACE IF NOT EXISTS {self.keyspace}
                WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 1}}
            """
            self.session.execute(query)
            self.session.set_keyspace(self.keyspace)
            logger.info(f"Keyspace '{self.keyspace}' is ready")
        except Exception as e:
            logger.error(f"Failed to create keyspace: {e}")
            raise

    def _create_table(self):
        """Create table with vector column if it doesn't exist."""
        try:
            # Vector is stored as list<float>, payload as JSON text.
            query = f"""
                CREATE TABLE IF NOT EXISTS {self.keyspace}.{self.collection_name} (
                    id text PRIMARY KEY,
                    vector list<float>,
                    payload text
                )
            """
            self.session.execute(query)
            logger.info(f"Table '{self.collection_name}' is ready")
        except Exception as e:
            logger.error(f"Failed to create table: {e}")
            raise

    def create_col(self, name: str = None, vector_size: int = None, distance: str = "cosine"):
        """
        Create a new collection (table in Cassandra).

        Args:
            name (str, optional): Collection name (uses self.collection_name if not provided)
            vector_size (int, optional): Vector dimension (uses self.embedding_model_dims if not provided)
            distance (str): Distance metric (cosine, euclidean, dot_product).
                NOTE: currently informational only — search always uses cosine.
        """
        table_name = name or self.collection_name
        dims = vector_size or self.embedding_model_dims
        try:
            query = f"""
                CREATE TABLE IF NOT EXISTS {self.keyspace}.{table_name} (
                    id text PRIMARY KEY,
                    vector list<float>,
                    payload text
                )
            """
            self.session.execute(query)
            logger.info(f"Created collection '{table_name}' with vector dimension {dims}")
        except Exception as e:
            logger.error(f"Failed to create collection: {e}")
            raise

    def insert(
        self,
        vectors: List[List[float]],
        payloads: Optional[List[Dict]] = None,
        ids: Optional[List[str]] = None
    ):
        """
        Insert vectors into the collection.

        Args:
            vectors (List[List[float]]): List of vectors to insert
            payloads (List[Dict], optional): List of payloads corresponding to vectors
            ids (List[str], optional): List of IDs corresponding to vectors (UUIDs generated if omitted)
        """
        logger.info(f"Inserting {len(vectors)} vectors into collection {self.collection_name}")
        if payloads is None:
            # One dict PER vector — `[{}] * n` would alias a single shared dict.
            payloads = [{} for _ in vectors]
        if ids is None:
            ids = [str(uuid.uuid4()) for _ in range(len(vectors))]

        try:
            query = f"""
                INSERT INTO {self.keyspace}.{self.collection_name} (id, vector, payload)
                VALUES (?, ?, ?)
            """
            prepared = self.session.prepare(query)
            for vector, payload, vec_id in zip(vectors, payloads, ids):
                self.session.execute(
                    prepared,
                    (vec_id, vector, json.dumps(payload))
                )
        except Exception as e:
            logger.error(f"Failed to insert vectors: {e}")
            raise

    def search(
        self,
        query: str,
        vectors: List[float],
        limit: int = 5,
        filters: Optional[Dict] = None,
    ) -> List[OutputData]:
        """
        Search for similar vectors using cosine similarity.

        Args:
            query (str): Query string (not used in vector search)
            vectors (List[float]): Query vector
            limit (int): Number of results to return
            filters (Dict, optional): Exact-match filters applied to payload fields

        Returns:
            List[OutputData]: Search results ordered by ascending cosine distance
        """
        try:
            # Full scan: plain Cassandra has no vector index, so all rows are
            # fetched and scored client-side (fine for modest table sizes).
            query_cql = f"""
                SELECT id, vector, payload
                FROM {self.keyspace}.{self.collection_name}
            """
            rows = self.session.execute(query_cql)

            query_vec = np.array(vectors)
            query_norm = np.linalg.norm(query_vec)
            scored_results = []
            for row in rows:
                if not row.vector:
                    continue
                vec = np.array(row.vector)
                denom = query_norm * np.linalg.norm(vec)
                if denom == 0:
                    # Cosine similarity is undefined for zero-length vectors;
                    # skip rather than divide by zero (which yields nan scores).
                    continue
                similarity = np.dot(query_vec, vec) / denom
                distance = 1 - similarity

                # Exact-match filtering on the JSON payload.
                if filters:
                    try:
                        payload = json.loads(row.payload) if row.payload else {}
                        match = all(payload.get(k) == v for k, v in filters.items())
                        if not match:
                            continue
                    except json.JSONDecodeError:
                        continue

                scored_results.append((row.id, distance, row.payload))

            # Lower distance = more similar; keep the best `limit` rows.
            scored_results.sort(key=lambda x: x[1])
            scored_results = scored_results[:limit]

            return [
                OutputData(
                    id=r[0],
                    score=float(r[1]),
                    payload=json.loads(r[2]) if r[2] else {}
                )
                for r in scored_results
            ]
        except Exception as e:
            logger.error(f"Search failed: {e}")
            raise

    def delete(self, vector_id: str):
        """
        Delete a vector by ID.

        Args:
            vector_id (str): ID of the vector to delete
        """
        try:
            query = f"""
                DELETE FROM {self.keyspace}.{self.collection_name}
                WHERE id = ?
            """
            prepared = self.session.prepare(query)
            self.session.execute(prepared, (vector_id,))
            logger.info(f"Deleted vector with id: {vector_id}")
        except Exception as e:
            logger.error(f"Failed to delete vector: {e}")
            raise

    def update(
        self,
        vector_id: str,
        vector: Optional[List[float]] = None,
        payload: Optional[Dict] = None,
    ):
        """
        Update a vector and its payload.

        Args:
            vector_id (str): ID of the vector to update
            vector (List[float], optional): Updated vector
            payload (Dict, optional): Updated payload
        """
        try:
            if vector is not None:
                query = f"""
                    UPDATE {self.keyspace}.{self.collection_name}
                    SET vector = ?
                    WHERE id = ?
                """
                prepared = self.session.prepare(query)
                self.session.execute(prepared, (vector, vector_id))

            if payload is not None:
                query = f"""
                    UPDATE {self.keyspace}.{self.collection_name}
                    SET payload = ?
                    WHERE id = ?
                """
                prepared = self.session.prepare(query)
                self.session.execute(prepared, (json.dumps(payload), vector_id))

            logger.info(f"Updated vector with id: {vector_id}")
        except Exception as e:
            logger.error(f"Failed to update vector: {e}")
            raise

    def get(self, vector_id: str) -> Optional[OutputData]:
        """
        Retrieve a vector by ID.

        Args:
            vector_id (str): ID of the vector to retrieve

        Returns:
            OutputData: Retrieved vector or None if not found (or on error)
        """
        try:
            query = f"""
                SELECT id, vector, payload
                FROM {self.keyspace}.{self.collection_name}
                WHERE id = ?
            """
            prepared = self.session.prepare(query)
            row = self.session.execute(prepared, (vector_id,)).one()

            if not row:
                return None

            return OutputData(
                id=row.id,
                score=None,
                payload=json.loads(row.payload) if row.payload else {}
            )
        except Exception as e:
            logger.error(f"Failed to get vector: {e}")
            return None

    def list_cols(self) -> List[str]:
        """
        List all collections (tables in the keyspace).

        Returns:
            List[str]: List of collection names ([] on error)
        """
        try:
            # Bind the keyspace name instead of interpolating it into the CQL.
            query = """
                SELECT table_name
                FROM system_schema.tables
                WHERE keyspace_name = %s
            """
            rows = self.session.execute(query, (self.keyspace,))
            return [row.table_name for row in rows]
        except Exception as e:
            logger.error(f"Failed to list collections: {e}")
            return []

    def delete_col(self):
        """Delete the collection (table)."""
        try:
            query = f"""
                DROP TABLE IF EXISTS {self.keyspace}.{self.collection_name}
            """
            self.session.execute(query)
            logger.info(f"Deleted collection '{self.collection_name}'")
        except Exception as e:
            logger.error(f"Failed to delete collection: {e}")
            raise

    def col_info(self) -> Dict[str, Any]:
        """
        Get information about the collection.

        Returns:
            Dict[str, Any]: Collection information ({} on error)
        """
        try:
            # COUNT(*) is a full scan in Cassandra and may time out on very
            # large tables; treat the value as approximate.
            query = f"""
                SELECT COUNT(*) as count
                FROM {self.keyspace}.{self.collection_name}
            """
            row = self.session.execute(query).one()
            count = row.count if row else 0

            return {
                "name": self.collection_name,
                "keyspace": self.keyspace,
                "count": count,
                "vector_dims": self.embedding_model_dims
            }
        except Exception as e:
            logger.error(f"Failed to get collection info: {e}")
            return {}

    def list(
        self,
        filters: Optional[Dict] = None,
        limit: int = 100
    ) -> List[List[OutputData]]:
        """
        List all vectors in the collection.

        Args:
            filters (Dict, optional): Exact-match filters applied to payload fields
            limit (int): Number of vectors to fetch before filtering.
                NOTE: LIMIT applies server-side, so fewer than `limit` matches
                may be returned even when more exist.

        Returns:
            List[List[OutputData]]: Single-element list wrapping the records
        """
        try:
            query = f"""
                SELECT id, vector, payload
                FROM {self.keyspace}.{self.collection_name}
                LIMIT {limit}
            """
            rows = self.session.execute(query)

            results = []
            for row in rows:
                if filters:
                    try:
                        payload = json.loads(row.payload) if row.payload else {}
                        match = all(payload.get(k) == v for k, v in filters.items())
                        if not match:
                            continue
                    except json.JSONDecodeError:
                        continue

                results.append(
                    OutputData(
                        id=row.id,
                        score=None,
                        payload=json.loads(row.payload) if row.payload else {}
                    )
                )

            return [results]
        except Exception as e:
            logger.error(f"Failed to list vectors: {e}")
            return [[]]

    def reset(self):
        """Reset the collection by truncating it."""
        try:
            logger.warning(f"Resetting collection {self.collection_name}...")
            query = f"""
                TRUNCATE TABLE {self.keyspace}.{self.collection_name}
            """
            self.session.execute(query)
            logger.info(f"Collection '{self.collection_name}' has been reset")
        except Exception as e:
            logger.error(f"Failed to reset collection: {e}")
            raise

    def __del__(self):
        """Close the cluster connection when the object is deleted.

        Best-effort only: exceptions are swallowed because interpreter
        shutdown may have torn down dependencies already.
        """
        try:
            if self.cluster:
                self.cluster.shutdown()
                logger.info("Cassandra cluster connection closed")
        except Exception:
            pass
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/vector_stores/cassandra.py",
"license": "Apache License 2.0",
"lines": 432,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:tests/vector_stores/test_cassandra.py | import json
import pytest
from unittest.mock import Mock, patch
from mem0.vector_stores.cassandra import CassandraDB, OutputData
@pytest.fixture
def mock_session():
    """Provide a Cassandra session double with stubbed query APIs."""
    fake = Mock()
    fake.execute = Mock(return_value=Mock())
    fake.prepare = Mock(return_value=Mock())
    fake.set_keyspace = Mock()
    return fake
@pytest.fixture
def mock_cluster(mock_session):
    """Provide a cluster double whose connect() yields the session double."""
    fake = Mock()
    fake.connect = Mock(return_value=mock_session)
    fake.shutdown = Mock()
    return fake
@pytest.fixture
def cassandra_instance(mock_cluster, mock_session):
    """Build a CassandraDB wired to mocks so no real cluster is contacted."""
    with patch('mem0.vector_stores.cassandra.Cluster') as cluster_cls:
        cluster_cls.return_value = mock_cluster
        db = CassandraDB(
            contact_points=['127.0.0.1'],
            port=9042,
            username='testuser',
            password='testpass',
            keyspace='test_keyspace',
            collection_name='test_collection',
            embedding_model_dims=128,
        )
        db.session = mock_session
        return db
def test_cassandra_init(mock_cluster, mock_session):
    """CassandraDB should record its constructor arguments verbatim."""
    with patch('mem0.vector_stores.cassandra.Cluster') as cluster_cls:
        cluster_cls.return_value = mock_cluster
        db = CassandraDB(
            contact_points=['127.0.0.1'],
            port=9042,
            username='testuser',
            password='testpass',
            keyspace='test_keyspace',
            collection_name='test_collection',
            embedding_model_dims=128,
        )
        expected = {
            'contact_points': ['127.0.0.1'],
            'port': 9042,
            'username': 'testuser',
            'keyspace': 'test_keyspace',
            'collection_name': 'test_collection',
            'embedding_model_dims': 128,
        }
        for attr, value in expected.items():
            assert getattr(db, attr) == value
def test_create_col(cassandra_instance):
    """create_col should issue a CREATE TABLE statement via the session."""
    cassandra_instance.create_col(name="new_collection", vector_size=256)
    assert cassandra_instance.session.execute.called
def test_insert(cassandra_instance):
    """insert should prepare the INSERT statement and execute it per vector."""
    cassandra_instance.session.prepare = Mock(return_value=Mock())
    cassandra_instance.insert(
        vectors=[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]],
        payloads=[{"text": "test1"}, {"text": "test2"}],
        ids=["id1", "id2"],
    )
    assert cassandra_instance.session.prepare.called
    assert cassandra_instance.session.execute.called
def test_search(cassandra_instance):
    """search should score rows returned by the session and honour the limit."""
    rows = []
    for row_id, vec, text in [("id1", [0.1, 0.2, 0.3], "test1"),
                              ("id2", [0.4, 0.5, 0.6], "test2")]:
        row = Mock()
        row.id = row_id
        row.vector = vec
        row.payload = json.dumps({"text": text})
        rows.append(row)
    cassandra_instance.session.execute = Mock(return_value=rows)

    results = cassandra_instance.search(query="test", vectors=[0.2, 0.3, 0.4], limit=5)

    assert isinstance(results, list)
    assert len(results) <= 5
    assert cassandra_instance.session.execute.called
def test_delete(cassandra_instance):
    """delete should prepare and execute a DELETE for the given id."""
    cassandra_instance.session.prepare = Mock(return_value=Mock())
    cassandra_instance.delete(vector_id="test_id")
    assert cassandra_instance.session.prepare.called
    assert cassandra_instance.session.execute.called
def test_update(cassandra_instance):
    """update should prepare and execute statements for vector and payload."""
    cassandra_instance.session.prepare = Mock(return_value=Mock())
    cassandra_instance.update(
        vector_id="test_id",
        vector=[0.7, 0.8, 0.9],
        payload={"text": "updated"},
    )
    assert cassandra_instance.session.prepare.called
    assert cassandra_instance.session.execute.called
def test_get(cassandra_instance):
    """get should wrap the fetched row in an OutputData instance."""
    row = Mock()
    row.id = 'test_id'
    row.vector = [0.1, 0.2, 0.3]
    row.payload = json.dumps({"text": "test"})
    cassandra_instance.session.prepare = Mock(return_value=Mock())
    cassandra_instance.session.execute = Mock(
        return_value=Mock(one=Mock(return_value=row))
    )

    fetched = cassandra_instance.get(vector_id="test_id")

    assert fetched is not None
    assert isinstance(fetched, OutputData)
    assert fetched.id == "test_id"
def test_list_cols(cassandra_instance):
    """list_cols should return the table names reported by the system schema."""
    rows = [Mock(table_name="collection1"), Mock(table_name="collection2")]
    cassandra_instance.session.execute = Mock(return_value=rows)

    names = cassandra_instance.list_cols()

    assert isinstance(names, list)
    assert len(names) == 2
    assert "collection1" in names
def test_delete_col(cassandra_instance):
    """delete_col should issue a DROP TABLE statement via the session."""
    cassandra_instance.delete_col()
    assert cassandra_instance.session.execute.called
def test_col_info(cassandra_instance):
    """col_info should report at least the collection name and keyspace."""
    count_row = Mock(count=100)
    cassandra_instance.session.execute = Mock(
        return_value=Mock(one=Mock(return_value=count_row))
    )

    info = cassandra_instance.col_info()

    assert isinstance(info, dict)
    assert 'name' in info
    assert 'keyspace' in info
def test_list(cassandra_instance):
    """list should return a non-empty nested list when rows exist."""
    row = Mock()
    row.id = 'id1'
    row.vector = [0.1, 0.2, 0.3]
    row.payload = json.dumps({"text": "test1"})
    cassandra_instance.session.execute = Mock(return_value=[row])

    listed = cassandra_instance.list(limit=10)

    assert isinstance(listed, list)
    assert len(listed) > 0
def test_reset(cassandra_instance):
    """reset should truncate the backing table via the session."""
    cassandra_instance.reset()
    assert cassandra_instance.session.execute.called
def test_astra_db_connection(mock_cluster, mock_session):
    """The secure-connect-bundle path should be stored for Astra DB setups."""
    with patch('mem0.vector_stores.cassandra.Cluster') as cluster_cls:
        cluster_cls.return_value = mock_cluster
        db = CassandraDB(
            contact_points=['127.0.0.1'],
            port=9042,
            username='testuser',
            password='testpass',
            keyspace='test_keyspace',
            collection_name='test_collection',
            embedding_model_dims=128,
            secure_connect_bundle='/path/to/bundle.zip',
        )
        assert db.secure_connect_bundle == '/path/to/bundle.zip'
def test_search_with_filters(cassandra_instance):
    """search should drop rows whose payload does not match the filters."""
    row_a = Mock()
    row_a.id = 'id1'
    row_a.vector = [0.1, 0.2, 0.3]
    row_a.payload = json.dumps({"text": "test1", "category": "A"})
    row_b = Mock()
    row_b.id = 'id2'
    row_b.vector = [0.4, 0.5, 0.6]
    row_b.payload = json.dumps({"text": "test2", "category": "B"})
    cassandra_instance.session.execute = Mock(return_value=[row_a, row_b])

    hits = cassandra_instance.search(
        query="test",
        vectors=[0.2, 0.3, 0.4],
        limit=5,
        filters={"category": "A"},
    )

    assert isinstance(hits, list)
    # Only category-A rows may survive the filter.
    for hit in hits:
        assert hit.payload.get("category") == "A"
def test_output_data_model():
    """OutputData should expose id, score and payload exactly as given."""
    record = OutputData(id="test_id", score=0.95, payload={"text": "test"})
    assert record.id == "test_id"
    assert record.score == 0.95
    assert record.payload == {"text": "test"}
def test_insert_without_ids(cassandra_instance):
    """insert should auto-generate UUIDs when no ids are supplied."""
    cassandra_instance.session.prepare = Mock(return_value=Mock())
    cassandra_instance.insert(
        vectors=[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]],
        payloads=[{"text": "test1"}, {"text": "test2"}],
    )
    assert cassandra_instance.session.prepare.called
    assert cassandra_instance.session.execute.called
def test_insert_without_payloads(cassandra_instance):
    """insert should default payloads to empty dicts when none are supplied."""
    cassandra_instance.session.prepare = Mock(return_value=Mock())
    cassandra_instance.insert(
        vectors=[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]],
        ids=["id1", "id2"],
    )
    assert cassandra_instance.session.prepare.called
    assert cassandra_instance.session.execute.called
| {
"repo_id": "mem0ai/mem0",
"file_path": "tests/vector_stores/test_cassandra.py",
"license": "Apache License 2.0",
"lines": 228,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mem0ai/mem0:mem0/embeddings/fastembed.py | from typing import Optional, Literal
from mem0.embeddings.base import EmbeddingBase
from mem0.configs.embeddings.base import BaseEmbedderConfig
try:
from fastembed import TextEmbedding
except ImportError:
raise ImportError("FastEmbed is not installed. Please install it using `pip install fastembed`")
class FastEmbedEmbedding(EmbeddingBase):
    """Embedding backend that runs FastEmbed models in the ONNX runtime."""

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        super().__init__(config)
        # Fall back to a general-purpose model when none is configured.
        self.config.model = self.config.model or "thenlper/gte-large"
        self.dense_model = TextEmbedding(model_name=self.config.model)

    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
        """
        Convert the text to embeddings using FastEmbed running in the Onnx runtime

        Args:
            text (str): The text to embed.
            memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.
        Returns:
            list: The embedding vector.
        """
        # Newlines are flattened to spaces before embedding.
        cleaned = text.replace("\n", " ")
        # embed() yields one vector per input string; return the single result.
        return next(iter(self.dense_model.embed(cleaned)))
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/embeddings/fastembed.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:tests/embeddings/test_fastembed_embeddings.py | from unittest.mock import Mock, patch
import pytest
import numpy as np
from mem0.configs.embeddings.base import BaseEmbedderConfig
try:
from mem0.embeddings.fastembed import FastEmbedEmbedding
except ImportError:
pytest.skip("fastembed not installed", allow_module_level=True)
@pytest.fixture
def mock_fastembed_client():
    """Patch TextEmbedding so no model weights are downloaded during tests."""
    with patch("mem0.embeddings.fastembed.TextEmbedding") as embedding_cls:
        client = Mock()
        embedding_cls.return_value = client
        yield client
def test_embed_with_jina_model(mock_fastembed_client):
    """embed should forward the text and return the first vector produced."""
    embedder = FastEmbedEmbedding(
        BaseEmbedderConfig(model="jinaai/jina-embeddings-v2-base-en", embedding_dims=768)
    )
    mock_fastembed_client.embed.return_value = iter(
        [np.array([0.1, 0.2, 0.3, 0.4, 0.5])]
    )

    result = embedder.embed("Sample text to embed.")

    mock_fastembed_client.embed.assert_called_once_with("Sample text to embed.")
    assert list(result) == [0.1, 0.2, 0.3, 0.4, 0.5]
def test_embed_removes_newlines(mock_fastembed_client):
    # Verifies that embed() flattens newlines to spaces before calling the model.
    config = BaseEmbedderConfig(model="jinaai/jina-embeddings-v2-base-en", embedding_dims=768)
    embedder = FastEmbedEmbedding(config)
    mock_embedding = np.array([0.7, 0.8, 0.9])
    mock_fastembed_client.embed.return_value = iter([mock_embedding])
    text_with_newlines = "Hello\nworld"
    embedding = embedder.embed(text_with_newlines)
    # The model must receive the cleaned text, not the raw input.
    mock_fastembed_client.embed.assert_called_once_with("Hello world")
assert list(embedding) == [0.7, 0.8, 0.9] | {
"repo_id": "mem0ai/mem0",
"file_path": "tests/embeddings/test_fastembed_embeddings.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mem0ai/mem0:mem0/configs/rerankers/base.py | from typing import Optional
from pydantic import BaseModel, Field
class BaseRerankerConfig(BaseModel):
    """
    Base configuration for rerankers with only common parameters.

    Provider-specific configurations should be handled by separate config classes.
    This class contains only the parameters that are common across all reranker providers.
    For provider-specific parameters, use the appropriate provider config class.
    """

    # All fields default to None so subclasses can narrow/override them.
    provider: Optional[str] = Field(default=None, description="The reranker provider to use")
    model: Optional[str] = Field(default=None, description="The reranker model to use")
    api_key: Optional[str] = Field(default=None, description="The API key for the reranker service")
    top_k: Optional[int] = Field(default=None, description="Maximum number of documents to return after reranking")
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/rerankers/base.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mem0ai/mem0:mem0/configs/rerankers/cohere.py | from typing import Optional
from pydantic import Field
from mem0.configs.rerankers.base import BaseRerankerConfig
class CohereRerankerConfig(BaseRerankerConfig):
    """
    Configuration class for Cohere reranker-specific parameters.
    Inherits from BaseRerankerConfig and adds Cohere-specific settings.
    """

    # Overrides the base class's None default with Cohere's English rerank model.
    model: Optional[str] = Field(default="rerank-english-v3.0", description="The Cohere rerank model to use")
    return_documents: bool = Field(default=False, description="Whether to return the document texts in the response")
    max_chunks_per_doc: Optional[int] = Field(default=None, description="Maximum number of chunks per document")
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/rerankers/cohere.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:mem0/configs/rerankers/config.py | from typing import Optional
from pydantic import BaseModel, Field
class RerankerConfig(BaseModel):
    """Configuration for rerankers.

    Wraps a provider name plus an opaque, provider-specific config dict.
    """

    provider: str = Field(description="Reranker provider (e.g., 'cohere', 'sentence_transformer')", default="cohere")
    config: Optional[dict] = Field(description="Provider-specific reranker configuration", default=None)

    # Reject unknown keys so typos fail loudly instead of being ignored.
    model_config = {"extra": "forbid"}
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/rerankers/config.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:mem0/configs/rerankers/huggingface.py | from typing import Optional
from pydantic import Field
from mem0.configs.rerankers.base import BaseRerankerConfig
class HuggingFaceRerankerConfig(BaseRerankerConfig):
    """
    Configuration class for HuggingFace reranker-specific parameters.
    Inherits from BaseRerankerConfig and adds HuggingFace-specific settings.
    """

    model: Optional[str] = Field(default="BAAI/bge-reranker-base", description="The HuggingFace model to use for reranking")
    # None lets the runtime pick the device — TODO confirm against the reranker implementation.
    device: Optional[str] = Field(default=None, description="Device to run the model on ('cpu', 'cuda', etc.)")
    batch_size: int = Field(default=32, description="Batch size for processing documents")
    max_length: int = Field(default=512, description="Maximum length for tokenization")
    normalize: bool = Field(default=True, description="Whether to normalize scores")
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/rerankers/huggingface.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:mem0/configs/rerankers/llm.py | from typing import Optional
from pydantic import Field
from mem0.configs.rerankers.base import BaseRerankerConfig
class LLMRerankerConfig(BaseRerankerConfig):
    """
    Configuration for LLM-based reranker.

    Attributes:
        model (str): LLM model to use for reranking. Defaults to "gpt-4o-mini".
        api_key (str): API key for the LLM provider.
        provider (str): LLM provider. Defaults to "openai".
        top_k (int): Number of top documents to return after reranking.
        temperature (float): Temperature for LLM generation. Defaults to 0.0 for deterministic scoring.
        max_tokens (int): Maximum tokens for LLM response. Defaults to 100.
        scoring_prompt (str): Custom prompt template for scoring documents.
    """

    model: str = Field(default="gpt-4o-mini", description="LLM model to use for reranking")
    api_key: Optional[str] = Field(default=None, description="API key for the LLM provider")
    provider: str = Field(default="openai", description="LLM provider (openai, anthropic, etc.)")
    top_k: Optional[int] = Field(default=None, description="Number of top documents to return after reranking")
    temperature: float = Field(default=0.0, description="Temperature for LLM generation")
    max_tokens: int = Field(default=100, description="Maximum tokens for LLM response")
    scoring_prompt: Optional[str] = Field(default=None, description="Custom prompt template for scoring documents")
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/rerankers/llm.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:mem0/configs/rerankers/sentence_transformer.py | from typing import Optional
from pydantic import Field
from mem0.configs.rerankers.base import BaseRerankerConfig
class SentenceTransformerRerankerConfig(BaseRerankerConfig):
"""
Configuration class for Sentence Transformer reranker-specific parameters.
Inherits from BaseRerankerConfig and adds Sentence Transformer-specific settings.
"""
model: Optional[str] = Field(default="cross-encoder/ms-marco-MiniLM-L-6-v2", description="The cross-encoder model name to use")
device: Optional[str] = Field(default=None, description="Device to run the model on ('cpu', 'cuda', etc.)")
batch_size: int = Field(default=32, description="Batch size for processing documents")
show_progress_bar: bool = Field(default=False, description="Whether to show progress bar during processing")
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/rerankers/sentence_transformer.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:mem0/configs/rerankers/zero_entropy.py | from typing import Optional
from pydantic import Field
from mem0.configs.rerankers.base import BaseRerankerConfig
class ZeroEntropyRerankerConfig(BaseRerankerConfig):
"""
Configuration for Zero Entropy reranker.
Attributes:
model (str): Model to use for reranking. Defaults to "zerank-1".
api_key (str): Zero Entropy API key. If not provided, will try to read from ZERO_ENTROPY_API_KEY environment variable.
top_k (int): Number of top documents to return after reranking.
"""
model: str = Field(
default="zerank-1",
description="Model to use for reranking. Available models: zerank-1, zerank-1-small"
)
api_key: Optional[str] = Field(
default=None,
description="Zero Entropy API key"
)
top_k: Optional[int] = Field(
default=None,
description="Number of top documents to return after reranking"
)
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/rerankers/zero_entropy.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:mem0/reranker/base.py | from abc import ABC, abstractmethod
from typing import List, Dict, Any
class BaseReranker(ABC):
"""Abstract base class for all rerankers."""
@abstractmethod
def rerank(self, query: str, documents: List[Dict[str, Any]], top_k: int = None) -> List[Dict[str, Any]]:
"""
Rerank documents based on relevance to the query.
Args:
query: The search query
documents: List of documents to rerank, each with 'memory' field
top_k: Number of top documents to return (None = return all)
Returns:
List of reranked documents with added 'rerank_score' field
"""
pass | {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/reranker/base.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mem0ai/mem0:mem0/reranker/cohere_reranker.py | import os
from typing import List, Dict, Any
from mem0.reranker.base import BaseReranker
try:
import cohere
COHERE_AVAILABLE = True
except ImportError:
COHERE_AVAILABLE = False
class CohereReranker(BaseReranker):
"""Cohere-based reranker implementation."""
def __init__(self, config):
"""
Initialize Cohere reranker.
Args:
config: CohereRerankerConfig object with configuration parameters
"""
if not COHERE_AVAILABLE:
raise ImportError("cohere package is required for CohereReranker. Install with: pip install cohere")
self.config = config
self.api_key = config.api_key or os.getenv("COHERE_API_KEY")
if not self.api_key:
raise ValueError("Cohere API key is required. Set COHERE_API_KEY environment variable or pass api_key in config.")
self.model = config.model
self.client = cohere.Client(self.api_key)
def rerank(self, query: str, documents: List[Dict[str, Any]], top_k: int = None) -> List[Dict[str, Any]]:
"""
Rerank documents using Cohere's rerank API.
Args:
query: The search query
documents: List of documents to rerank
top_k: Number of top documents to return
Returns:
List of reranked documents with rerank_score
"""
if not documents:
return documents
# Extract text content for reranking
doc_texts = []
for doc in documents:
if 'memory' in doc:
doc_texts.append(doc['memory'])
elif 'text' in doc:
doc_texts.append(doc['text'])
elif 'content' in doc:
doc_texts.append(doc['content'])
else:
doc_texts.append(str(doc))
try:
# Call Cohere rerank API
response = self.client.rerank(
model=self.model,
query=query,
documents=doc_texts,
top_n=top_k or self.config.top_k or len(documents),
return_documents=self.config.return_documents,
max_chunks_per_doc=self.config.max_chunks_per_doc,
)
# Create reranked results
reranked_docs = []
for result in response.results:
original_doc = documents[result.index].copy()
original_doc['rerank_score'] = result.relevance_score
reranked_docs.append(original_doc)
return reranked_docs
except Exception:
# Fallback to original order if reranking fails
for doc in documents:
doc['rerank_score'] = 0.0
return documents[:top_k] if top_k else documents | {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/reranker/cohere_reranker.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:mem0/reranker/huggingface_reranker.py | from typing import List, Dict, Any, Union
import numpy as np
from mem0.reranker.base import BaseReranker
from mem0.configs.rerankers.base import BaseRerankerConfig
from mem0.configs.rerankers.huggingface import HuggingFaceRerankerConfig
try:
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
TRANSFORMERS_AVAILABLE = True
except ImportError:
TRANSFORMERS_AVAILABLE = False
class HuggingFaceReranker(BaseReranker):
"""HuggingFace Transformers based reranker implementation."""
def __init__(self, config: Union[BaseRerankerConfig, HuggingFaceRerankerConfig, Dict]):
"""
Initialize HuggingFace reranker.
Args:
config: Configuration object with reranker parameters
"""
if not TRANSFORMERS_AVAILABLE:
raise ImportError("transformers package is required for HuggingFaceReranker. Install with: pip install transformers torch")
# Convert to HuggingFaceRerankerConfig if needed
if isinstance(config, dict):
config = HuggingFaceRerankerConfig(**config)
elif isinstance(config, BaseRerankerConfig) and not isinstance(config, HuggingFaceRerankerConfig):
# Convert BaseRerankerConfig to HuggingFaceRerankerConfig with defaults
config = HuggingFaceRerankerConfig(
provider=getattr(config, 'provider', 'huggingface'),
model=getattr(config, 'model', 'BAAI/bge-reranker-base'),
api_key=getattr(config, 'api_key', None),
top_k=getattr(config, 'top_k', None),
device=None, # Will auto-detect
batch_size=32, # Default
max_length=512, # Default
normalize=True, # Default
)
self.config = config
# Set device
if self.config.device is None:
self.device = "cuda" if torch.cuda.is_available() else "cpu"
else:
self.device = self.config.device
# Load model and tokenizer
self.tokenizer = AutoTokenizer.from_pretrained(self.config.model)
self.model = AutoModelForSequenceClassification.from_pretrained(self.config.model)
self.model.to(self.device)
self.model.eval()
def rerank(self, query: str, documents: List[Dict[str, Any]], top_k: int = None) -> List[Dict[str, Any]]:
"""
Rerank documents using HuggingFace cross-encoder model.
Args:
query: The search query
documents: List of documents to rerank
top_k: Number of top documents to return
Returns:
List of reranked documents with rerank_score
"""
if not documents:
return documents
# Extract text content for reranking
doc_texts = []
for doc in documents:
if 'memory' in doc:
doc_texts.append(doc['memory'])
elif 'text' in doc:
doc_texts.append(doc['text'])
elif 'content' in doc:
doc_texts.append(doc['content'])
else:
doc_texts.append(str(doc))
try:
scores = []
# Process documents in batches
for i in range(0, len(doc_texts), self.config.batch_size):
batch_docs = doc_texts[i:i + self.config.batch_size]
batch_pairs = [[query, doc] for doc in batch_docs]
# Tokenize batch
inputs = self.tokenizer(
batch_pairs,
padding=True,
truncation=True,
max_length=self.config.max_length,
return_tensors="pt"
).to(self.device)
# Get scores
with torch.no_grad():
outputs = self.model(**inputs)
batch_scores = outputs.logits.squeeze(-1).cpu().numpy()
# Handle single item case
if batch_scores.ndim == 0:
batch_scores = [float(batch_scores)]
else:
batch_scores = batch_scores.tolist()
scores.extend(batch_scores)
# Normalize scores if requested
if self.config.normalize:
scores = np.array(scores)
scores = (scores - scores.min()) / (scores.max() - scores.min() + 1e-8)
scores = scores.tolist()
# Combine documents with scores
doc_score_pairs = list(zip(documents, scores))
# Sort by score (descending)
doc_score_pairs.sort(key=lambda x: x[1], reverse=True)
# Apply top_k limit
final_top_k = top_k or self.config.top_k
if final_top_k:
doc_score_pairs = doc_score_pairs[:final_top_k]
# Create reranked results
reranked_docs = []
for doc, score in doc_score_pairs:
reranked_doc = doc.copy()
reranked_doc['rerank_score'] = float(score)
reranked_docs.append(reranked_doc)
return reranked_docs
except Exception:
# Fallback to original order if reranking fails
for doc in documents:
doc['rerank_score'] = 0.0
final_top_k = top_k or self.config.top_k
return documents[:final_top_k] if final_top_k else documents | {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/reranker/huggingface_reranker.py",
"license": "Apache License 2.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:mem0/reranker/llm_reranker.py | import re
from typing import List, Dict, Any, Union
from mem0.reranker.base import BaseReranker
from mem0.utils.factory import LlmFactory
from mem0.configs.rerankers.base import BaseRerankerConfig
from mem0.configs.rerankers.llm import LLMRerankerConfig
class LLMReranker(BaseReranker):
"""LLM-based reranker implementation."""
def __init__(self, config: Union[BaseRerankerConfig, LLMRerankerConfig, Dict]):
"""
Initialize LLM reranker.
Args:
config: Configuration object with reranker parameters
"""
# Convert to LLMRerankerConfig if needed
if isinstance(config, dict):
config = LLMRerankerConfig(**config)
elif isinstance(config, BaseRerankerConfig) and not isinstance(config, LLMRerankerConfig):
# Convert BaseRerankerConfig to LLMRerankerConfig with defaults
config = LLMRerankerConfig(
provider=getattr(config, 'provider', 'openai'),
model=getattr(config, 'model', 'gpt-4o-mini'),
api_key=getattr(config, 'api_key', None),
top_k=getattr(config, 'top_k', None),
temperature=0.0, # Default for reranking
max_tokens=100, # Default for reranking
)
self.config = config
# Create LLM configuration for the factory
llm_config = {
"model": self.config.model,
"temperature": self.config.temperature,
"max_tokens": self.config.max_tokens,
}
# Add API key if provided
if self.config.api_key:
llm_config["api_key"] = self.config.api_key
# Initialize LLM using the factory
self.llm = LlmFactory.create(self.config.provider, llm_config)
# Default scoring prompt
self.scoring_prompt = getattr(self.config, 'scoring_prompt', None) or self._get_default_prompt()
def _get_default_prompt(self) -> str:
"""Get the default scoring prompt template."""
return """You are a relevance scoring assistant. Given a query and a document, you need to score how relevant the document is to the query.
Score the relevance on a scale from 0.0 to 1.0, where:
- 1.0 = Perfectly relevant and directly answers the query
- 0.8-0.9 = Highly relevant with good information
- 0.6-0.7 = Moderately relevant with some useful information
- 0.4-0.5 = Slightly relevant with limited useful information
- 0.0-0.3 = Not relevant or no useful information
Query: "{query}"
Document: "{document}"
Provide only a single numerical score between 0.0 and 1.0. Do not include any explanation or additional text."""
def _extract_score(self, response_text: str) -> float:
"""Extract numerical score from LLM response."""
# Look for decimal numbers between 0.0 and 1.0
pattern = r'\b([01](?:\.\d+)?)\b'
matches = re.findall(pattern, response_text)
if matches:
score = float(matches[0])
return min(max(score, 0.0), 1.0) # Clamp between 0.0 and 1.0
# Fallback: return 0.5 if no valid score found
return 0.5
def rerank(self, query: str, documents: List[Dict[str, Any]], top_k: int = None) -> List[Dict[str, Any]]:
"""
Rerank documents using LLM scoring.
Args:
query: The search query
documents: List of documents to rerank
top_k: Number of top documents to return
Returns:
List of reranked documents with rerank_score
"""
if not documents:
return documents
scored_docs = []
for doc in documents:
# Extract text content
if 'memory' in doc:
doc_text = doc['memory']
elif 'text' in doc:
doc_text = doc['text']
elif 'content' in doc:
doc_text = doc['content']
else:
doc_text = str(doc)
try:
# Generate scoring prompt
prompt = self.scoring_prompt.format(query=query, document=doc_text)
# Get LLM response
response = self.llm.generate_response(
messages=[{"role": "user", "content": prompt}]
)
# Extract score from response
score = self._extract_score(response)
# Create scored document
scored_doc = doc.copy()
scored_doc['rerank_score'] = score
scored_docs.append(scored_doc)
except Exception:
# Fallback: assign neutral score if scoring fails
scored_doc = doc.copy()
scored_doc['rerank_score'] = 0.5
scored_docs.append(scored_doc)
# Sort by relevance score in descending order
scored_docs.sort(key=lambda x: x['rerank_score'], reverse=True)
# Apply top_k limit
if top_k:
scored_docs = scored_docs[:top_k]
elif self.config.top_k:
scored_docs = scored_docs[:self.config.top_k]
return scored_docs | {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/reranker/llm_reranker.py",
"license": "Apache License 2.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:mem0/reranker/sentence_transformer_reranker.py | from typing import List, Dict, Any, Union
import numpy as np
from mem0.reranker.base import BaseReranker
from mem0.configs.rerankers.base import BaseRerankerConfig
from mem0.configs.rerankers.sentence_transformer import SentenceTransformerRerankerConfig
try:
from sentence_transformers import SentenceTransformer
SENTENCE_TRANSFORMERS_AVAILABLE = True
except ImportError:
SENTENCE_TRANSFORMERS_AVAILABLE = False
class SentenceTransformerReranker(BaseReranker):
"""Sentence Transformer based reranker implementation."""
def __init__(self, config: Union[BaseRerankerConfig, SentenceTransformerRerankerConfig, Dict]):
"""
Initialize Sentence Transformer reranker.
Args:
config: Configuration object with reranker parameters
"""
if not SENTENCE_TRANSFORMERS_AVAILABLE:
raise ImportError("sentence-transformers package is required for SentenceTransformerReranker. Install with: pip install sentence-transformers")
# Convert to SentenceTransformerRerankerConfig if needed
if isinstance(config, dict):
config = SentenceTransformerRerankerConfig(**config)
elif isinstance(config, BaseRerankerConfig) and not isinstance(config, SentenceTransformerRerankerConfig):
# Convert BaseRerankerConfig to SentenceTransformerRerankerConfig with defaults
config = SentenceTransformerRerankerConfig(
provider=getattr(config, 'provider', 'sentence_transformer'),
model=getattr(config, 'model', 'cross-encoder/ms-marco-MiniLM-L-6-v2'),
api_key=getattr(config, 'api_key', None),
top_k=getattr(config, 'top_k', None),
device=None, # Will auto-detect
batch_size=32, # Default
show_progress_bar=False, # Default
)
self.config = config
self.model = SentenceTransformer(self.config.model, device=self.config.device)
def rerank(self, query: str, documents: List[Dict[str, Any]], top_k: int = None) -> List[Dict[str, Any]]:
"""
Rerank documents using sentence transformer cross-encoder.
Args:
query: The search query
documents: List of documents to rerank
top_k: Number of top documents to return
Returns:
List of reranked documents with rerank_score
"""
if not documents:
return documents
# Extract text content for reranking
doc_texts = []
for doc in documents:
if 'memory' in doc:
doc_texts.append(doc['memory'])
elif 'text' in doc:
doc_texts.append(doc['text'])
elif 'content' in doc:
doc_texts.append(doc['content'])
else:
doc_texts.append(str(doc))
try:
# Create query-document pairs
pairs = [[query, doc_text] for doc_text in doc_texts]
# Get similarity scores
scores = self.model.predict(pairs)
if isinstance(scores, np.ndarray):
scores = scores.tolist()
# Combine documents with scores
doc_score_pairs = list(zip(documents, scores))
# Sort by score (descending)
doc_score_pairs.sort(key=lambda x: x[1], reverse=True)
# Apply top_k limit
final_top_k = top_k or self.config.top_k
if final_top_k:
doc_score_pairs = doc_score_pairs[:final_top_k]
# Create reranked results
reranked_docs = []
for doc, score in doc_score_pairs:
reranked_doc = doc.copy()
reranked_doc['rerank_score'] = float(score)
reranked_docs.append(reranked_doc)
return reranked_docs
except Exception:
# Fallback to original order if reranking fails
for doc in documents:
doc['rerank_score'] = 0.0
final_top_k = top_k or self.config.top_k
return documents[:final_top_k] if final_top_k else documents | {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/reranker/sentence_transformer_reranker.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:mem0/reranker/zero_entropy_reranker.py | import os
from typing import List, Dict, Any
from mem0.reranker.base import BaseReranker
try:
from zeroentropy import ZeroEntropy
ZERO_ENTROPY_AVAILABLE = True
except ImportError:
ZERO_ENTROPY_AVAILABLE = False
class ZeroEntropyReranker(BaseReranker):
"""Zero Entropy-based reranker implementation."""
def __init__(self, config):
"""
Initialize Zero Entropy reranker.
Args:
config: ZeroEntropyRerankerConfig object with configuration parameters
"""
if not ZERO_ENTROPY_AVAILABLE:
raise ImportError("zeroentropy package is required for ZeroEntropyReranker. Install with: pip install zeroentropy")
self.config = config
self.api_key = config.api_key or os.getenv("ZERO_ENTROPY_API_KEY")
if not self.api_key:
raise ValueError("Zero Entropy API key is required. Set ZERO_ENTROPY_API_KEY environment variable or pass api_key in config.")
self.model = config.model or "zerank-1"
# Initialize Zero Entropy client
if self.api_key:
self.client = ZeroEntropy(api_key=self.api_key)
else:
self.client = ZeroEntropy() # Will use ZERO_ENTROPY_API_KEY from environment
def rerank(self, query: str, documents: List[Dict[str, Any]], top_k: int = None) -> List[Dict[str, Any]]:
"""
Rerank documents using Zero Entropy's rerank API.
Args:
query: The search query
documents: List of documents to rerank
top_k: Number of top documents to return
Returns:
List of reranked documents with rerank_score
"""
if not documents:
return documents
# Extract text content for reranking
doc_texts = []
for doc in documents:
if 'memory' in doc:
doc_texts.append(doc['memory'])
elif 'text' in doc:
doc_texts.append(doc['text'])
elif 'content' in doc:
doc_texts.append(doc['content'])
else:
doc_texts.append(str(doc))
try:
# Call Zero Entropy rerank API
response = self.client.models.rerank(
model=self.model,
query=query,
documents=doc_texts,
)
# Create reranked results
reranked_docs = []
for result in response.results:
original_doc = documents[result.index].copy()
original_doc['rerank_score'] = result.relevance_score
reranked_docs.append(original_doc)
# Sort by relevance score in descending order
reranked_docs.sort(key=lambda x: x['rerank_score'], reverse=True)
# Apply top_k limit
if top_k:
reranked_docs = reranked_docs[:top_k]
elif self.config.top_k:
reranked_docs = reranked_docs[:self.config.top_k]
return reranked_docs
except Exception:
# Fallback to original order if reranking fails
for doc in documents:
doc['rerank_score'] = 0.0
return documents[:top_k] if top_k else documents | {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/reranker/zero_entropy_reranker.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:mem0/utils/gcp_auth.py | import os
import json
from typing import Optional, Dict, Any
try:
from google.oauth2 import service_account
from google.auth import default
import google.auth.credentials
except ImportError:
raise ImportError("google-auth is required for GCP authentication. Install with: pip install google-auth")
class GCPAuthenticator:
"""
Centralized GCP authentication handler that supports multiple credential methods.
Priority order:
1. service_account_json (dict) - In-memory service account credentials
2. credentials_path (str) - Path to service account JSON file
3. Environment variables (GOOGLE_APPLICATION_CREDENTIALS)
4. Default credentials (for environments like GCE, Cloud Run, etc.)
"""
@staticmethod
def get_credentials(
service_account_json: Optional[Dict[str, Any]] = None,
credentials_path: Optional[str] = None,
scopes: Optional[list] = None
) -> tuple[google.auth.credentials.Credentials, Optional[str]]:
"""
Get Google credentials using the priority order defined above.
Args:
service_account_json: Service account credentials as a dictionary
credentials_path: Path to service account JSON file
scopes: List of OAuth scopes (optional)
Returns:
tuple: (credentials, project_id)
Raises:
ValueError: If no valid credentials are found
"""
credentials = None
project_id = None
# Method 1: Service account JSON (in-memory)
if service_account_json:
credentials = service_account.Credentials.from_service_account_info(
service_account_json, scopes=scopes
)
project_id = service_account_json.get("project_id")
# Method 2: Service account file path
elif credentials_path and os.path.isfile(credentials_path):
credentials = service_account.Credentials.from_service_account_file(
credentials_path, scopes=scopes
)
# Extract project_id from the file
with open(credentials_path, 'r') as f:
cred_data = json.load(f)
project_id = cred_data.get("project_id")
# Method 3: Environment variable path
elif os.getenv("GOOGLE_APPLICATION_CREDENTIALS"):
env_path = os.getenv("GOOGLE_APPLICATION_CREDENTIALS")
if os.path.isfile(env_path):
credentials = service_account.Credentials.from_service_account_file(
env_path, scopes=scopes
)
# Extract project_id from the file
with open(env_path, 'r') as f:
cred_data = json.load(f)
project_id = cred_data.get("project_id")
# Method 4: Default credentials (GCE, Cloud Run, etc.)
if not credentials:
try:
credentials, project_id = default(scopes=scopes)
except Exception as e:
raise ValueError(
f"No valid GCP credentials found. Please provide one of:\n"
f"1. service_account_json parameter (dict)\n"
f"2. credentials_path parameter (file path)\n"
f"3. GOOGLE_APPLICATION_CREDENTIALS environment variable\n"
f"4. Default credentials (if running on GCP)\n"
f"Error: {e}"
)
return credentials, project_id
@staticmethod
def setup_vertex_ai(
service_account_json: Optional[Dict[str, Any]] = None,
credentials_path: Optional[str] = None,
project_id: Optional[str] = None,
location: str = "us-central1"
) -> str:
"""
Initialize Vertex AI with proper authentication.
Args:
service_account_json: Service account credentials as dict
credentials_path: Path to service account JSON file
project_id: GCP project ID (optional, will be auto-detected)
location: GCP location/region
Returns:
str: The project ID being used
Raises:
ValueError: If authentication fails
"""
try:
import vertexai
except ImportError:
raise ImportError("google-cloud-aiplatform is required for Vertex AI. Install with: pip install google-cloud-aiplatform")
credentials, detected_project_id = GCPAuthenticator.get_credentials(
service_account_json=service_account_json,
credentials_path=credentials_path,
scopes=["https://www.googleapis.com/auth/cloud-platform"]
)
# Use provided project_id or fall back to detected one
final_project_id = project_id or detected_project_id or os.getenv("GOOGLE_CLOUD_PROJECT")
if not final_project_id:
raise ValueError("Project ID could not be determined. Please provide project_id parameter or set GOOGLE_CLOUD_PROJECT environment variable.")
vertexai.init(project=final_project_id, location=location, credentials=credentials)
return final_project_id
@staticmethod
def get_genai_client(
service_account_json: Optional[Dict[str, Any]] = None,
credentials_path: Optional[str] = None,
api_key: Optional[str] = None
):
"""
Get a Google GenAI client with authentication.
Args:
service_account_json: Service account credentials as dict
credentials_path: Path to service account JSON file
api_key: API key (takes precedence over service account)
Returns:
Google GenAI client instance
"""
try:
from google.genai import Client as GenAIClient
except ImportError:
raise ImportError("google-genai is required. Install with: pip install google-genai")
# If API key is provided, use it directly
if api_key:
return GenAIClient(api_key=api_key)
# Otherwise, try service account authentication
credentials, _ = GCPAuthenticator.get_credentials(
service_account_json=service_account_json,
credentials_path=credentials_path,
scopes=["https://www.googleapis.com/auth/generative-language"]
)
return GenAIClient(credentials=credentials) | {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/utils/gcp_auth.py",
"license": "Apache License 2.0",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:tests/vector_stores/test_milvus.py | """
Unit tests for Milvus vector store implementation.
These tests verify:
1. Correct type handling for vector dimensions
2. Batch insert functionality
3. Filter creation for metadata queries
4. Update/upsert operations
"""
import pytest
from unittest.mock import MagicMock, patch
from mem0.vector_stores.milvus import MilvusDB
from mem0.configs.vector_stores.milvus import MetricType
class TestMilvusDB:
"""Test suite for MilvusDB vector store."""
@pytest.fixture
def mock_milvus_client(self):
"""Mock MilvusClient to avoid requiring actual Milvus instance."""
with patch('mem0.vector_stores.milvus.MilvusClient') as mock_client:
mock_instance = MagicMock()
mock_instance.has_collection.return_value = False
mock_client.return_value = mock_instance
yield mock_instance
@pytest.fixture
def milvus_db(self, mock_milvus_client):
"""Create MilvusDB instance with mocked client."""
return MilvusDB(
url="http://localhost:19530",
token="test_token",
collection_name="test_collection",
embedding_model_dims=1536, # Should be int, not str
metric_type=MetricType.COSINE,
db_name="test_db"
)
def test_initialization_with_int_dims(self, mock_milvus_client):
"""Test that vector dimensions are correctly handled as integers."""
db = MilvusDB(
url="http://localhost:19530",
token="test_token",
collection_name="test_collection",
embedding_model_dims=1536, # Integer
metric_type=MetricType.COSINE,
db_name="test_db"
)
assert db.embedding_model_dims == 1536
assert isinstance(db.embedding_model_dims, int)
def test_create_col_with_int_vector_size(self, milvus_db, mock_milvus_client):
"""Test collection creation with integer vector size (bug fix validation)."""
# Collection was already created in __init__, but let's verify the call
mock_milvus_client.create_collection.assert_called_once()
call_args = mock_milvus_client.create_collection.call_args
# Verify schema was created properly
assert call_args is not None
def test_batch_insert(self, milvus_db, mock_milvus_client):
"""Test that insert uses batch operation instead of loop (performance fix)."""
ids = ["id1", "id2", "id3"]
vectors = [[0.1] * 1536, [0.2] * 1536, [0.3] * 1536]
payloads = [{"user_id": "alice"}, {"user_id": "bob"}, {"user_id": "charlie"}]
milvus_db.insert(ids, vectors, payloads)
# Verify insert was called once with all data (batch), not 3 times
assert mock_milvus_client.insert.call_count == 1
# Verify the data structure
call_args = mock_milvus_client.insert.call_args
inserted_data = call_args[1]['data']
assert len(inserted_data) == 3
assert inserted_data[0]['id'] == 'id1'
assert inserted_data[1]['id'] == 'id2'
assert inserted_data[2]['id'] == 'id3'
def test_create_filter_string_value(self, milvus_db):
"""Test filter creation for string metadata values."""
filters = {"user_id": "alice"}
filter_str = milvus_db._create_filter(filters)
assert filter_str == '(metadata["user_id"] == "alice")'
def test_create_filter_numeric_value(self, milvus_db):
"""Test filter creation for numeric metadata values."""
filters = {"age": 25}
filter_str = milvus_db._create_filter(filters)
assert filter_str == '(metadata["age"] == 25)'
def test_create_filter_multiple_conditions(self, milvus_db):
"""Test filter creation with multiple conditions."""
filters = {"user_id": "alice", "category": "work"}
filter_str = milvus_db._create_filter(filters)
# Should join with 'and'
assert 'metadata["user_id"] == "alice"' in filter_str
assert 'metadata["category"] == "work"' in filter_str
assert ' and ' in filter_str
def test_search_with_filters(self, milvus_db, mock_milvus_client):
"""Test search with metadata filters (reproduces user's bug scenario)."""
# Setup mock return value
mock_milvus_client.search.return_value = [[
{"id": "mem1", "distance": 0.8, "entity": {"metadata": {"user_id": "alice"}}}
]]
query_vector = [0.1] * 1536
filters = {"user_id": "alice"}
results = milvus_db.search(
query="test query",
vectors=query_vector,
limit=5,
filters=filters
)
# Verify search was called with correct filter
call_args = mock_milvus_client.search.call_args
assert call_args[1]['filter'] == '(metadata["user_id"] == "alice")'
# Verify results are parsed correctly
assert len(results) == 1
assert results[0].id == "mem1"
assert results[0].score == 0.8
def test_search_different_user_ids(self, milvus_db, mock_milvus_client):
"""Test that search works with different user_ids (reproduces reported bug)."""
# This test validates the fix for: "Error with different user_ids"
# Mock return for first user
mock_milvus_client.search.return_value = [[
{"id": "mem1", "distance": 0.9, "entity": {"metadata": {"user_id": "milvus_user"}}}
]]
results1 = milvus_db.search("test", [0.1] * 1536, filters={"user_id": "milvus_user"})
assert len(results1) == 1
# Mock return for second user
mock_milvus_client.search.return_value = [[
{"id": "mem2", "distance": 0.85, "entity": {"metadata": {"user_id": "bob"}}}
]]
# This should not raise "Unsupported Field type: 0" error
results2 = milvus_db.search("test", [0.2] * 1536, filters={"user_id": "bob"})
assert len(results2) == 1
def test_update_uses_upsert(self, milvus_db, mock_milvus_client):
"""Test that update correctly uses upsert operation."""
vector_id = "test_id"
vector = [0.1] * 1536
payload = {"user_id": "alice", "data": "Updated memory"}
milvus_db.update(vector_id=vector_id, vector=vector, payload=payload)
# Verify upsert was called (not delete+insert)
mock_milvus_client.upsert.assert_called_once()
call_args = mock_milvus_client.upsert.call_args
assert call_args[1]['collection_name'] == "test_collection"
assert call_args[1]['data']['id'] == vector_id
assert call_args[1]['data']['vectors'] == vector
assert call_args[1]['data']['metadata'] == payload
def test_delete(self, milvus_db, mock_milvus_client):
"""Test vector deletion."""
vector_id = "test_id"
milvus_db.delete(vector_id)
mock_milvus_client.delete.assert_called_once_with(
collection_name="test_collection",
ids=vector_id
)
def test_get(self, milvus_db, mock_milvus_client):
"""Test retrieving a vector by ID."""
vector_id = "test_id"
mock_milvus_client.get.return_value = [
{"id": vector_id, "metadata": {"user_id": "alice"}}
]
result = milvus_db.get(vector_id)
assert result.id == vector_id
assert result.payload == {"user_id": "alice"}
assert result.score is None
def test_list_with_filters(self, milvus_db, mock_milvus_client):
    """Test listing memories with filters."""
    mock_milvus_client.query.return_value = [
        {"id": "mem1", "metadata": {"user_id": "alice"}},
        {"id": "mem2", "metadata": {"user_id": "alice"}},
    ]

    listed = milvus_db.list(filters={"user_id": "alice"}, limit=10)

    # The filter expression must target the metadata JSON field.
    kwargs = mock_milvus_client.query.call_args[1]
    assert kwargs['filter'] == '(metadata["user_id"] == "alice")'
    assert kwargs['limit'] == 10
    # Both stored memories come back in the single result page.
    assert len(listed[0]) == 2
def test_parse_output(self, milvus_db):
    """Test output data parsing."""
    hits = [
        {"id": "mem1", "distance": 0.9, "entity": {"metadata": {"user_id": "alice"}}},
        {"id": "mem2", "distance": 0.85, "entity": {"metadata": {"user_id": "bob"}}},
    ]

    parsed = milvus_db._parse_output(hits)

    assert len(parsed) == 2
    first, second = parsed
    assert (first.id, first.score) == ("mem1", 0.9)
    assert first.payload == {"user_id": "alice"}
    assert (second.id, second.score) == ("mem2", 0.85)
def test_collection_already_exists(self, mock_milvus_client):
    """Test that existing collection is not recreated."""
    mock_milvus_client.has_collection.return_value = True

    MilvusDB(
        url="http://localhost:19530",
        token="test_token",
        collection_name="existing_collection",
        embedding_model_dims=1536,
        metric_type=MetricType.L2,
        db_name="test_db",
    )

    # An existing collection must be reused, never recreated.
    mock_milvus_client.create_collection.assert_not_called()
# Allow running this test module directly (e.g. `python test_milvus.py`).
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
| {
"repo_id": "mem0ai/mem0",
"file_path": "tests/vector_stores/test_milvus.py",
"license": "Apache License 2.0",
"lines": 202,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mem0ai/mem0:mem0/configs/vector_stores/azure_mysql.py | from typing import Any, Dict, Optional
from pydantic import BaseModel, Field, model_validator
class AzureMySQLConfig(BaseModel):
    """Configuration for Azure MySQL vector database.

    Either a ``password`` or ``use_azure_credential=True`` must be supplied,
    unless a pre-built ``connection_pool`` is given, in which case all
    connection-level validation is skipped.
    """

    host: str = Field(..., description="MySQL server host (e.g., myserver.mysql.database.azure.com)")
    port: int = Field(3306, description="MySQL server port")
    user: str = Field(..., description="Database user")
    password: Optional[str] = Field(None, description="Database password (not required if using Azure credential)")
    database: str = Field(..., description="Database name")
    collection_name: str = Field("mem0", description="Collection/table name")
    embedding_model_dims: int = Field(1536, description="Dimensions of the embedding model")
    use_azure_credential: bool = Field(
        False,
        description="Use Azure DefaultAzureCredential for authentication instead of password"
    )
    ssl_ca: Optional[str] = Field(None, description="Path to SSL CA certificate")
    ssl_disabled: bool = Field(False, description="Disable SSL connection (not recommended for production)")
    minconn: int = Field(1, description="Minimum number of connections in the pool")
    maxconn: int = Field(5, description="Maximum number of connections in the pool")
    connection_pool: Optional[Any] = Field(
        None,
        description="Pre-configured connection pool object (overrides other connection parameters)"
    )

    # NOTE(review): pydantic v2 does not guarantee a documented relative order
    # between multiple mode="before" model validators — these three are written
    # to be order-independent; keep them that way.
    @model_validator(mode="before")
    @classmethod
    def check_auth(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Validate authentication parameters."""
        # If connection_pool is provided, skip validation
        if values.get("connection_pool") is not None:
            return values
        use_azure_credential = values.get("use_azure_credential", False)
        password = values.get("password")
        # Either password or Azure credential must be provided
        if not use_azure_credential and not password:
            raise ValueError(
                "Either 'password' must be provided or 'use_azure_credential' must be set to True"
            )
        return values

    @model_validator(mode="before")
    @classmethod
    def check_required_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Validate required fields."""
        # If connection_pool is provided, skip validation of individual parameters
        if values.get("connection_pool") is not None:
            return values
        required_fields = ["host", "user", "database"]
        missing_fields = [field for field in required_fields if not values.get(field)]
        if missing_fields:
            raise ValueError(
                f"Missing required fields: {', '.join(missing_fields)}. "
                f"These fields are required when not using a pre-configured connection_pool."
            )
        return values

    @model_validator(mode="before")
    @classmethod
    def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Validate that no extra fields are provided."""
        # Manual reimplementation of pydantic's extra="forbid" behavior.
        allowed_fields = set(cls.model_fields.keys())
        input_fields = set(values.keys())
        extra_fields = input_fields - allowed_fields
        if extra_fields:
            raise ValueError(
                f"Extra fields not allowed: {', '.join(extra_fields)}. "
                f"Please input only the following fields: {', '.join(allowed_fields)}"
            )
        return values

    # NOTE(review): legacy (v1-style) inner Config mixed with v2 validators —
    # `model_config = ConfigDict(arbitrary_types_allowed=True)` is the v2 form.
    class Config:
        arbitrary_types_allowed = True
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/vector_stores/azure_mysql.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:mem0/vector_stores/azure_mysql.py | import json
import logging
from contextlib import contextmanager
from typing import Any, Dict, List, Optional
from pydantic import BaseModel
try:
import pymysql
from pymysql.cursors import DictCursor
from dbutils.pooled_db import PooledDB
except ImportError:
raise ImportError(
"Azure MySQL vector store requires PyMySQL and DBUtils. "
"Please install them using 'pip install pymysql dbutils'"
)
try:
from azure.identity import DefaultAzureCredential
AZURE_IDENTITY_AVAILABLE = True
except ImportError:
AZURE_IDENTITY_AVAILABLE = False
from mem0.vector_stores.base import VectorStoreBase
logger = logging.getLogger(__name__)
class OutputData(BaseModel):
    """Uniform record returned by AzureMySQL search/get/list operations."""

    id: Optional[str]  # record ID
    score: Optional[float]  # cosine distance for search results; None for get/list
    payload: Optional[dict]  # stored metadata payload
class AzureMySQL(VectorStoreBase):
    """Vector store backed by Azure Database for MySQL.

    Vectors and payloads are stored as JSON columns; similarity is computed
    client-side in Python (see ``search``), not in SQL.
    """

    def __init__(
        self,
        host: str,
        port: int,
        user: str,
        password: Optional[str],
        database: str,
        collection_name: str,
        embedding_model_dims: int,
        use_azure_credential: bool = False,
        ssl_ca: Optional[str] = None,
        ssl_disabled: bool = False,
        minconn: int = 1,
        maxconn: int = 5,
        connection_pool: Optional[Any] = None,
    ):
        """
        Initialize the Azure MySQL vector store.
        Args:
            host (str): MySQL server host
            port (int): MySQL server port
            user (str): Database user
            password (str, optional): Database password (not required if using Azure credential)
            database (str): Database name
            collection_name (str): Collection/table name
            embedding_model_dims (int): Dimension of the embedding vector
            use_azure_credential (bool): Use Azure DefaultAzureCredential for authentication
            ssl_ca (str, optional): Path to SSL CA certificate
            ssl_disabled (bool): Disable SSL connection
            minconn (int): Minimum number of connections in the pool
            maxconn (int): Maximum number of connections in the pool
            connection_pool (Any, optional): Pre-configured connection pool
        """
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.database = database
        self.collection_name = collection_name
        self.embedding_model_dims = embedding_model_dims
        self.use_azure_credential = use_azure_credential
        self.ssl_ca = ssl_ca
        self.ssl_disabled = ssl_disabled
        self.connection_pool = connection_pool
        # Handle Azure authentication (replaces self.password with an AAD token)
        if use_azure_credential:
            if not AZURE_IDENTITY_AVAILABLE:
                raise ImportError(
                    "Azure Identity is required for Azure credential authentication. "
                    "Please install it using 'pip install azure-identity'"
                )
            self._setup_azure_auth()
        # Setup connection pool (skipped when a pre-built pool was injected)
        if self.connection_pool is None:
            self._setup_connection_pool(minconn, maxconn)
        # Create collection if it doesn't exist
        collections = self.list_cols()
        if collection_name not in collections:
            self.create_col(name=collection_name, vector_size=embedding_model_dims, distance="cosine")

    def _setup_azure_auth(self):
        """Setup Azure authentication using DefaultAzureCredential."""
        # NOTE(review): the AAD access token fetched here is short-lived; pooled
        # connections created after it expires will fail to authenticate — confirm
        # whether token refresh is needed for long-running processes.
        try:
            credential = DefaultAzureCredential()
            # Get access token for Azure Database for MySQL
            token = credential.get_token("https://ossrdbms-aad.database.windows.net/.default")
            # Use token as password
            self.password = token.token
            logger.info("Successfully authenticated using Azure DefaultAzureCredential")
        except Exception as e:
            logger.error(f"Failed to authenticate with Azure: {e}")
            raise

    def _setup_connection_pool(self, minconn: int, maxconn: int):
        """Setup MySQL connection pool."""
        connect_kwargs = {
            "host": self.host,
            "port": self.port,
            "user": self.user,
            "password": self.password,
            "database": self.database,
            "charset": "utf8mb4",
            "cursorclass": DictCursor,  # rows come back as dicts, relied upon throughout
            "autocommit": False,  # commits are explicit via _get_cursor(commit=True)
        }
        # SSL configuration (enabled by default; certificate verification on)
        if not self.ssl_disabled:
            ssl_config = {"ssl_verify_cert": True}
            if self.ssl_ca:
                ssl_config["ssl_ca"] = self.ssl_ca
            connect_kwargs["ssl"] = ssl_config
        try:
            self.connection_pool = PooledDB(
                creator=pymysql,
                mincached=minconn,
                maxcached=maxconn,
                maxconnections=maxconn,
                blocking=True,  # block callers rather than error when pool is exhausted
                **connect_kwargs
            )
            logger.info("Successfully created MySQL connection pool")
        except Exception as e:
            logger.error(f"Failed to create connection pool: {e}")
            raise

    @contextmanager
    def _get_cursor(self, commit: bool = False):
        """
        Context manager to get a cursor from the connection pool.
        Auto-commits or rolls back based on exception.
        """
        conn = self.connection_pool.connection()
        cur = conn.cursor()
        try:
            yield cur
            if commit:
                conn.commit()
        except Exception as exc:
            conn.rollback()
            logger.error(f"Database error: {exc}", exc_info=True)
            raise
        finally:
            # Always return the connection to the pool, even on error.
            cur.close()
            conn.close()

    def create_col(self, name: str = None, vector_size: int = None, distance: str = "cosine"):
        """
        Create a new collection (table in MySQL).
        Enables vector extension and creates appropriate indexes.
        Args:
            name (str, optional): Collection name (uses self.collection_name if not provided)
            vector_size (int, optional): Vector dimension (uses self.embedding_model_dims if not provided)
            distance (str): Distance metric (cosine, euclidean, dot_product)
        """
        table_name = name or self.collection_name
        dims = vector_size or self.embedding_model_dims
        # NOTE(review): `dims` and `distance` do not appear in the DDL — the vector
        # is stored as a JSON array, so the dimension is not enforced by the schema.
        with self._get_cursor(commit=True) as cur:
            # Create table with vector column
            cur.execute(f"""
                CREATE TABLE IF NOT EXISTS `{table_name}` (
                    id VARCHAR(255) PRIMARY KEY,
                    vector JSON,
                    payload JSON,
                    INDEX idx_payload_keys ((CAST(payload AS CHAR(255)) ARRAY))
                )
            """)
        logger.info(f"Created collection '{table_name}' with vector dimension {dims}")

    def insert(self, vectors: List[List[float]], payloads: Optional[List[Dict]] = None, ids: Optional[List[str]] = None):
        """
        Insert vectors into the collection.
        Args:
            vectors (List[List[float]]): List of vectors to insert
            payloads (List[Dict], optional): List of payloads corresponding to vectors
            ids (List[str], optional): List of IDs corresponding to vectors
        """
        logger.info(f"Inserting {len(vectors)} vectors into collection {self.collection_name}")
        if payloads is None:
            payloads = [{}] * len(vectors)
        if ids is None:
            import uuid
            ids = [str(uuid.uuid4()) for _ in range(len(vectors))]
        data = []
        for vector, payload, vec_id in zip(vectors, payloads, ids):
            data.append((vec_id, json.dumps(vector), json.dumps(payload)))
        with self._get_cursor(commit=True) as cur:
            # Upsert semantics: re-inserting an existing id overwrites its row.
            cur.executemany(
                f"INSERT INTO `{self.collection_name}` (id, vector, payload) VALUES (%s, %s, %s) "
                f"ON DUPLICATE KEY UPDATE vector = VALUES(vector), payload = VALUES(payload)",
                data
            )

    def _cosine_distance(self, vec1_json: str, vec2: List[float]) -> str:
        """Generate SQL for cosine distance calculation."""
        # NOTE(review): this method is never called — search() computes cosine
        # similarity in Python instead. The SQL below is illustrative only and
        # looks incomplete (the index generator only covers small dimensions);
        # confirm before relying on it.
        # For MySQL, we need to calculate cosine similarity manually
        # This is a simplified version - in production, you'd use stored procedures or UDFs
        return """
            1 - (
                (SELECT SUM(a.val * b.val) /
                    (SQRT(SUM(a.val * a.val)) * SQRT(SUM(b.val * b.val))))
                FROM (
                    SELECT JSON_EXTRACT(vector, CONCAT('$[', idx, ']')) as val
                    FROM (SELECT @row := @row + 1 as idx FROM (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3) t1, (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3) t2) indices
                    WHERE idx < JSON_LENGTH(vector)
                ) a,
                (
                    SELECT JSON_EXTRACT(%s, CONCAT('$[', idx, ']')) as val
                    FROM (SELECT @row := @row + 1 as idx FROM (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3) t1, (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3) t2) indices
                    WHERE idx < JSON_LENGTH(%s)
                ) b
                WHERE a.idx = b.idx
            )
        """

    def search(
        self,
        query: str,
        vectors: List[float],
        limit: int = 5,
        filters: Optional[Dict] = None,
    ) -> List[OutputData]:
        """
        Search for similar vectors using cosine similarity.
        Args:
            query (str): Query string (not used in vector search)
            vectors (List[float]): Query vector
            limit (int): Number of results to return
            filters (Dict, optional): Filters to apply to the search
        Returns:
            List[OutputData]: Search results
        """
        filter_conditions = []
        filter_params = []
        if filters:
            for k, v in filters.items():
                filter_conditions.append("JSON_EXTRACT(payload, %s) = %s")
                filter_params.extend([f"$.{k}", json.dumps(v)])
        filter_clause = "WHERE " + " AND ".join(filter_conditions) if filter_conditions else ""
        # For simplicity, we'll compute cosine similarity in Python
        # In production, you'd want to use MySQL stored procedures or UDFs
        # NOTE(review): this fetches ALL filtered rows into memory before scoring —
        # fine for small collections, a scalability concern for large ones.
        with self._get_cursor() as cur:
            query_sql = f"""
                SELECT id, vector, payload
                FROM `{self.collection_name}`
                {filter_clause}
            """
            cur.execute(query_sql, filter_params)
            results = cur.fetchall()
        # Calculate cosine similarity in Python
        import numpy as np
        query_vec = np.array(vectors)
        scored_results = []
        for row in results:
            vec = np.array(json.loads(row['vector']))
            # Cosine similarity
            similarity = np.dot(query_vec, vec) / (np.linalg.norm(query_vec) * np.linalg.norm(vec))
            distance = 1 - similarity
            scored_results.append((row['id'], distance, row['payload']))
        # Sort by distance and limit (smaller distance = more similar)
        scored_results.sort(key=lambda x: x[1])
        scored_results = scored_results[:limit]
        return [
            OutputData(id=r[0], score=float(r[1]), payload=json.loads(r[2]) if isinstance(r[2], str) else r[2])
            for r in scored_results
        ]

    def delete(self, vector_id: str):
        """
        Delete a vector by ID.
        Args:
            vector_id (str): ID of the vector to delete
        """
        with self._get_cursor(commit=True) as cur:
            cur.execute(f"DELETE FROM `{self.collection_name}` WHERE id = %s", (vector_id,))

    def update(
        self,
        vector_id: str,
        vector: Optional[List[float]] = None,
        payload: Optional[Dict] = None,
    ):
        """
        Update a vector and its payload.
        Args:
            vector_id (str): ID of the vector to update
            vector (List[float], optional): Updated vector
            payload (Dict, optional): Updated payload
        """
        # Each provided field is updated independently; omitted ones are untouched.
        with self._get_cursor(commit=True) as cur:
            if vector is not None:
                cur.execute(
                    f"UPDATE `{self.collection_name}` SET vector = %s WHERE id = %s",
                    (json.dumps(vector), vector_id),
                )
            if payload is not None:
                cur.execute(
                    f"UPDATE `{self.collection_name}` SET payload = %s WHERE id = %s",
                    (json.dumps(payload), vector_id),
                )

    def get(self, vector_id: str) -> Optional[OutputData]:
        """
        Retrieve a vector by ID.
        Args:
            vector_id (str): ID of the vector to retrieve
        Returns:
            OutputData: Retrieved vector or None if not found
        """
        with self._get_cursor() as cur:
            cur.execute(
                f"SELECT id, vector, payload FROM `{self.collection_name}` WHERE id = %s",
                (vector_id,),
            )
            result = cur.fetchone()
        if not result:
            return None
        return OutputData(
            id=result['id'],
            score=None,
            payload=json.loads(result['payload']) if isinstance(result['payload'], str) else result['payload']
        )

    def list_cols(self) -> List[str]:
        """
        List all collections (tables).
        Returns:
            List[str]: List of collection names
        """
        with self._get_cursor() as cur:
            cur.execute("SHOW TABLES")
            # DictCursor keys SHOW TABLES rows as "Tables_in_<database>".
            return [row[f"Tables_in_{self.database}"] for row in cur.fetchall()]

    def delete_col(self):
        """Delete the collection (table)."""
        with self._get_cursor(commit=True) as cur:
            cur.execute(f"DROP TABLE IF EXISTS `{self.collection_name}`")
        logger.info(f"Deleted collection '{self.collection_name}'")

    def col_info(self) -> Dict[str, Any]:
        """
        Get information about the collection.
        Returns:
            Dict[str, Any]: Collection information
        """
        with self._get_cursor() as cur:
            cur.execute("""
                SELECT
                    TABLE_NAME as name,
                    TABLE_ROWS as count,
                    ROUND(((DATA_LENGTH + INDEX_LENGTH) / 1024 / 1024), 2) as size_mb
                FROM information_schema.TABLES
                WHERE TABLE_SCHEMA = %s AND TABLE_NAME = %s
            """, (self.database, self.collection_name))
            result = cur.fetchone()
        if result:
            return {
                "name": result['name'],
                "count": result['count'],  # NOTE: TABLE_ROWS is an estimate for InnoDB
                "size": f"{result['size_mb']} MB"
            }
        return {}

    def list(
        self,
        filters: Optional[Dict] = None,
        limit: int = 100
    ) -> List[List[OutputData]]:
        """
        List all vectors in the collection.
        Args:
            filters (Dict, optional): Filters to apply
            limit (int): Number of vectors to return
        Returns:
            List[List[OutputData]]: List of vectors
        """
        filter_conditions = []
        filter_params = []
        if filters:
            for k, v in filters.items():
                filter_conditions.append("JSON_EXTRACT(payload, %s) = %s")
                filter_params.extend([f"$.{k}", json.dumps(v)])
        filter_clause = "WHERE " + " AND ".join(filter_conditions) if filter_conditions else ""
        with self._get_cursor() as cur:
            cur.execute(
                f"""
                SELECT id, vector, payload
                FROM `{self.collection_name}`
                {filter_clause}
                LIMIT %s
                """,
                (*filter_params, limit)
            )
            results = cur.fetchall()
        # Wrapped in an outer list to match the mem0 vector-store list() shape.
        return [[
            OutputData(
                id=r['id'],
                score=None,
                payload=json.loads(r['payload']) if isinstance(r['payload'], str) else r['payload']
            ) for r in results
        ]]

    def reset(self):
        """Reset the collection by deleting and recreating it."""
        logger.warning(f"Resetting collection {self.collection_name}...")
        self.delete_col()
        self.create_col(name=self.collection_name, vector_size=self.embedding_model_dims)

    def __del__(self):
        """Close the connection pool when the object is deleted."""
        # Best-effort cleanup; __del__ may run during interpreter shutdown,
        # so all errors are deliberately swallowed.
        try:
            if hasattr(self, 'connection_pool') and self.connection_pool:
                self.connection_pool.close()
        except Exception:
            pass
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/vector_stores/azure_mysql.py",
"license": "Apache License 2.0",
"lines": 404,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:tests/vector_stores/test_azure_mysql.py | import json
import pytest
from unittest.mock import Mock, patch
from mem0.vector_stores.azure_mysql import AzureMySQL, OutputData
@pytest.fixture
def mock_connection_pool():
    """Create a mock connection pool."""
    # Build the cursor first, then wire connection -> cursor -> pool.
    cursor = Mock()
    cursor.fetchall = Mock(return_value=[])
    cursor.fetchone = Mock(return_value=None)
    cursor.execute = Mock()
    cursor.executemany = Mock()
    cursor.close = Mock()

    conn = Mock()
    conn.cursor = Mock(return_value=cursor)
    conn.commit = Mock()
    conn.rollback = Mock()
    conn.close = Mock()

    pool = Mock()
    pool.connection = Mock(return_value=conn)
    pool.close = Mock()
    return pool
@pytest.fixture
def azure_mysql_instance(mock_connection_pool):
    """Create an AzureMySQL instance with mocked connection pool."""
    with patch('mem0.vector_stores.azure_mysql.PooledDB', return_value=mock_connection_pool):
        store = AzureMySQL(
            host="test-server.mysql.database.azure.com",
            port=3306,
            user="testuser",
            password="testpass",
            database="testdb",
            collection_name="test_collection",
            embedding_model_dims=128,
            use_azure_credential=False,
            ssl_disabled=True,
        )
        # Point directly at the shared mock so tests can inspect calls.
        store.connection_pool = mock_connection_pool
        return store
def test_azure_mysql_init(mock_connection_pool):
    """Test AzureMySQL initialization."""
    with patch('mem0.vector_stores.azure_mysql.PooledDB', return_value=mock_connection_pool):
        store = AzureMySQL(
            host="test-server.mysql.database.azure.com",
            port=3306,
            user="testuser",
            password="testpass",
            database="testdb",
            collection_name="test_collection",
            embedding_model_dims=128,
        )
    expected = {
        "host": "test-server.mysql.database.azure.com",
        "port": 3306,
        "user": "testuser",
        "database": "testdb",
        "collection_name": "test_collection",
        "embedding_model_dims": 128,
    }
    for attr, value in expected.items():
        assert getattr(store, attr) == value
def test_create_col(azure_mysql_instance):
    """Test collection creation."""
    azure_mysql_instance.create_col(name="new_collection", vector_size=256)
    # The CREATE TABLE statement must have been executed on the pooled cursor.
    cursor = azure_mysql_instance.connection_pool.connection().cursor()
    assert cursor.execute.called
def test_insert(azure_mysql_instance):
    """Test vector insertion."""
    azure_mysql_instance.insert(
        vectors=[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]],
        payloads=[{"text": "test1"}, {"text": "test2"}],
        ids=["id1", "id2"],
    )
    # Bulk insert must go through executemany.
    cursor = azure_mysql_instance.connection_pool.connection().cursor()
    assert cursor.executemany.called
def test_search(azure_mysql_instance):
    """Test vector search."""
    cursor = azure_mysql_instance.connection_pool.connection().cursor()
    stored_rows = [
        {'id': 'id1', 'vector': json.dumps([0.1, 0.2, 0.3]), 'payload': json.dumps({"text": "test1"})},
        {'id': 'id2', 'vector': json.dumps([0.4, 0.5, 0.6]), 'payload': json.dumps({"text": "test2"})},
    ]
    cursor.fetchall = Mock(return_value=stored_rows)

    results = azure_mysql_instance.search(query="test", vectors=[0.2, 0.3, 0.4], limit=5)

    assert isinstance(results, list)
    assert cursor.execute.called
def test_delete(azure_mysql_instance):
    """Test vector deletion."""
    azure_mysql_instance.delete(vector_id="test_id")
    # A DELETE statement must have been executed.
    assert azure_mysql_instance.connection_pool.connection().cursor().execute.called
def test_update(azure_mysql_instance):
    """Test vector update."""
    azure_mysql_instance.update(
        vector_id="test_id",
        vector=[0.7, 0.8, 0.9],
        payload={"text": "updated"},
    )
    cursor = azure_mysql_instance.connection_pool.connection().cursor()
    assert cursor.execute.called
def test_get(azure_mysql_instance):
    """Test retrieving a vector by ID."""
    cursor = azure_mysql_instance.connection_pool.connection().cursor()
    cursor.fetchone = Mock(return_value={
        'id': 'test_id',
        'vector': json.dumps([0.1, 0.2, 0.3]),
        'payload': json.dumps({"text": "test"}),
    })

    fetched = azure_mysql_instance.get(vector_id="test_id")

    assert fetched is not None
    assert isinstance(fetched, OutputData)
    assert fetched.id == "test_id"
def test_list_cols(azure_mysql_instance):
    """Test listing collections."""
    cursor = azure_mysql_instance.connection_pool.connection().cursor()
    # SHOW TABLES rows are keyed "Tables_in_<database>" under DictCursor.
    cursor.fetchall = Mock(return_value=[
        {"Tables_in_testdb": "collection1"},
        {"Tables_in_testdb": "collection2"},
    ])

    names = azure_mysql_instance.list_cols()

    assert isinstance(names, list)
    assert len(names) == 2
def test_delete_col(azure_mysql_instance):
    """Test collection deletion."""
    azure_mysql_instance.delete_col()
    # The DROP TABLE statement must have been executed.
    assert azure_mysql_instance.connection_pool.connection().cursor().execute.called
def test_col_info(azure_mysql_instance):
    """Test getting collection information."""
    cursor = azure_mysql_instance.connection_pool.connection().cursor()
    cursor.fetchone = Mock(return_value={
        'name': 'test_collection',
        'count': 100,
        'size_mb': 1.5,
    })

    info = azure_mysql_instance.col_info()

    assert isinstance(info, dict)
    assert cursor.execute.called
def test_list(azure_mysql_instance):
    """Test listing vectors."""
    cursor = azure_mysql_instance.connection_pool.connection().cursor()
    cursor.fetchall = Mock(return_value=[
        {'id': 'id1', 'vector': json.dumps([0.1, 0.2, 0.3]), 'payload': json.dumps({"text": "test1"})},
    ])

    listed = azure_mysql_instance.list(limit=10)

    assert isinstance(listed, list)
    assert listed  # at least one result page
def test_reset(azure_mysql_instance):
    """Test resetting the collection."""
    azure_mysql_instance.reset()
    # reset() drops then recreates, so at least two statements run.
    cursor = azure_mysql_instance.connection_pool.connection().cursor()
    assert cursor.execute.call_count >= 2
def test_azure_credential_authentication(mock_connection_pool):
    """Test Azure DefaultAzureCredential authentication.

    Previously this test was unconditionally skipped ("Requires Azure
    credentials") and, even if unskipped, would have failed: only the
    credential was patched, so ``AzureMySQL.__init__`` still tried to build a
    real ``PooledDB`` connection pool. With the credential, the availability
    flag, and the pool all patched, no real Azure or MySQL access is needed
    and the test can always run.
    """
    with patch('mem0.vector_stores.azure_mysql.AZURE_IDENTITY_AVAILABLE', True), \
         patch('mem0.vector_stores.azure_mysql.DefaultAzureCredential', create=True) as mock_cred, \
         patch('mem0.vector_stores.azure_mysql.PooledDB', return_value=mock_connection_pool):
        mock_token = Mock()
        mock_token.token = "test_token"
        mock_cred.return_value.get_token.return_value = mock_token
        instance = AzureMySQL(
            host="test-server.mysql.database.azure.com",
            port=3306,
            user="testuser",
            password=None,
            database="testdb",
            collection_name="test_collection",
            embedding_model_dims=128,
            use_azure_credential=True,
        )
        # The AAD token must have been adopted as the connection password.
        assert instance.password == "test_token"
def test_output_data_model():
    """Test OutputData model."""
    record = OutputData(id="test_id", score=0.95, payload={"text": "test"})
    assert record.id == "test_id"
    assert record.score == 0.95
    assert record.payload == {"text": "test"}
| {
"repo_id": "mem0ai/mem0",
"file_path": "tests/vector_stores/test_azure_mysql.py",
"license": "Apache License 2.0",
"lines": 208,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mem0ai/mem0:tests/memory/test_storage.py | import os
import sqlite3
import tempfile
import uuid
from datetime import datetime
import pytest
from mem0.memory.storage import SQLiteManager
class TestSQLiteManager:
"""Comprehensive test cases for SQLiteManager class."""
@pytest.fixture
def temp_db_path(self):
"""Create temporary database file."""
temp_db = tempfile.NamedTemporaryFile(delete=False, suffix=".db")
temp_db.close()
yield temp_db.name
if os.path.exists(temp_db.name):
os.unlink(temp_db.name)
@pytest.fixture
def sqlite_manager(self, temp_db_path):
"""Create SQLiteManager instance with temporary database."""
manager = SQLiteManager(temp_db_path)
yield manager
if manager.connection:
manager.close()
@pytest.fixture
def memory_manager(self):
"""Create in-memory SQLiteManager instance."""
manager = SQLiteManager(":memory:")
yield manager
if manager.connection:
manager.close()
@pytest.fixture
def sample_data(self):
"""Sample test data."""
now = datetime.now().isoformat()
return {
"memory_id": str(uuid.uuid4()),
"old_memory": "Old memory content",
"new_memory": "New memory content",
"event": "ADD",
"created_at": now,
"updated_at": now,
"actor_id": "test_actor",
"role": "user",
}
# ========== Initialization Tests ==========
@pytest.mark.parametrize("db_type,path", [("file", "temp_db_path"), ("memory", ":memory:")])
def test_initialization(self, db_type, path, request):
"""Test SQLiteManager initialization with different database types."""
if db_type == "file":
db_path = request.getfixturevalue(path)
else:
db_path = path
manager = SQLiteManager(db_path)
assert manager.connection is not None
assert manager.db_path == db_path
manager.close()
def test_table_schema_creation(self, sqlite_manager):
"""Test that history table is created with correct schema."""
cursor = sqlite_manager.connection.cursor()
cursor.execute("PRAGMA table_info(history)")
columns = {row[1] for row in cursor.fetchall()}
expected_columns = {
"id",
"memory_id",
"old_memory",
"new_memory",
"event",
"created_at",
"updated_at",
"is_deleted",
"actor_id",
"role",
}
assert columns == expected_columns
# ========== Add History Tests ==========
def test_add_history_basic(self, sqlite_manager, sample_data):
"""Test basic add_history functionality."""
sqlite_manager.add_history(
memory_id=sample_data["memory_id"],
old_memory=sample_data["old_memory"],
new_memory=sample_data["new_memory"],
event=sample_data["event"],
created_at=sample_data["created_at"],
actor_id=sample_data["actor_id"],
role=sample_data["role"],
)
cursor = sqlite_manager.connection.cursor()
cursor.execute("SELECT * FROM history WHERE memory_id = ?", (sample_data["memory_id"],))
result = cursor.fetchone()
assert result is not None
assert result[1] == sample_data["memory_id"]
assert result[2] == sample_data["old_memory"]
assert result[3] == sample_data["new_memory"]
assert result[4] == sample_data["event"]
assert result[8] == sample_data["actor_id"]
assert result[9] == sample_data["role"]
@pytest.mark.parametrize(
"old_memory,new_memory,is_deleted", [(None, "New memory", 0), ("Old memory", None, 1), (None, None, 1)]
)
def test_add_history_optional_params(self, sqlite_manager, sample_data, old_memory, new_memory, is_deleted):
"""Test add_history with various optional parameter combinations."""
sqlite_manager.add_history(
memory_id=sample_data["memory_id"],
old_memory=old_memory,
new_memory=new_memory,
event="UPDATE",
updated_at=sample_data["updated_at"],
is_deleted=is_deleted,
actor_id=sample_data["actor_id"],
role=sample_data["role"],
)
cursor = sqlite_manager.connection.cursor()
cursor.execute("SELECT * FROM history WHERE memory_id = ?", (sample_data["memory_id"],))
result = cursor.fetchone()
assert result[2] == old_memory
assert result[3] == new_memory
assert result[6] == sample_data["updated_at"]
assert result[7] == is_deleted
def test_add_history_generates_unique_ids(self, sqlite_manager, sample_data):
"""Test that add_history generates unique IDs for each record."""
for i in range(3):
sqlite_manager.add_history(
memory_id=sample_data["memory_id"],
old_memory=f"Memory {i}",
new_memory=f"Updated Memory {i}",
event="ADD" if i == 0 else "UPDATE",
)
cursor = sqlite_manager.connection.cursor()
cursor.execute("SELECT id FROM history WHERE memory_id = ?", (sample_data["memory_id"],))
ids = [row[0] for row in cursor.fetchall()]
assert len(ids) == 3
assert len(set(ids)) == 3
# ========== Get History Tests ==========
def test_get_history_empty(self, sqlite_manager):
"""Test get_history for non-existent memory_id."""
result = sqlite_manager.get_history("non-existent-id")
assert result == []
def test_get_history_single_record(self, sqlite_manager, sample_data):
"""Test get_history for single record."""
sqlite_manager.add_history(
memory_id=sample_data["memory_id"],
old_memory=sample_data["old_memory"],
new_memory=sample_data["new_memory"],
event=sample_data["event"],
created_at=sample_data["created_at"],
actor_id=sample_data["actor_id"],
role=sample_data["role"],
)
result = sqlite_manager.get_history(sample_data["memory_id"])
assert len(result) == 1
record = result[0]
assert record["memory_id"] == sample_data["memory_id"]
assert record["old_memory"] == sample_data["old_memory"]
assert record["new_memory"] == sample_data["new_memory"]
assert record["event"] == sample_data["event"]
assert record["created_at"] == sample_data["created_at"]
assert record["actor_id"] == sample_data["actor_id"]
assert record["role"] == sample_data["role"]
assert record["is_deleted"] is False
def test_get_history_chronological_ordering(self, sqlite_manager, sample_data):
"""Test get_history returns records in chronological order."""
import time
timestamps = []
for i in range(3):
ts = datetime.now().isoformat()
timestamps.append(ts)
sqlite_manager.add_history(
memory_id=sample_data["memory_id"],
old_memory=f"Memory {i}",
new_memory=f"Memory {i+1}",
event="ADD" if i == 0 else "UPDATE",
created_at=ts,
updated_at=ts if i > 0 else None,
)
time.sleep(0.01)
result = sqlite_manager.get_history(sample_data["memory_id"])
result_timestamps = [r["created_at"] for r in result]
assert result_timestamps == sorted(timestamps)
def test_migration_preserves_data(self, temp_db_path, sample_data):
"""Test that migration preserves existing data."""
manager1 = SQLiteManager(temp_db_path)
manager1.add_history(
memory_id=sample_data["memory_id"],
old_memory=sample_data["old_memory"],
new_memory=sample_data["new_memory"],
event=sample_data["event"],
created_at=sample_data["created_at"],
)
original_data = manager1.get_history(sample_data["memory_id"])
manager1.close()
manager2 = SQLiteManager(temp_db_path)
migrated_data = manager2.get_history(sample_data["memory_id"])
manager2.close()
assert len(migrated_data) == len(original_data)
assert migrated_data[0]["memory_id"] == original_data[0]["memory_id"]
assert migrated_data[0]["new_memory"] == original_data[0]["new_memory"]
def test_large_batch_operations(self, sqlite_manager):
    """Insert a large batch of records and spot-check retrieval."""
    batch_size = 1000
    ids = [str(uuid.uuid4()) for _ in range(batch_size)]

    # One ADD record per memory id.
    for index, mem_id in enumerate(ids):
        sqlite_manager.add_history(
            memory_id=mem_id, old_memory=None, new_memory=f"Batch memory {index}", event="ADD"
        )

    # The history table should contain exactly one row per inserted id.
    cursor = sqlite_manager.connection.cursor()
    row_count = cursor.execute("SELECT COUNT(*) FROM history").fetchone()[0]
    assert row_count == batch_size

    # Spot-check the first few ids individually.
    for mem_id in ids[:10]:
        assert len(sqlite_manager.get_history(mem_id)) == 1
# ========== Tests for Migration, Reset, and Close ==========
def test_explicit_old_schema_migration(self, temp_db_path):
    """Test migration path from a legacy schema to new schema."""
    # Create a legacy 'history' table missing new columns
    # (no updated_at / actor_id / role / is_deleted columns).
    legacy_conn = sqlite3.connect(temp_db_path)
    legacy_conn.execute("""
        CREATE TABLE history (
            id TEXT PRIMARY KEY,
            memory_id TEXT,
            old_memory TEXT,
            new_memory TEXT,
            event TEXT,
            created_at DATETIME
        )
    """)
    # Seed one legacy row so we can verify it survives the migration.
    legacy_id = str(uuid.uuid4())
    legacy_conn.execute(
        "INSERT INTO history (id, memory_id, old_memory, new_memory, event, created_at) VALUES (?, ?, ?, ?, ?, ?)",
        (legacy_id, "m1", "o", "n", "ADD", datetime.now().isoformat()),
    )
    legacy_conn.commit()
    legacy_conn.close()
    # Trigger migration
    mgr = SQLiteManager(temp_db_path)
    history = mgr.get_history("m1")
    assert len(history) == 1
    assert history[0]["id"] == legacy_id
    # Columns added by the migration must be backfilled with defaults.
    assert history[0]["actor_id"] is None
    assert history[0]["is_deleted"] is False
    mgr.close()
| {
"repo_id": "mem0ai/mem0",
"file_path": "tests/memory/test_storage.py",
"license": "Apache License 2.0",
"lines": 242,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mem0ai/mem0:mem0/graphs/neptune/neptunedb.py | import logging
import uuid
from datetime import datetime
import pytz
from .base import NeptuneBase
try:
from langchain_aws import NeptuneGraph
except ImportError:
raise ImportError("langchain_aws is not installed. Please install it using 'make install_all'.")
logger = logging.getLogger(__name__)
class MemoryGraph(NeptuneBase):
def __init__(self, config):
    """
    Initialize the Neptune DB memory store.

    :param config: mem0 configuration; graph_store.config.endpoint must be
        of the form "neptune-db://<host>"
    :raises ValueError: if the endpoint is missing or not a neptune-db:// URI
    """
    self.config = config
    self.graph = None
    endpoint = self.config.graph_store.config.endpoint
    if endpoint and endpoint.startswith("neptune-db://"):
        # Endpoint format: neptune-db://<host>; port 8182 is hard-coded here.
        host = endpoint.replace("neptune-db://", "")
        port = 8182
        self.graph = NeptuneGraph(host, port)
    if not self.graph:
        raise ValueError("Unable to create a Neptune-DB client: missing 'endpoint' in config")
    # Optional common label applied to every node when base_label is set.
    self.node_label = ":`__Entity__`" if self.config.graph_store.config.base_label else ""
    self.embedding_model = NeptuneBase._create_embedding_model(self.config)
    # Default to openai if no specific provider is configured
    # (graph_store.llm takes precedence over the top-level llm provider).
    self.llm_provider = "openai"
    if self.config.graph_store.llm:
        self.llm_provider = self.config.graph_store.llm.provider
    elif self.config.llm.provider:
        self.llm_provider = self.config.llm.provider
    # fetch the vector store as a provider
    self.vector_store_provider = self.config.vector_store.provider
    # Node embeddings live in a dedicated vector-store collection; derive its
    # name from graph_store config, else from the vector_store config.
    if self.config.graph_store.config.collection_name:
        vector_store_collection_name = self.config.graph_store.config.collection_name
    else:
        vector_store_config = self.config.vector_store.config
        if vector_store_config.collection_name:
            vector_store_collection_name = vector_store_config.collection_name + "_neptune_vector_store"
        else:
            vector_store_collection_name = "mem0_neptune_vector_store"
    # NOTE(review): this mutates the shared config's collection_name in place.
    self.config.vector_store.config.collection_name = vector_store_collection_name
    self.vector_store = NeptuneBase._create_vector_store(self.vector_store_provider, self.config)
    self.llm = NeptuneBase._create_llm(self.config, self.llm_provider)
    self.user_id = None
    # Use threshold from graph_store config, default to 0.7 for backward compatibility
    self.threshold = self.config.graph_store.threshold if hasattr(self.config.graph_store, 'threshold') else 0.7
    # Max candidates fetched from the vector store per similarity search.
    self.vector_store_limit=5
def _delete_entities_cypher(self, source, destination, relationship, user_id):
    """
    Returns the OpenCypher query and parameters for deleting entities in the graph DB

    :param source: source node
    :param destination: destination node
    :param relationship: relationship label
    :param user_id: user_id to use
    :return: str, dict
    """
    # Only the relationship is deleted; both endpoint nodes are kept.
    # `relationship` is interpolated into the query text because relationship
    # types cannot be parameterized in Cypher.
    cypher = f"""
    MATCH (n {self.node_label} {{name: $source_name, user_id: $user_id}})
    -[r:{relationship}]->
    (m {self.node_label} {{name: $dest_name, user_id: $user_id}})
    DELETE r
    RETURN
        n.name AS source,
        m.name AS target,
        type(r) AS relationship
    """
    params = {
        "source_name": source,
        "dest_name": destination,
        "user_id": user_id,
    }
    logger.debug(f"_delete_entities\n query={cypher}")
    return cypher, params
def _add_entities_by_source_cypher(
    self,
    source_node_list,
    destination,
    dest_embedding,
    destination_type,
    relationship,
    user_id,
):
    """
    Returns the OpenCypher query and parameters for adding entities in the graph DB
    when the source node already exists and the destination may need creating.

    :param source_node_list: list of source nodes
    :param destination: destination name
    :param dest_embedding: destination embedding
    :param destination_type: destination node label
    :param relationship: relationship label
    :param user_id: user id to use
    :return: str, dict
    """
    # The destination embedding is stored in the external vector store, keyed
    # by a fresh uuid that is also used as the Neptune node `~id`.
    destination_id = str(uuid.uuid4())
    destination_payload = {
        "name": destination,
        "type": destination_type,
        "user_id": user_id,
        "created_at": datetime.now(pytz.timezone("US/Pacific")).isoformat(),
    }
    self.vector_store.insert(
        vectors=[dest_embedding],
        payloads=[destination_payload],
        ids=[destination_id],
    )
    # With base_label, nodes share the common label and keep the concrete type
    # as an extra property; otherwise the type itself becomes the node label.
    destination_label = self.node_label if self.node_label else f":`{destination_type}`"
    destination_extra_set = f", destination:`{destination_type}`" if self.node_label else ""
    cypher = f"""
    MATCH (source {{user_id: $user_id}})
    WHERE id(source) = $source_id
    SET source.mentions = coalesce(source.mentions, 0) + 1
    WITH source
    MERGE (destination {destination_label} {{`~id`: $destination_id, name: $destination_name, user_id: $user_id}})
    ON CREATE SET
        destination.created = timestamp(),
        destination.updated = timestamp(),
        destination.mentions = 1
        {destination_extra_set}
    ON MATCH SET
        destination.mentions = coalesce(destination.mentions, 0) + 1,
        destination.updated = timestamp()
    WITH source, destination
    MERGE (source)-[r:{relationship}]->(destination)
    ON CREATE SET
        r.created = timestamp(),
        r.updated = timestamp(),
        r.mentions = 1
    ON MATCH SET
        r.mentions = coalesce(r.mentions, 0) + 1,
        r.updated = timestamp()
    RETURN source.name AS source, type(r) AS relationship, destination.name AS target, id(destination) AS destination_id
    """
    # Only the best-ranked source candidate is used. NOTE(review):
    # "dest_embedding" is passed as a parameter but not referenced by the query.
    params = {
        "source_id": source_node_list[0]["id(source_candidate)"],
        "destination_id": destination_id,
        "destination_name": destination,
        "dest_embedding": dest_embedding,
        "user_id": user_id,
    }
    logger.debug(
        f"_add_entities:\n source_node_search_result={source_node_list[0]}\n query={cypher}"
    )
    return cypher, params
def _add_entities_by_destination_cypher(
    self,
    source,
    source_embedding,
    source_type,
    destination_node_list,
    relationship,
    user_id,
):
    """
    Returns the OpenCypher query and parameters for adding entities in the graph DB
    when the destination node already exists and the source may need creating.

    :param source: source node name
    :param source_embedding: source node embedding
    :param source_type: source node label
    :param destination_node_list: list of dest nodes
    :param relationship: relationship label
    :param user_id: user id to use
    :return: str, dict
    """
    # The source embedding is stored in the external vector store, keyed by a
    # fresh uuid that is also used as the Neptune node `~id`.
    source_id = str(uuid.uuid4())
    source_payload = {
        "name": source,
        "type": source_type,
        "user_id": user_id,
        "created_at": datetime.now(pytz.timezone("US/Pacific")).isoformat(),
    }
    self.vector_store.insert(
        vectors=[source_embedding],
        payloads=[source_payload],
        ids=[source_id],
    )
    # With base_label, nodes share the common label and keep the concrete type
    # as an extra property; otherwise the type itself becomes the node label.
    source_label = self.node_label if self.node_label else f":`{source_type}`"
    source_extra_set = f", source:`{source_type}`" if self.node_label else ""
    cypher = f"""
    MATCH (destination {{user_id: $user_id}})
    WHERE id(destination) = $destination_id
    SET
        destination.mentions = coalesce(destination.mentions, 0) + 1,
        destination.updated = timestamp()
    WITH destination
    MERGE (source {source_label} {{`~id`: $source_id, name: $source_name, user_id: $user_id}})
    ON CREATE SET
        source.created = timestamp(),
        source.updated = timestamp(),
        source.mentions = 1
        {source_extra_set}
    ON MATCH SET
        source.mentions = coalesce(source.mentions, 0) + 1,
        source.updated = timestamp()
    WITH source, destination
    MERGE (source)-[r:{relationship}]->(destination)
    ON CREATE SET
        r.created = timestamp(),
        r.updated = timestamp(),
        r.mentions = 1
    ON MATCH SET
        r.mentions = coalesce(r.mentions, 0) + 1,
        r.updated = timestamp()
    RETURN source.name AS source, type(r) AS relationship, destination.name AS target
    """
    # Only the best-ranked destination candidate is used. NOTE(review):
    # "source_embedding" is passed as a parameter but not referenced by the query.
    params = {
        "destination_id": destination_node_list[0]["id(destination_candidate)"],
        "source_id": source_id,
        "source_name": source,
        "source_embedding": source_embedding,
        "user_id": user_id,
    }
    logger.debug(
        f"_add_entities:\n destination_node_search_result={destination_node_list[0]}\n query={cypher}"
    )
    return cypher, params
def _add_relationship_entities_cypher(
    self,
    source_node_list,
    destination_node_list,
    relationship,
    user_id,
):
    """
    Returns the OpenCypher query and parameters for adding a relationship
    between two nodes that both already exist in the graph DB.

    :param source_node_list: list of source node ids
    :param destination_node_list: list of dest node ids
    :param relationship: relationship label
    :param user_id: user id to use
    :return: str, dict
    """
    # FIX: coalesce(destination.mentions) had no default value; in Cypher,
    # null + 1 is null, so a node without a `mentions` property would never
    # get its counter incremented. Every sibling query uses coalesce(x, 0).
    # NOTE(review): this relationship uses created_at/updated_at property
    # names while the other builders use created/updated — kept as-is for
    # compatibility with already-stored edges.
    cypher = f"""
    MATCH (source {{user_id: $user_id}})
    WHERE id(source) = $source_id
    SET
        source.mentions = coalesce(source.mentions, 0) + 1,
        source.updated = timestamp()
    WITH source
    MATCH (destination {{user_id: $user_id}})
    WHERE id(destination) = $destination_id
    SET
        destination.mentions = coalesce(destination.mentions, 0) + 1,
        destination.updated = timestamp()
    MERGE (source)-[r:{relationship}]->(destination)
    ON CREATE SET
        r.created_at = timestamp(),
        r.updated_at = timestamp(),
        r.mentions = 1
    ON MATCH SET r.mentions = coalesce(r.mentions, 0) + 1
    RETURN source.name AS source, type(r) AS relationship, destination.name AS target
    """
    # Only the best-ranked candidate from each search result list is linked.
    params = {
        "source_id": source_node_list[0]["id(source_candidate)"],
        "destination_id": destination_node_list[0]["id(destination_candidate)"],
        "user_id": user_id,
    }
    logger.debug(
        f"_add_entities:\n destination_node_search_result={destination_node_list[0]}\n source_node_search_result={source_node_list[0]}\n query={cypher}"
    )
    return cypher, params
def _add_new_entities_cypher(
    self,
    source,
    source_embedding,
    source_type,
    destination,
    dest_embedding,
    destination_type,
    relationship,
    user_id,
):
    """
    Returns the OpenCypher query and parameters for adding entities in the graph DB
    when neither the source nor the destination node exists yet.

    :param source: source node name
    :param source_embedding: source node embedding
    :param source_type: source node label
    :param destination: destination name
    :param dest_embedding: destination embedding
    :param destination_type: destination node label
    :param relationship: relationship label
    :param user_id: user id to use
    :return: str, dict
    """
    # Fresh uuids serve both as vector-store ids and Neptune `~id`s.
    source_id = str(uuid.uuid4())
    source_payload = {
        "name": source,
        "type": source_type,
        "user_id": user_id,
        "created_at": datetime.now(pytz.timezone("US/Pacific")).isoformat(),
    }
    destination_id = str(uuid.uuid4())
    destination_payload = {
        "name": destination,
        "type": destination_type,
        "user_id": user_id,
        "created_at": datetime.now(pytz.timezone("US/Pacific")).isoformat(),
    }
    # Both embeddings are written to the external vector store in one call.
    self.vector_store.insert(
        vectors=[source_embedding, dest_embedding],
        payloads=[source_payload, destination_payload],
        ids=[source_id, destination_id],
    )
    # With base_label, nodes share the common label and keep the concrete type
    # as an extra property; otherwise the type itself becomes the node label.
    source_label = self.node_label if self.node_label else f":`{source_type}`"
    source_extra_set = f", source:`{source_type}`" if self.node_label else ""
    destination_label = self.node_label if self.node_label else f":`{destination_type}`"
    destination_extra_set = f", destination:`{destination_type}`" if self.node_label else ""
    cypher = f"""
    MERGE (n {source_label} {{name: $source_name, user_id: $user_id, `~id`: $source_id}})
    ON CREATE SET n.created = timestamp(),
                  n.mentions = 1
                  {source_extra_set}
    ON MATCH SET n.mentions = coalesce(n.mentions, 0) + 1
    WITH n
    MERGE (m {destination_label} {{name: $dest_name, user_id: $user_id, `~id`: $dest_id}})
    ON CREATE SET m.created = timestamp(),
                  m.mentions = 1
                  {destination_extra_set}
    ON MATCH SET m.mentions = coalesce(m.mentions, 0) + 1
    WITH n, m
    MERGE (n)-[rel:{relationship}]->(m)
    ON CREATE SET rel.created = timestamp(), rel.mentions = 1
    ON MATCH SET rel.mentions = coalesce(rel.mentions, 0) + 1
    RETURN n.name AS source, type(rel) AS relationship, m.name AS target
    """
    # NOTE(review): "source_embedding"/"dest_embedding" are passed as
    # parameters but not referenced by the query (embeddings live in the
    # vector store for the Neptune-DB backend).
    params = {
        "source_id": source_id,
        "dest_id": destination_id,
        "source_name": source,
        "dest_name": destination,
        "source_embedding": source_embedding,
        "dest_embedding": dest_embedding,
        "user_id": user_id,
    }
    logger.debug(
        f"_add_new_entities_cypher:\n query={cypher}"
    )
    return cypher, params
def _search_source_node_cypher(self, source_embedding, user_id, threshold):
    """
    Returns the OpenCypher query and parameters to search for source nodes.
    Similarity ranking happens in the external vector store; the query only
    resolves the surviving candidate ids against the graph.

    :param source_embedding: source vector
    :param user_id: user_id to use
    :param threshold: the threshold for similarity
    :return: str, dict
    """
    source_nodes = self.vector_store.search(
        query="",
        vectors=source_embedding,
        limit=self.vector_store_limit,
        filters={"user_id": user_id},
    )
    # Keep only candidates whose similarity score clears the threshold.
    ids = [n.id for n in filter(lambda s: s.score > threshold, source_nodes)]
    cypher = f"""
    MATCH (source_candidate {self.node_label})
    WHERE source_candidate.user_id = $user_id AND id(source_candidate) IN $ids
    RETURN id(source_candidate)
    """
    # NOTE(review): "source_embedding" and "threshold" are passed as
    # parameters but not referenced by the query.
    params = {
        "ids": ids,
        "source_embedding": source_embedding,
        "user_id": user_id,
        "threshold": threshold,
    }
    logger.debug(f"_search_source_node\n query={cypher}")
    return cypher, params
def _search_destination_node_cypher(self, destination_embedding, user_id, threshold):
    """
    Returns the OpenCypher query and parameters to search for destination nodes.
    Similarity ranking happens in the external vector store; the query only
    resolves the surviving candidate ids against the graph.

    :param destination_embedding: destination vector
    :param user_id: user_id to use
    :param threshold: the threshold for similarity
    :return: str, dict
    """
    destination_nodes = self.vector_store.search(
        query="",
        vectors=destination_embedding,
        limit=self.vector_store_limit,
        filters={"user_id": user_id},
    )
    # Keep only candidates whose similarity score clears the threshold.
    ids = [n.id for n in filter(lambda d: d.score > threshold, destination_nodes)]
    cypher = f"""
    MATCH (destination_candidate {self.node_label})
    WHERE destination_candidate.user_id = $user_id AND id(destination_candidate) IN $ids
    RETURN id(destination_candidate)
    """
    # NOTE(review): "destination_embedding" is passed as a parameter but not
    # referenced by the query.
    params = {
        "ids": ids,
        "destination_embedding": destination_embedding,
        "user_id": user_id,
    }
    logger.debug(f"_search_destination_node\n query={cypher}")
    return cypher, params
def _delete_all_cypher(self, filters):
    """
    Returns the OpenCypher query and parameters to delete all edges/nodes in the memory store.
    Also resets the companion vector-store index as a side effect.

    :param filters: search filters (must contain "user_id")
    :return: str, dict
    """
    # remove the vector store index
    # NOTE(review): reset() clears the whole collection, not just this
    # user's vectors, even though the graph delete below is per-user.
    self.vector_store.reset()
    # create a query that: deletes the nodes of the graph_store
    cypher = f"""
    MATCH (n {self.node_label} {{user_id: $user_id}})
    DETACH DELETE n
    """
    params = {"user_id": filters["user_id"]}
    logger.debug(f"delete_all query={cypher}")
    return cypher, params
def _get_all_cypher(self, filters, limit):
    """
    Build the OpenCypher query and parameters that list every relationship
    between this user's nodes in the memory store.

    :param filters: search filters (must contain "user_id")
    :param limit: maximum number of rows to return
    :return: str, dict
    """
    query = f"""
    MATCH (n {self.node_label} {{user_id: $user_id}})-[r]->(m {self.node_label} {{user_id: $user_id}})
    RETURN n.name AS source, type(r) AS relationship, m.name AS target
    LIMIT $limit
    """
    return query, {"user_id": filters["user_id"], "limit": limit}
def _search_graph_db_cypher(self, n_embedding, filters, limit):
    """
    Returns the OpenCypher query and parameters to search for similar nodes in the memory store.
    Candidate nodes are ranked by the external vector store; the query then
    expands each candidate's outgoing and incoming relationships.

    :param n_embedding: node vector
    :param filters: search filters (must contain "user_id")
    :param limit: return limit
    :return: str, dict
    """
    # search vector store for applicable nodes using cosine similarity
    search_nodes = self.vector_store.search(
        query="",
        vectors=n_embedding,
        limit=self.vector_store_limit,
        filters=filters,
    )
    ids = [n.id for n in search_nodes]
    # FIX: the incoming-edge branch of the UNION had no WHERE clause, so it
    # matched edges of every node in the graph regardless of user_id or the
    # candidate ids. Both branches now apply the same filter (mirroring the
    # Neptune Analytics implementation, which filters both directions).
    cypher_query = f"""
    MATCH (n {self.node_label})-[r]->(m)
    WHERE n.user_id = $user_id AND id(n) IN $n_ids
    RETURN n.name AS source, id(n) AS source_id, type(r) AS relationship, id(r) AS relation_id, m.name AS destination, id(m) AS destination_id
    UNION
    MATCH (m)-[r]->(n {self.node_label})
    WHERE n.user_id = $user_id AND id(n) IN $n_ids
    RETURN m.name AS source, id(m) AS source_id, type(r) AS relationship, id(r) AS relation_id, n.name AS destination, id(n) AS destination_id
    LIMIT $limit
    """
    params = {
        "n_ids": ids,
        "user_id": filters["user_id"],
        "limit": limit,
    }
    logger.debug(f"_search_graph_db\n query={cypher_query}")
    return cypher_query, params
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/graphs/neptune/neptunedb.py",
"license": "Apache License 2.0",
"lines": 453,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mem0ai/mem0:mem0/graphs/neptune/neptunegraph.py | import logging
from .base import NeptuneBase
try:
from langchain_aws import NeptuneAnalyticsGraph
from botocore.config import Config
except ImportError:
raise ImportError("langchain_aws is not installed. Please install it using 'make install_all'.")
logger = logging.getLogger(__name__)
class MemoryGraph(NeptuneBase):
def __init__(self, config):
    """
    Initialize the Neptune Analytics memory store.

    :param config: mem0 configuration; graph_store.config.endpoint must be
        of the form "neptune-graph://<graph-identifier>"
    :raises ValueError: if the endpoint is missing or not a neptune-graph:// URI
    """
    self.config = config
    self.graph = None
    endpoint = self.config.graph_store.config.endpoint
    # app_id is forwarded to botocore for user-agent attribution.
    app_id = self.config.graph_store.config.app_id
    if endpoint and endpoint.startswith("neptune-graph://"):
        graph_identifier = endpoint.replace("neptune-graph://", "")
        self.graph = NeptuneAnalyticsGraph(graph_identifier = graph_identifier,
                                           config = Config(user_agent_appid=app_id))
    if not self.graph:
        raise ValueError("Unable to create a Neptune client: missing 'endpoint' in config")
    # Optional common label applied to every node when base_label is set.
    self.node_label = ":`__Entity__`" if self.config.graph_store.config.base_label else ""
    self.embedding_model = NeptuneBase._create_embedding_model(self.config)
    # Default to openai if no specific provider is configured
    # (graph_store.llm, checked last, takes precedence over config.llm).
    self.llm_provider = "openai"
    if self.config.llm.provider:
        self.llm_provider = self.config.llm.provider
    if self.config.graph_store.llm:
        self.llm_provider = self.config.graph_store.llm.provider
    self.llm = NeptuneBase._create_llm(self.config, self.llm_provider)
    self.user_id = None
    # Use threshold from graph_store config, default to 0.7 for backward compatibility
    self.threshold = self.config.graph_store.threshold if hasattr(self.config.graph_store, 'threshold') else 0.7
def _delete_entities_cypher(self, source, destination, relationship, user_id):
    """
    Returns the OpenCypher query and parameters for deleting entities in the graph DB

    :param source: source node
    :param destination: destination node
    :param relationship: relationship label
    :param user_id: user_id to use
    :return: str, dict
    """
    # Only the relationship is deleted; both endpoint nodes are kept.
    # `relationship` is interpolated into the query text because relationship
    # types cannot be parameterized in Cypher.
    cypher = f"""
    MATCH (n {self.node_label} {{name: $source_name, user_id: $user_id}})
    -[r:{relationship}]->
    (m {self.node_label} {{name: $dest_name, user_id: $user_id}})
    DELETE r
    RETURN
        n.name AS source,
        m.name AS target,
        type(r) AS relationship
    """
    params = {
        "source_name": source,
        "dest_name": destination,
        "user_id": user_id,
    }
    logger.debug(f"_delete_entities\n query={cypher}")
    return cypher, params
def _add_entities_by_source_cypher(
    self,
    source_node_list,
    destination,
    dest_embedding,
    destination_type,
    relationship,
    user_id,
):
    """
    Returns the OpenCypher query and parameters for adding entities in the graph DB
    when the source node already exists and the destination may need creating.
    The destination's embedding is stored on the node itself via
    neptune.algo.vectors.upsert (no external vector store in this backend).

    :param source_node_list: list of source nodes
    :param destination: destination name
    :param dest_embedding: destination embedding
    :param destination_type: destination node label
    :param relationship: relationship label
    :param user_id: user id to use
    :return: str, dict
    """
    # With base_label, nodes share the common label and keep the concrete type
    # as an extra property; otherwise the type itself becomes the node label.
    destination_label = self.node_label if self.node_label else f":`{destination_type}`"
    destination_extra_set = f", destination:`{destination_type}`" if self.node_label else ""
    cypher = f"""
    MATCH (source {{user_id: $user_id}})
    WHERE id(source) = $source_id
    SET source.mentions = coalesce(source.mentions, 0) + 1
    WITH source
    MERGE (destination {destination_label} {{name: $destination_name, user_id: $user_id}})
    ON CREATE SET
        destination.created = timestamp(),
        destination.updated = timestamp(),
        destination.mentions = 1
        {destination_extra_set}
    ON MATCH SET
        destination.mentions = coalesce(destination.mentions, 0) + 1,
        destination.updated = timestamp()
    WITH source, destination, $dest_embedding as dest_embedding
    CALL neptune.algo.vectors.upsert(destination, dest_embedding)
    WITH source, destination
    MERGE (source)-[r:{relationship}]->(destination)
    ON CREATE SET
        r.created = timestamp(),
        r.updated = timestamp(),
        r.mentions = 1
    ON MATCH SET
        r.mentions = coalesce(r.mentions, 0) + 1,
        r.updated = timestamp()
    RETURN source.name AS source, type(r) AS relationship, destination.name AS target
    """
    # Only the best-ranked source candidate is used.
    params = {
        "source_id": source_node_list[0]["id(source_candidate)"],
        "destination_name": destination,
        "dest_embedding": dest_embedding,
        "user_id": user_id,
    }
    logger.debug(
        f"_add_entities:\n source_node_search_result={source_node_list[0]}\n query={cypher}"
    )
    return cypher, params
def _add_entities_by_destination_cypher(
    self,
    source,
    source_embedding,
    source_type,
    destination_node_list,
    relationship,
    user_id,
):
    """
    Returns the OpenCypher query and parameters for adding entities in the graph DB
    when the destination node already exists and the source may need creating.
    The source's embedding is stored on the node itself via
    neptune.algo.vectors.upsert (no external vector store in this backend).

    :param source: source node name
    :param source_embedding: source node embedding
    :param source_type: source node label
    :param destination_node_list: list of dest nodes
    :param relationship: relationship label
    :param user_id: user id to use
    :return: str, dict
    """
    # With base_label, nodes share the common label and keep the concrete type
    # as an extra property; otherwise the type itself becomes the node label.
    source_label = self.node_label if self.node_label else f":`{source_type}`"
    source_extra_set = f", source:`{source_type}`" if self.node_label else ""
    cypher = f"""
    MATCH (destination {{user_id: $user_id}})
    WHERE id(destination) = $destination_id
    SET
        destination.mentions = coalesce(destination.mentions, 0) + 1,
        destination.updated = timestamp()
    WITH destination
    MERGE (source {source_label} {{name: $source_name, user_id: $user_id}})
    ON CREATE SET
        source.created = timestamp(),
        source.updated = timestamp(),
        source.mentions = 1
        {source_extra_set}
    ON MATCH SET
        source.mentions = coalesce(source.mentions, 0) + 1,
        source.updated = timestamp()
    WITH source, destination, $source_embedding as source_embedding
    CALL neptune.algo.vectors.upsert(source, source_embedding)
    WITH source, destination
    MERGE (source)-[r:{relationship}]->(destination)
    ON CREATE SET
        r.created = timestamp(),
        r.updated = timestamp(),
        r.mentions = 1
    ON MATCH SET
        r.mentions = coalesce(r.mentions, 0) + 1,
        r.updated = timestamp()
    RETURN source.name AS source, type(r) AS relationship, destination.name AS target
    """
    # Only the best-ranked destination candidate is used.
    params = {
        "destination_id": destination_node_list[0]["id(destination_candidate)"],
        "source_name": source,
        "source_embedding": source_embedding,
        "user_id": user_id,
    }
    logger.debug(
        f"_add_entities:\n destination_node_search_result={destination_node_list[0]}\n query={cypher}"
    )
    return cypher, params
def _add_relationship_entities_cypher(
    self,
    source_node_list,
    destination_node_list,
    relationship,
    user_id,
):
    """
    Returns the OpenCypher query and parameters for adding a relationship
    between two nodes that both already exist in the graph DB.

    :param source_node_list: list of source node ids
    :param destination_node_list: list of dest node ids
    :param relationship: relationship label
    :param user_id: user id to use
    :return: str, dict
    """
    # FIX: coalesce(destination.mentions) had no default value; in Cypher,
    # null + 1 is null, so a node without a `mentions` property would never
    # get its counter incremented. Every sibling query uses coalesce(x, 0).
    # NOTE(review): this relationship uses created_at/updated_at property
    # names while the other builders use created/updated — kept as-is for
    # compatibility with already-stored edges.
    cypher = f"""
    MATCH (source {{user_id: $user_id}})
    WHERE id(source) = $source_id
    SET
        source.mentions = coalesce(source.mentions, 0) + 1,
        source.updated = timestamp()
    WITH source
    MATCH (destination {{user_id: $user_id}})
    WHERE id(destination) = $destination_id
    SET
        destination.mentions = coalesce(destination.mentions, 0) + 1,
        destination.updated = timestamp()
    MERGE (source)-[r:{relationship}]->(destination)
    ON CREATE SET
        r.created_at = timestamp(),
        r.updated_at = timestamp(),
        r.mentions = 1
    ON MATCH SET r.mentions = coalesce(r.mentions, 0) + 1
    RETURN source.name AS source, type(r) AS relationship, destination.name AS target
    """
    # Only the best-ranked candidate from each search result list is linked.
    params = {
        "source_id": source_node_list[0]["id(source_candidate)"],
        "destination_id": destination_node_list[0]["id(destination_candidate)"],
        "user_id": user_id,
    }
    logger.debug(
        f"_add_entities:\n destination_node_search_result={destination_node_list[0]}\n source_node_search_result={source_node_list[0]}\n query={cypher}"
    )
    return cypher, params
def _add_new_entities_cypher(
    self,
    source,
    source_embedding,
    source_type,
    destination,
    dest_embedding,
    destination_type,
    relationship,
    user_id,
):
    """
    Returns the OpenCypher query and parameters for adding entities in the graph DB
    when neither the source nor the destination node exists yet. Embeddings are
    stored on the nodes via neptune.algo.vectors.upsert.

    :param source: source node name
    :param source_embedding: source node embedding
    :param source_type: source node label
    :param destination: destination name
    :param dest_embedding: destination embedding
    :param destination_type: destination node label
    :param relationship: relationship label
    :param user_id: user id to use
    :return: str, dict
    """
    # With base_label, nodes share the common label and keep the concrete type
    # as an extra property; otherwise the type itself becomes the node label.
    source_label = self.node_label if self.node_label else f":`{source_type}`"
    source_extra_set = f", source:`{source_type}`" if self.node_label else ""
    destination_label = self.node_label if self.node_label else f":`{destination_type}`"
    destination_extra_set = f", destination:`{destination_type}`" if self.node_label else ""
    cypher = f"""
    MERGE (n {source_label} {{name: $source_name, user_id: $user_id}})
    ON CREATE SET n.created = timestamp(),
                  n.updated = timestamp(),
                  n.mentions = 1
                  {source_extra_set}
    ON MATCH SET
        n.mentions = coalesce(n.mentions, 0) + 1,
        n.updated = timestamp()
    WITH n, $source_embedding as source_embedding
    CALL neptune.algo.vectors.upsert(n, source_embedding)
    WITH n
    MERGE (m {destination_label} {{name: $dest_name, user_id: $user_id}})
    ON CREATE SET
        m.created = timestamp(),
        m.updated = timestamp(),
        m.mentions = 1
        {destination_extra_set}
    ON MATCH SET
        m.updated = timestamp(),
        m.mentions = coalesce(m.mentions, 0) + 1
    WITH n, m, $dest_embedding as dest_embedding
    CALL neptune.algo.vectors.upsert(m, dest_embedding)
    WITH n, m
    MERGE (n)-[rel:{relationship}]->(m)
    ON CREATE SET
        rel.created = timestamp(),
        rel.updated = timestamp(),
        rel.mentions = 1
    ON MATCH SET
        rel.updated = timestamp(),
        rel.mentions = coalesce(rel.mentions, 0) + 1
    RETURN n.name AS source, type(rel) AS relationship, m.name AS target
    """
    params = {
        "source_name": source,
        "dest_name": destination,
        "source_embedding": source_embedding,
        "dest_embedding": dest_embedding,
        "user_id": user_id,
    }
    logger.debug(
        f"_add_new_entities_cypher:\n query={cypher}"
    )
    return cypher, params
def _search_source_node_cypher(self, source_embedding, user_id, threshold):
    """
    Returns the OpenCypher query and parameters to search for source nodes.
    Similarity is computed inside Neptune Analytics against embeddings stored
    on the nodes; only the single best match above the threshold is returned.

    :param source_embedding: source vector
    :param user_id: user_id to use
    :param threshold: the threshold for similarity
    :return: str, dict
    """
    cypher = f"""
    MATCH (source_candidate {self.node_label})
    WHERE source_candidate.user_id = $user_id
    WITH source_candidate, $source_embedding as v_embedding
    CALL neptune.algo.vectors.distanceByEmbedding(
        v_embedding,
        source_candidate,
        {{metric:"CosineSimilarity"}}
    ) YIELD distance
    WITH source_candidate, distance AS cosine_similarity
    WHERE cosine_similarity >= $threshold
    WITH source_candidate, cosine_similarity
    ORDER BY cosine_similarity DESC
    LIMIT 1
    RETURN id(source_candidate), cosine_similarity
    """
    params = {
        "source_embedding": source_embedding,
        "user_id": user_id,
        "threshold": threshold,
    }
    logger.debug(f"_search_source_node\n query={cypher}")
    return cypher, params
def _search_destination_node_cypher(self, destination_embedding, user_id, threshold):
    """
    Returns the OpenCypher query and parameters to search for destination nodes.
    Similarity is computed inside Neptune Analytics against embeddings stored
    on the nodes; only the single best match above the threshold is returned.

    :param destination_embedding: destination vector
    :param user_id: user_id to use
    :param threshold: the threshold for similarity
    :return: str, dict
    """
    cypher = f"""
    MATCH (destination_candidate {self.node_label})
    WHERE destination_candidate.user_id = $user_id
    WITH destination_candidate, $destination_embedding as v_embedding
    CALL neptune.algo.vectors.distanceByEmbedding(
        v_embedding,
        destination_candidate,
        {{metric:"CosineSimilarity"}}
    ) YIELD distance
    WITH destination_candidate, distance AS cosine_similarity
    WHERE cosine_similarity >= $threshold
    WITH destination_candidate, cosine_similarity
    ORDER BY cosine_similarity DESC
    LIMIT 1
    RETURN id(destination_candidate), cosine_similarity
    """
    params = {
        "destination_embedding": destination_embedding,
        "user_id": user_id,
        "threshold": threshold,
    }
    logger.debug(f"_search_destination_node\n query={cypher}")
    return cypher, params
def _delete_all_cypher(self, filters):
    """
    Returns the OpenCypher query and parameters to delete all edges/nodes in the memory store.

    :param filters: search filters (must contain "user_id")
    :return: str, dict
    """
    # DETACH DELETE removes each of the user's nodes with all its edges.
    cypher = f"""
    MATCH (n {self.node_label} {{user_id: $user_id}})
    DETACH DELETE n
    """
    params = {"user_id": filters["user_id"]}
    logger.debug(f"delete_all query={cypher}")
    return cypher, params
def _get_all_cypher(self, filters, limit):
    """
    Build the OpenCypher query and parameters that list every relationship
    between this user's nodes in the memory store.

    :param filters: search filters (must contain "user_id")
    :param limit: maximum number of rows to return
    :return: str, dict
    """
    query = f"""
    MATCH (n {self.node_label} {{user_id: $user_id}})-[r]->(m {self.node_label} {{user_id: $user_id}})
    RETURN n.name AS source, type(r) AS relationship, m.name AS target
    LIMIT $limit
    """
    bindings = {"user_id": filters["user_id"], "limit": limit}
    return query, bindings
def _search_graph_db_cypher(self, n_embedding, filters, limit):
    """
    Returns the OpenCypher query and parameters to search for similar nodes in the memory store.
    Similarity is computed in-database against node embeddings, nodes below
    self.threshold are dropped, and each surviving node's outgoing and
    incoming relationships are expanded via a CALL subquery.

    :param n_embedding: node vector
    :param filters: search filters (must contain "user_id")
    :param limit: return limit
    :return: str, dict
    """
    cypher_query = f"""
    MATCH (n {self.node_label})
    WHERE n.user_id = $user_id
    WITH n, $n_embedding as n_embedding
    CALL neptune.algo.vectors.distanceByEmbedding(
        n_embedding,
        n,
        {{metric:"CosineSimilarity"}}
    ) YIELD distance
    WITH n, distance as similarity
    WHERE similarity >= $threshold
    CALL {{
        WITH n
        MATCH (n)-[r]->(m)
        RETURN n.name AS source, id(n) AS source_id, type(r) AS relationship, id(r) AS relation_id, m.name AS destination, id(m) AS destination_id
        UNION ALL
        WITH n
        MATCH (m)-[r]->(n)
        RETURN m.name AS source, id(m) AS source_id, type(r) AS relationship, id(r) AS relation_id, n.name AS destination, id(n) AS destination_id
    }}
    WITH distinct source, source_id, relationship, relation_id, destination, destination_id, similarity
    RETURN source, source_id, relationship, relation_id, destination, destination_id, similarity
    ORDER BY similarity DESC
    LIMIT $limit
    """
    params = {
        "n_embedding": n_embedding,
        "threshold": self.threshold,
        "user_id": filters["user_id"],
        "limit": limit,
    }
    logger.debug(f"_search_graph_db\n query={cypher_query}")
    return cypher_query, params
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/graphs/neptune/neptunegraph.py",
"license": "Apache License 2.0",
"lines": 422,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mem0ai/mem0:tests/memory/test_neptune_analytics_memory.py | import unittest
from unittest.mock import MagicMock, patch
import pytest
from mem0.graphs.neptune.neptunegraph import MemoryGraph
from mem0.graphs.neptune.base import NeptuneBase
class TestNeptuneMemory(unittest.TestCase):
"""Test suite for the Neptune Memory implementation."""
def setUp(self):
    """Set up test fixtures before each test method."""
    # Create a mock config
    self.config = MagicMock()
    self.config.graph_store.config.endpoint = "neptune-graph://test-graph"
    self.config.graph_store.config.base_label = True
    self.config.graph_store.threshold = 0.7
    self.config.llm.provider = "openai_structured"
    self.config.graph_store.llm = None
    self.config.graph_store.custom_prompt = None
    # Create mock for NeptuneAnalyticsGraph
    self.mock_graph = MagicMock()
    self.mock_graph.client.get_graph.return_value = {"status": "AVAILABLE"}
    # Create mocks for static methods
    self.mock_embedding_model = MagicMock()
    self.mock_llm = MagicMock()
    # Patch the necessary components. Each patcher started here is stopped
    # in tearDown to avoid leaking patches between tests.
    self.neptune_analytics_graph_patcher = patch("mem0.graphs.neptune.neptunegraph.NeptuneAnalyticsGraph")
    self.mock_neptune_analytics_graph = self.neptune_analytics_graph_patcher.start()
    self.mock_neptune_analytics_graph.return_value = self.mock_graph
    # Patch the static methods
    self.create_embedding_model_patcher = patch.object(NeptuneBase, "_create_embedding_model")
    self.mock_create_embedding_model = self.create_embedding_model_patcher.start()
    self.mock_create_embedding_model.return_value = self.mock_embedding_model
    self.create_llm_patcher = patch.object(NeptuneBase, "_create_llm")
    self.mock_create_llm = self.create_llm_patcher.start()
    self.mock_create_llm.return_value = self.mock_llm
    # Create the MemoryGraph instance
    self.memory_graph = MemoryGraph(self.config)
    # Set up common test data
    self.user_id = "test_user"
    self.test_filters = {"user_id": self.user_id}
def tearDown(self):
"""Tear down test fixtures after each test method."""
self.neptune_analytics_graph_patcher.stop()
self.create_embedding_model_patcher.stop()
self.create_llm_patcher.stop()
def test_initialization(self):
"""Test that the MemoryGraph is initialized correctly."""
self.assertEqual(self.memory_graph.graph, self.mock_graph)
self.assertEqual(self.memory_graph.embedding_model, self.mock_embedding_model)
self.assertEqual(self.memory_graph.llm, self.mock_llm)
self.assertEqual(self.memory_graph.llm_provider, "openai_structured")
self.assertEqual(self.memory_graph.node_label, ":`__Entity__`")
self.assertEqual(self.memory_graph.threshold, 0.7)
def test_init(self):
"""Test the class init functions"""
# Create a mock config with bad endpoint
config_no_endpoint = MagicMock()
config_no_endpoint.graph_store.config.endpoint = None
# Create the MemoryGraph instance
with pytest.raises(ValueError):
MemoryGraph(config_no_endpoint)
# Create a mock config with bad endpoint
config_ndb_endpoint = MagicMock()
config_ndb_endpoint.graph_store.config.endpoint = "neptune-db://test-graph"
with pytest.raises(ValueError):
MemoryGraph(config_ndb_endpoint)
def test_add_method(self):
"""Test the add method with mocked components."""
# Mock the necessary methods that add() calls
self.memory_graph._retrieve_nodes_from_data = MagicMock(return_value={"alice": "person", "bob": "person"})
self.memory_graph._establish_nodes_relations_from_data = MagicMock(
return_value=[{"source": "alice", "relationship": "knows", "destination": "bob"}]
)
self.memory_graph._search_graph_db = MagicMock(return_value=[])
self.memory_graph._get_delete_entities_from_search_output = MagicMock(return_value=[])
self.memory_graph._delete_entities = MagicMock(return_value=[])
self.memory_graph._add_entities = MagicMock(
return_value=[{"source": "alice", "relationship": "knows", "target": "bob"}]
)
# Call the add method
result = self.memory_graph.add("Alice knows Bob", self.test_filters)
# Verify the method calls
self.memory_graph._retrieve_nodes_from_data.assert_called_once_with("Alice knows Bob", self.test_filters)
self.memory_graph._establish_nodes_relations_from_data.assert_called_once()
self.memory_graph._search_graph_db.assert_called_once()
self.memory_graph._get_delete_entities_from_search_output.assert_called_once()
self.memory_graph._delete_entities.assert_called_once_with([], self.user_id)
self.memory_graph._add_entities.assert_called_once()
# Check the result structure
self.assertIn("deleted_entities", result)
self.assertIn("added_entities", result)
def test_search_method(self):
"""Test the search method with mocked components."""
# Mock the necessary methods that search() calls
self.memory_graph._retrieve_nodes_from_data = MagicMock(return_value={"alice": "person"})
# Mock search results
mock_search_results = [
{"source": "alice", "relationship": "knows", "destination": "bob"},
{"source": "alice", "relationship": "works_with", "destination": "charlie"},
]
self.memory_graph._search_graph_db = MagicMock(return_value=mock_search_results)
# Mock BM25Okapi
with patch("mem0.graphs.neptune.base.BM25Okapi") as mock_bm25:
mock_bm25_instance = MagicMock()
mock_bm25.return_value = mock_bm25_instance
# Mock get_top_n to return reranked results
reranked_results = [["alice", "knows", "bob"], ["alice", "works_with", "charlie"]]
mock_bm25_instance.get_top_n.return_value = reranked_results
# Call the search method
result = self.memory_graph.search("Find Alice", self.test_filters, limit=5)
# Verify the method calls
self.memory_graph._retrieve_nodes_from_data.assert_called_once_with("Find Alice", self.test_filters)
self.memory_graph._search_graph_db.assert_called_once_with(node_list=["alice"], filters=self.test_filters)
# Check the result structure
self.assertEqual(len(result), 2)
self.assertEqual(result[0]["source"], "alice")
self.assertEqual(result[0]["relationship"], "knows")
self.assertEqual(result[0]["destination"], "bob")
def test_get_all_method(self):
"""Test the get_all method."""
# Mock the _get_all_cypher method
mock_cypher = "MATCH (n) RETURN n"
mock_params = {"user_id": self.user_id, "limit": 10}
self.memory_graph._get_all_cypher = MagicMock(return_value=(mock_cypher, mock_params))
# Mock the graph.query result
mock_query_result = [
{"source": "alice", "relationship": "knows", "target": "bob"},
{"source": "bob", "relationship": "works_with", "target": "charlie"},
]
self.mock_graph.query.return_value = mock_query_result
# Call the get_all method
result = self.memory_graph.get_all(self.test_filters, limit=10)
# Verify the method calls
self.memory_graph._get_all_cypher.assert_called_once_with(self.test_filters, 10)
self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)
# Check the result structure
self.assertEqual(len(result), 2)
self.assertEqual(result[0]["source"], "alice")
self.assertEqual(result[0]["relationship"], "knows")
self.assertEqual(result[0]["target"], "bob")
def test_delete_all_method(self):
"""Test the delete_all method."""
# Mock the _delete_all_cypher method
mock_cypher = "MATCH (n) DETACH DELETE n"
mock_params = {"user_id": self.user_id}
self.memory_graph._delete_all_cypher = MagicMock(return_value=(mock_cypher, mock_params))
# Call the delete_all method
self.memory_graph.delete_all(self.test_filters)
# Verify the method calls
self.memory_graph._delete_all_cypher.assert_called_once_with(self.test_filters)
self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)
def test_search_source_node(self):
"""Test the _search_source_node method."""
# Mock embedding
mock_embedding = [0.1, 0.2, 0.3]
# Mock the _search_source_node_cypher method
mock_cypher = "MATCH (n) RETURN n"
mock_params = {"source_embedding": mock_embedding, "user_id": self.user_id, "threshold": 0.9}
self.memory_graph._search_source_node_cypher = MagicMock(return_value=(mock_cypher, mock_params))
# Mock the graph.query result
mock_query_result = [{"id(source_candidate)": 123, "cosine_similarity": 0.95}]
self.mock_graph.query.return_value = mock_query_result
# Call the _search_source_node method
result = self.memory_graph._search_source_node(mock_embedding, self.user_id, threshold=0.9)
# Verify the method calls
self.memory_graph._search_source_node_cypher.assert_called_once_with(mock_embedding, self.user_id, 0.9)
self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)
# Check the result
self.assertEqual(result, mock_query_result)
def test_search_destination_node(self):
"""Test the _search_destination_node method."""
# Mock embedding
mock_embedding = [0.1, 0.2, 0.3]
# Mock the _search_destination_node_cypher method
mock_cypher = "MATCH (n) RETURN n"
mock_params = {"destination_embedding": mock_embedding, "user_id": self.user_id, "threshold": 0.9}
self.memory_graph._search_destination_node_cypher = MagicMock(return_value=(mock_cypher, mock_params))
# Mock the graph.query result
mock_query_result = [{"id(destination_candidate)": 456, "cosine_similarity": 0.92}]
self.mock_graph.query.return_value = mock_query_result
# Call the _search_destination_node method
result = self.memory_graph._search_destination_node(mock_embedding, self.user_id, threshold=0.9)
# Verify the method calls
self.memory_graph._search_destination_node_cypher.assert_called_once_with(mock_embedding, self.user_id, 0.9)
self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)
# Check the result
self.assertEqual(result, mock_query_result)
def test_search_graph_db(self):
"""Test the _search_graph_db method."""
# Mock node list
node_list = ["alice", "bob"]
# Mock embedding
mock_embedding = [0.1, 0.2, 0.3]
self.mock_embedding_model.embed.return_value = mock_embedding
# Mock the _search_graph_db_cypher method
mock_cypher = "MATCH (n) RETURN n"
mock_params = {"n_embedding": mock_embedding, "user_id": self.user_id, "threshold": 0.7, "limit": 10}
self.memory_graph._search_graph_db_cypher = MagicMock(return_value=(mock_cypher, mock_params))
# Mock the graph.query results
mock_query_result1 = [{"source": "alice", "relationship": "knows", "destination": "bob"}]
mock_query_result2 = [{"source": "bob", "relationship": "works_with", "destination": "charlie"}]
self.mock_graph.query.side_effect = [mock_query_result1, mock_query_result2]
# Call the _search_graph_db method
result = self.memory_graph._search_graph_db(node_list, self.test_filters, limit=10)
# Verify the method calls
self.assertEqual(self.mock_embedding_model.embed.call_count, 2)
self.assertEqual(self.memory_graph._search_graph_db_cypher.call_count, 2)
self.assertEqual(self.mock_graph.query.call_count, 2)
# Check the result
expected_result = mock_query_result1 + mock_query_result2
self.assertEqual(result, expected_result)
def test_add_entities(self):
"""Test the _add_entities method."""
# Mock data
to_be_added = [{"source": "alice", "relationship": "knows", "destination": "bob"}]
entity_type_map = {"alice": "person", "bob": "person"}
# Mock embeddings
mock_embedding = [0.1, 0.2, 0.3]
self.mock_embedding_model.embed.return_value = mock_embedding
# Mock search results
mock_source_search = [{"id(source_candidate)": 123, "cosine_similarity": 0.95}]
mock_dest_search = [{"id(destination_candidate)": 456, "cosine_similarity": 0.92}]
# Mock the search methods
self.memory_graph._search_source_node = MagicMock(return_value=mock_source_search)
self.memory_graph._search_destination_node = MagicMock(return_value=mock_dest_search)
# Mock the _add_entities_cypher method
mock_cypher = "MATCH (n) RETURN n"
mock_params = {"source_id": 123, "destination_id": 456}
self.memory_graph._add_entities_cypher = MagicMock(return_value=(mock_cypher, mock_params))
# Mock the graph.query result
mock_query_result = [{"source": "alice", "relationship": "knows", "target": "bob"}]
self.mock_graph.query.return_value = mock_query_result
# Call the _add_entities method
result = self.memory_graph._add_entities(to_be_added, self.user_id, entity_type_map)
# Verify the method calls
self.assertEqual(self.mock_embedding_model.embed.call_count, 2)
self.memory_graph._search_source_node.assert_called_once_with(mock_embedding, self.user_id, threshold=0.7)
self.memory_graph._search_destination_node.assert_called_once_with(mock_embedding, self.user_id, threshold=0.7)
self.memory_graph._add_entities_cypher.assert_called_once()
self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)
# Check the result
self.assertEqual(result, [mock_query_result])
def test_delete_entities(self):
"""Test the _delete_entities method."""
# Mock data
to_be_deleted = [{"source": "alice", "relationship": "knows", "destination": "bob"}]
# Mock the _delete_entities_cypher method
mock_cypher = "MATCH (n) RETURN n"
mock_params = {"source_name": "alice", "dest_name": "bob", "user_id": self.user_id}
self.memory_graph._delete_entities_cypher = MagicMock(return_value=(mock_cypher, mock_params))
# Mock the graph.query result
mock_query_result = [{"source": "alice", "relationship": "knows", "target": "bob"}]
self.mock_graph.query.return_value = mock_query_result
# Call the _delete_entities method
result = self.memory_graph._delete_entities(to_be_deleted, self.user_id)
# Verify the method calls
self.memory_graph._delete_entities_cypher.assert_called_once_with("alice", "bob", "knows", self.user_id)
self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)
# Check the result
self.assertEqual(result, [mock_query_result])
if __name__ == "__main__":
    # Allow running this test module directly without pytest.
    unittest.main()
| {
"repo_id": "mem0ai/mem0",
"file_path": "tests/memory/test_neptune_analytics_memory.py",
"license": "Apache License 2.0",
"lines": 261,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mem0ai/mem0:mem0/exceptions.py | """Structured exception classes for Mem0 with error codes, suggestions, and debug information.
This module provides a comprehensive set of exception classes that replace the generic
APIError with specific, actionable exceptions. Each exception includes error codes,
user-friendly suggestions, and debug information to enable better error handling
and recovery in applications using Mem0.
Example:
Basic usage:
try:
memory.add(content, user_id=user_id)
except RateLimitError as e:
# Implement exponential backoff
time.sleep(e.debug_info.get('retry_after', 60))
except MemoryQuotaExceededError as e:
# Trigger quota upgrade flow
logger.error(f"Quota exceeded: {e.error_code}")
except ValidationError as e:
# Return user-friendly error
raise HTTPException(400, detail=e.suggestion)
Advanced usage with error context:
try:
memory.update(memory_id, content=new_content)
except MemoryNotFoundError as e:
logger.warning(f"Memory {memory_id} not found: {e.message}")
if e.suggestion:
logger.info(f"Suggestion: {e.suggestion}")
"""
from typing import Any, Dict, Optional
class MemoryError(Exception):
    """Common base for every Mem0-specific exception.

    Carries structured error data alongside the plain message so callers can
    react programmatically instead of parsing strings.

    NOTE(review): this class shadows Python's builtin ``MemoryError``; code
    that imports it should be explicit about which one it means.

    Attributes:
        message (str): Human-readable error message.
        error_code (str): Unique error identifier for programmatic handling.
        details (dict): Additional context about the error.
        suggestion (str): User-friendly suggestion for resolving the error.
        debug_info (dict): Technical debugging information.

    Example:
        raise MemoryError(
            message="Memory operation failed",
            error_code="MEM_001",
            details={"operation": "add", "user_id": "user123"},
            suggestion="Please check your API key and try again",
            debug_info={"request_id": "req_456"},
        )
    """

    def __init__(
        self,
        message: str,
        error_code: str,
        details: Optional[Dict[str, Any]] = None,
        suggestion: Optional[str] = None,
        debug_info: Optional[Dict[str, Any]] = None,
    ):
        """Record the structured error fields and initialise ``Exception``.

        Args:
            message: Human-readable error message.
            error_code: Unique error identifier.
            details: Additional context; defaults to an empty dict.
            suggestion: Optional user-facing resolution hint.
            debug_info: Technical debugging data; defaults to an empty dict.
        """
        super().__init__(message)
        self.message = message
        self.error_code = error_code
        # `or {}` normalises both None and other falsy inputs to a dict.
        self.details = details or {}
        self.suggestion = suggestion
        self.debug_info = debug_info or {}

    def __repr__(self) -> str:
        # Render every structured field in declaration order.
        field_names = ("message", "error_code", "details", "suggestion", "debug_info")
        rendered = ", ".join(f"{name}={getattr(self, name)!r}" for name in field_names)
        return f"{self.__class__.__name__}({rendered})"
class AuthenticationError(MemoryError):
    """Signals that the caller could not be authenticated.

    Typical causes include a missing, malformed, expired, or revoked API
    key, absent authentication headers, or insufficient permissions.

    Example:
        raise AuthenticationError(
            message="Invalid API key provided",
            error_code="AUTH_001",
            suggestion="Please check your API key in the Mem0 dashboard",
        )
    """

    pass
class RateLimitError(MemoryError):
    """Signals that the API rate limit has been exhausted.

    The ``debug_info`` dict conventionally carries ``retry_after`` (seconds
    to wait), ``limit`` (window size), ``remaining`` (requests left), and
    ``reset_time`` (when the window resets), which callers can use to
    implement backoff.

    Example:
        raise RateLimitError(
            message="Rate limit exceeded",
            error_code="RATE_001",
            suggestion="Please wait before making more requests",
            debug_info={"retry_after": 60, "limit": 100, "remaining": 0},
        )
    """

    pass
class ValidationError(MemoryError):
    """Signals that request input failed validation.

    Raised for malformed request parameters, memory content, or
    configuration values: a bad ``user_id`` format, missing required
    fields, content outside length limits, invalid metadata, or
    malformed filters.

    Example:
        raise ValidationError(
            message="Invalid user_id format",
            error_code="VAL_001",
            details={"field": "user_id", "value": "123", "expected": "string"},
            suggestion="User ID must be a non-empty string",
        )
    """

    pass
class MemoryNotFoundError(MemoryError):
    """Signals that the requested memory does not exist.

    Raised when a read, update, or delete targets a memory ID that is
    missing or not visible to the current user.

    Example:
        raise MemoryNotFoundError(
            message="Memory not found",
            error_code="MEM_404",
            details={"memory_id": "mem_123", "user_id": "user_456"},
            suggestion="Please check the memory ID and ensure it exists",
        )
    """

    pass
class NetworkError(MemoryError):
    """Signals a connectivity problem while talking to the service.

    Covers connection timeouts, DNS resolution failures, temporary
    service unavailability, and general network outages.

    Example:
        raise NetworkError(
            message="Connection timeout",
            error_code="NET_001",
            suggestion="Please check your internet connection and try again",
            debug_info={"timeout": 30, "endpoint": "api.mem0.ai"},
        )
    """

    pass
class ConfigurationError(MemoryError):
    """Signals that the client is misconfigured.

    Raised for a missing API key, an invalid host URL, incompatible
    option combinations, or absent required environment variables.

    Example:
        raise ConfigurationError(
            message="API key not configured",
            error_code="CFG_001",
            suggestion="Set MEM0_API_KEY environment variable or pass api_key parameter",
        )
    """

    pass
class MemoryQuotaExceededError(MemoryError):
    """Signals that the user's storage or usage quota is exhausted.

    The ``debug_info`` dict conventionally carries ``current_usage``,
    ``quota_limit``, and ``usage_type`` (e.g. storage vs. requests).

    Example:
        raise MemoryQuotaExceededError(
            message="Memory quota exceeded",
            error_code="QUOTA_001",
            suggestion="Please upgrade your plan or delete unused memories",
            debug_info={"current_usage": 1000, "quota_limit": 1000, "usage_type": "memories"},
        )
    """

    pass
class MemoryCorruptionError(MemoryError):
    """Signals that stored memory data is unreadable.

    Raised when persisted memory data turns out to be corrupted or
    malformed.

    Example:
        raise MemoryCorruptionError(
            message="Memory data is corrupted",
            error_code="CORRUPT_001",
            details={"memory_id": "mem_123"},
            suggestion="Please contact support for data recovery assistance",
        )
    """

    pass
class VectorSearchError(MemoryError):
    """Signals that a vector search operation failed.

    Covers failures in vector database operations: an unavailable
    embedding model, a corrupted index, a search timeout, or mismatched
    vector dimensions.

    Example:
        raise VectorSearchError(
            message="Vector search failed",
            error_code="VEC_001",
            details={"query": "find similar memories", "vector_dim": 1536},
            suggestion="Please try a simpler search query",
        )
    """

    pass
class CacheError(MemoryError):
    """Signals a failure in the caching layer.

    Raised for cache misses that should not happen, invalidation
    errors, or cache corruption.

    Example:
        raise CacheError(
            message="Cache operation failed",
            error_code="CACHE_001",
            details={"operation": "get", "key": "user_memories_123"},
            suggestion="Cache will be refreshed automatically",
        )
    """

    pass
# OSS-specific exception classes
class VectorStoreError(MemoryError):
    """Raised when vector store operations fail.
    This exception is raised when vector store operations fail,
    such as embedding storage, similarity search, or vector operations.
    Example:
        raise VectorStoreError(
            message="Vector store operation failed",
            error_code="VECTOR_001",
            details={"operation": "search", "collection": "memories"},
            suggestion="Please check your vector store configuration and connection"
        )
    """
    def __init__(self, message: str, error_code: str = "VECTOR_001",
                 details: Optional[Dict[str, Any]] = None,
                 suggestion: str = "Please check your vector store configuration and connection",
                 debug_info: Optional[Dict[str, Any]] = None):
        # Thin wrapper over MemoryError that only pre-fills the vector-store
        # default error code and suggestion.
        super().__init__(message, error_code, details, suggestion, debug_info)
class GraphStoreError(MemoryError):
    """Raised when graph store operations fail.
    This exception is raised when graph store operations fail,
    such as relationship creation, entity management, or graph queries.
    Example:
        raise GraphStoreError(
            message="Graph store operation failed",
            error_code="GRAPH_001",
            details={"operation": "create_relationship", "entity": "user_123"},
            suggestion="Please check your graph store configuration and connection"
        )
    """
    def __init__(self, message: str, error_code: str = "GRAPH_001",
                 details: Optional[Dict[str, Any]] = None,
                 suggestion: str = "Please check your graph store configuration and connection",
                 debug_info: Optional[Dict[str, Any]] = None):
        # Thin wrapper over MemoryError that only pre-fills the graph-store
        # default error code and suggestion.
        super().__init__(message, error_code, details, suggestion, debug_info)
class EmbeddingError(MemoryError):
    """Raised when embedding operations fail.
    This exception is raised when embedding operations fail,
    such as text embedding generation or embedding model errors.
    Example:
        raise EmbeddingError(
            message="Embedding generation failed",
            error_code="EMBED_001",
            details={"text_length": 1000, "model": "openai"},
            suggestion="Please check your embedding model configuration"
        )
    """
    def __init__(self, message: str, error_code: str = "EMBED_001",
                 details: Optional[Dict[str, Any]] = None,
                 suggestion: str = "Please check your embedding model configuration",
                 debug_info: Optional[Dict[str, Any]] = None):
        # Thin wrapper over MemoryError that only pre-fills the embedding
        # default error code and suggestion.
        super().__init__(message, error_code, details, suggestion, debug_info)
class LLMError(MemoryError):
    """Raised when LLM operations fail.
    This exception is raised when LLM operations fail,
    such as text generation, completion, or model inference errors.
    Example:
        raise LLMError(
            message="LLM operation failed",
            error_code="LLM_001",
            details={"model": "gpt-4", "prompt_length": 500},
            suggestion="Please check your LLM configuration and API key"
        )
    """
    def __init__(self, message: str, error_code: str = "LLM_001",
                 details: Optional[Dict[str, Any]] = None,
                 suggestion: str = "Please check your LLM configuration and API key",
                 debug_info: Optional[Dict[str, Any]] = None):
        # Thin wrapper over MemoryError that only pre-fills the LLM default
        # error code and suggestion.
        super().__init__(message, error_code, details, suggestion, debug_info)
class DatabaseError(MemoryError):
    """Raised when database operations fail.
    This exception is raised when database operations fail,
    such as SQLite operations, connection issues, or data corruption.
    Example:
        raise DatabaseError(
            message="Database operation failed",
            error_code="DB_001",
            details={"operation": "insert", "table": "memories"},
            suggestion="Please check your database configuration and connection"
        )
    """
    def __init__(self, message: str, error_code: str = "DB_001",
                 details: Optional[Dict[str, Any]] = None,
                 suggestion: str = "Please check your database configuration and connection",
                 debug_info: Optional[Dict[str, Any]] = None):
        # Thin wrapper over MemoryError that only pre-fills the database
        # default error code and suggestion.
        super().__init__(message, error_code, details, suggestion, debug_info)
class DependencyError(MemoryError):
    """Raised when required dependencies are missing.
    This exception is raised when required dependencies are missing,
    such as optional packages for specific providers or features.
    Example:
        raise DependencyError(
            message="Required dependency missing",
            error_code="DEPS_001",
            details={"package": "kuzu", "feature": "graph_store"},
            suggestion="Please install the required dependencies: pip install kuzu"
        )
    """
    def __init__(self, message: str, error_code: str = "DEPS_001",
                 details: Optional[Dict[str, Any]] = None,
                 suggestion: str = "Please install the required dependencies",
                 debug_info: Optional[Dict[str, Any]] = None):
        # Thin wrapper over MemoryError that only pre-fills the dependency
        # default error code and suggestion.
        super().__init__(message, error_code, details, suggestion, debug_info)
# Mapping of HTTP status codes to specific exception classes.
# Consumed by create_exception_from_response(); any status not listed falls
# back to the MemoryError base class. Note that 500 deliberately maps to the
# base class because an internal server error carries no finer category.
HTTP_STATUS_TO_EXCEPTION = {
    400: ValidationError,
    401: AuthenticationError,
    403: AuthenticationError,  # authorization failures reuse AuthenticationError
    404: MemoryNotFoundError,
    408: NetworkError,  # request timeout
    409: ValidationError,  # conflict treated as a validation problem
    413: MemoryQuotaExceededError,  # payload too large
    422: ValidationError,
    429: RateLimitError,
    500: MemoryError,
    502: NetworkError,
    503: NetworkError,
    504: NetworkError,
}
def create_exception_from_response(
    status_code: int,
    response_text: str,
    error_code: Optional[str] = None,
    details: Optional[Dict[str, Any]] = None,
    debug_info: Optional[Dict[str, Any]] = None,
) -> MemoryError:
    """Create an appropriate exception based on HTTP response.
    This function analyzes the HTTP status code and response to create
    the most appropriate exception type with relevant error information.
    Args:
        status_code: HTTP status code from the response.
        response_text: Response body text.
        error_code: Optional specific error code.
        details: Additional error context.
        debug_info: Debug information.
    Returns:
        An instance of the appropriate MemoryError subclass.
    Example:
        exception = create_exception_from_response(
            status_code=429,
            response_text="Rate limit exceeded",
            debug_info={"retry_after": 60}
        )
        # Returns a RateLimitError instance
    """
    # Unknown status codes fall back to the MemoryError base class.
    exception_class = HTTP_STATUS_TO_EXCEPTION.get(status_code, MemoryError)
    # Generate error code if not provided
    if not error_code:
        error_code = f"HTTP_{status_code}"
    # Create appropriate suggestion based on status code
    suggestions = {
        400: "Please check your request parameters and try again",
        401: "Please check your API key and authentication credentials",
        403: "You don't have permission to perform this operation",
        404: "The requested resource was not found",
        408: "Request timed out. Please try again",
        409: "Resource conflict. Please check your request",
        413: "Request too large. Please reduce the size of your request",
        422: "Invalid request data. Please check your input",
        429: "Rate limit exceeded. Please wait before making more requests",
        500: "Internal server error. Please try again later",
        502: "Service temporarily unavailable. Please try again later",
        503: "Service unavailable. Please try again later",
        504: "Gateway timeout. Please try again later",
    }
    suggestion = suggestions.get(status_code, "Please try again later")
    # An empty response body degrades to a generic "HTTP <code> error" message.
    return exception_class(
        message=response_text or f"HTTP {status_code} error",
        error_code=error_code,
        details=details or {},
        suggestion=suggestion,
        debug_info=debug_info or {},
) | {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/exceptions.py",
"license": "Apache License 2.0",
"lines": 408,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mem0ai/mem0:mem0/configs/vector_stores/neptune.py | """
Configuration for Amazon Neptune Analytics vector store.
This module provides configuration settings for integrating with Amazon Neptune Analytics
as a vector store backend for Mem0's memory layer.
"""
from pydantic import BaseModel, Field
class NeptuneAnalyticsConfig(BaseModel):
    """
    Configuration class for Amazon Neptune Analytics vector store.
    Amazon Neptune Analytics is a graph analytics engine that can be used as a vector store
    for storing and retrieving memory embeddings in Mem0.
    Attributes:
        collection_name (str): Name of the collection to store vectors. Defaults to "mem0".
        endpoint (str): Neptune Analytics graph endpoint URL or Graph ID for the runtime.
    """
    collection_name: str = Field("mem0", description="Default name for the collection")
    # NOTE(review): the default is the literal placeholder string "endpoint",
    # not a usable graph identifier -- presumably callers always supply a real
    # "neptune-graph://<graphid>" value; confirm before relying on the default.
    endpoint: str = Field("endpoint", description="Graph ID for the runtime")
    # Pydantic v2 model configuration: disallow arbitrary (non-pydantic) types.
    model_config = {
        "arbitrary_types_allowed": False,
    }
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/vector_stores/neptune.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mem0ai/mem0:mem0/vector_stores/neptune_analytics.py | import logging
import time
import uuid
from typing import Dict, List, Optional
from pydantic import BaseModel
try:
from langchain_aws import NeptuneAnalyticsGraph
except ImportError:
raise ImportError("langchain_aws is not installed. Please install it using pip install langchain_aws")
from mem0.vector_stores.base import VectorStoreBase
logger = logging.getLogger(__name__)
class OutputData(BaseModel):
    """Single normalized result row returned by NeptuneAnalyticsVector queries."""
    id: Optional[str]  # memory id
    score: Optional[float]  # distance
    payload: Optional[Dict]  # metadata
class NeptuneAnalyticsVector(VectorStoreBase):
    """
    Neptune Analytics vector store implementation for Mem0.
    Provides vector storage and similarity search capabilities using Amazon Neptune Analytics,
    a serverless graph analytics service that supports vector operations.
    """
    # Prefix applied to every collection name so Mem0 nodes are namespaced
    # inside the shared graph.
    _COLLECTION_PREFIX = "MEM0_VECTOR_"
    # Keys used when unpacking openCypher query responses.
    _FIELD_N = 'n'
    _FIELD_ID = '~id'
    _FIELD_PROP = '~properties'
    _FIELD_SCORE = 'score'
    _FIELD_LABEL = 'label'
    # NOTE(review): not referenced in the visible portion of this class -- confirm use.
    _TIMEZONE = "UTC"
def __init__(
self,
endpoint: str,
collection_name: str,
):
"""
Initialize the Neptune Analytics vector store.
Args:
endpoint (str): Neptune Analytics endpoint in format 'neptune-graph://<graphid>'.
collection_name (str): Name of the collection to store vectors.
Raises:
ValueError: If endpoint format is invalid.
ImportError: If langchain_aws is not installed.
"""
if not endpoint.startswith("neptune-graph://"):
raise ValueError("Please provide 'endpoint' with the format as 'neptune-graph://<graphid>'.")
graph_id = endpoint.replace("neptune-graph://", "")
self.graph = NeptuneAnalyticsGraph(graph_id)
self.collection_name = self._COLLECTION_PREFIX + collection_name
def create_col(self, name, vector_size, distance):
"""
Create a collection (no-op for Neptune Analytics).
Neptune Analytics supports dynamic indices that are created implicitly
when vectors are inserted, so this method performs no operation.
Args:
name: Collection name (unused).
vector_size: Vector dimension (unused).
distance: Distance metric (unused).
"""
pass
    def insert(self, vectors: List[list],
               payloads: Optional[List[Dict]] = None,
               ids: Optional[List[str]] = None):
        """
        Insert vectors into the collection.
        Creates or updates nodes in Neptune Analytics with vector embeddings and metadata.
        Uses MERGE operation to handle both creation and updates. This is a
        two-phase write: one query upserts node properties, a second attaches
        the embeddings via neptune.algo.vectors.upsert.
        Note:
            Each dict in ``payloads`` is mutated in place: the collection
            label and an ``updated_at`` timestamp are added before writing.
            When ``payloads`` is omitted, nodes are created with empty
            properties (and therefore without the label field).
        Args:
            vectors (List[list]): List of embedding vectors to insert.
            payloads (Optional[List[Dict]]): Optional metadata for each vector.
            ids (Optional[List[str]]): Optional IDs for vectors. Generated if not provided.
        """
        para_list = []
        for index, data_vector in enumerate(vectors):
            if payloads:
                payload = payloads[index]
                # Stamp bookkeeping fields onto the caller's payload dict.
                payload[self._FIELD_LABEL] = self.collection_name
                payload["updated_at"] = str(int(time.time()))
            else:
                payload = {}
            para_list.append(dict(
                # Random UUIDs are generated when the caller supplies no ids.
                node_id=ids[index] if ids else str(uuid.uuid4()),
                properties=payload,
                embedding=data_vector,
            ))
        para_map_to_insert = {"rows": para_list}
        # Phase 1: upsert node properties (MERGE creates or updates).
        query_string = (f"""
            UNWIND $rows AS row
            MERGE (n :{self.collection_name} {{`~id`: row.node_id}})
            ON CREATE SET n = row.properties
            ON MATCH SET n += row.properties
        """
        )
        self.execute_query(query_string, para_map_to_insert)
        # Phase 2: attach the embedding to each node just written.
        query_string_vector = (f"""
            UNWIND $rows AS row
            MATCH (n
            :{self.collection_name}
            {{`~id`: row.node_id}})
            WITH n, row.embedding AS embedding
            CALL neptune.algo.vectors.upsert(n, embedding)
            YIELD success
            RETURN success
        """
        )
        result = self.execute_query(query_string_vector, para_map_to_insert)
        self._process_success_message(result, "Vector store - Insert")
    def search(
        self, query: str, vectors: List[float], limit: int = 5, filters: Optional[Dict] = None
    ) -> List[OutputData]:
        """
        Search for similar vectors using embedding similarity.
        Performs vector similarity search using Neptune Analytics' topKByEmbeddingWithFiltering
        algorithm to find the most similar vectors.
        Note:
            The ``filters`` dict is mutated in place: the collection label is
            added so results stay scoped to this collection. The query
            embedding and filter clause are interpolated directly into the
            query text rather than passed as bound parameters.
        Args:
            query (str): Search query text (unused in vector search).
            vectors (List[float]): Query embedding vector.
            limit (int, optional): Maximum number of results to return. Defaults to 5.
            filters (Optional[Dict]): Optional filters to apply to search results.
        Returns:
            List[OutputData]: List of similar vectors with scores and metadata.
        """
        if not filters:
            filters = {}
        # Always constrain the search to this collection's label.
        filters[self._FIELD_LABEL] = self.collection_name
        filter_clause = self._get_node_filter_clause(filters)
        query_string = f"""
            CALL neptune.algo.vectors.topKByEmbeddingWithFiltering({{
                topK: {limit},
                embedding: {vectors}
                {filter_clause}
            }}
            )
            YIELD node, score
            RETURN node as n, score
        """
        query_response = self.execute_query(query_string)
        if len(query_response) > 0:
            return self._parse_query_responses(query_response, with_score=True)
        else :
            return []
def delete(self, vector_id: str):
    """Delete a single vector node (and all its relationships) by ID.

    Args:
        vector_id (str): ID of the vector to delete.
    """
    query_string = f"""
        MATCH (n :{self.collection_name})
        WHERE id(n) = $node_id
        DETACH DELETE n
        """
    self.execute_query(query_string, {"node_id": vector_id})
def update(
    self,
    vector_id: str,
    vector: Optional[List[float]] = None,
    payload: Optional[Dict] = None,
):
    """Update an existing vector's metadata and/or embedding.

    Either part may be omitted; each provided part is written with its own
    query. A supplied payload fully replaces the node's properties.

    Args:
        vector_id (str): ID of the vector to update.
        vector (Optional[List[float]]): New embedding vector, if any.
        payload (Optional[Dict]): Replacement metadata, if any.
    """
    if payload:
        # Stamp bookkeeping fields, then replace the node properties wholesale.
        payload[self._FIELD_LABEL] = self.collection_name
        payload["updated_at"] = str(int(time.time()))
        self.execute_query(
            f"""
        MATCH (n :{self.collection_name})
        WHERE id(n) = $vector_id
        SET n = $properties
        """,
            {"properties": payload, "vector_id": vector_id},
        )
    if vector:
        self.execute_query(
            f"""
        MATCH (n :{self.collection_name})
        WHERE id(n) = $vector_id
        WITH $embedding as embedding, n as n
        CALL neptune.algo.vectors.upsert(n, embedding)
        YIELD success
        RETURN success
        """,
            {"embedding": vector, "vector_id": vector_id},
        )
def get(self, vector_id: str):
    """Fetch a single vector node by ID.

    Args:
        vector_id (str): ID of the vector to retrieve.

    Returns:
        OutputData: Parsed node data, or None when the ID is unknown.
    """
    query_string = f"""
        MATCH (n :{self.collection_name})
        WHERE id(n) = $node_id
        RETURN n
        """
    rows = self.execute_query(query_string, {"node_id": vector_id})
    if not rows:
        return None
    return self._parse_query_responses(rows)[0]
def list_cols(self):
    """List node labels that share this store's collection prefix.

    Queries the Neptune Analytics property-graph schema and keeps only the
    node labels starting with the collection name.

    Returns:
        List[str]: Matching label names, or an empty list.
    """
    query_string = f"""
        CALL neptune.graph.pg_schema()
        YIELD schema
        RETURN [ label IN schema.nodeLabels WHERE label STARTS WITH '{self.collection_name}'] AS result
        """
    rows = self.execute_query(query_string)
    if len(rows) == 1 and "result" in rows[0]:
        return rows[0]["result"]
    return []
def delete_col(self):
    """Drop every node carrying this collection's label, with relationships."""
    query_string = f"MATCH (n :{self.collection_name}) DETACH DELETE n"
    self.execute_query(query_string)
def col_info(self):
    """No-op: Neptune Analytics creates collections dynamically, so there is
    no per-collection metadata to report."""
    return None
def list(self, filters: Optional[Dict] = None, limit: int = 100) -> List[OutputData]:
    """List vectors in the collection, optionally filtered by metadata.

    Args:
        filters (Optional[Dict]): Property equality filters, if any.
        limit (int, optional): Maximum number of rows. Defaults to 100.

    Returns:
        List[OutputData]: A single-element list wrapping the parsed rows
        (callers index with ``[0]``); ``[[]]`` when nothing matched.
    """
    where_clause = self._get_where_clause(filters) if filters else ""
    query_string = f"""
        MATCH (n :{self.collection_name})
        {where_clause}
        RETURN n
        LIMIT $limit
        """
    rows = self.execute_query(query_string, {"limit": limit})
    if not rows:
        return [[]]
    return [self._parse_query_responses(rows)]
def reset(self):
    """Empty the collection by dropping every stored vector."""
    self.delete_col()
def _parse_query_responses(self, response: List[Dict], with_score: bool = False):
    """Convert raw Neptune Analytics query rows into OutputData objects.

    Fixes the previous annotation (``response`` is a list of row dicts, not
    a dict) and avoids shadowing the ``id`` builtin.

    Args:
        response (List[Dict]): Rows returned by ``execute_query``; each row
            holds the node under the ``n`` field (and optionally a score).
        with_score (bool, optional): Include the similarity score from each
            row. Defaults to False.

    Returns:
        List[OutputData]: Parsed response data (empty for an empty response).
    """
    parsed = []
    for row in response:
        node = row[self._FIELD_N]
        properties = node[self._FIELD_PROP]
        # The internal label marker is bookkeeping, not user metadata.
        properties.pop("label", None)
        score = row[self._FIELD_SCORE] if with_score else None
        parsed.append(OutputData(
            id=node[self._FIELD_ID],
            score=score,
            payload=properties,
        ))
    return parsed
def execute_query(self, query_string: str, params=None):
    """Run an openCypher query against the Neptune Analytics graph.

    Thin wrapper that defaults the parameter map and logs the statement for
    troubleshooting before delegating to the graph client.

    Args:
        query_string (str): The openCypher statement to execute.
        params (dict, optional): Bound query parameters; defaults to {}.

    Returns:
        The raw query result from the graph client.
    """
    bound = {} if params is None else params
    logger.debug(f"Executing openCypher query:[{query_string}], with parameters:[{bound}].")
    return self.graph.query(query_string, bound)
@staticmethod
def _get_where_clause(filters: dict):
    """Render equality filters as a Cypher WHERE clause.

    Note: values are interpolated directly (not parameterized), so callers
    must only pass trusted filter values.

    Args:
        filters (dict): Property name -> expected value pairs.

    Returns:
        str: "WHERE ..." clause with a trailing space, or "" when empty.
    """
    if not filters:
        return ""
    conditions = [f"n.{key} = '{value}'" for key, value in filters.items()]
    return "WHERE " + " AND ".join(conditions) + " "
@staticmethod
def _get_node_filter_clause(filters: dict):
    """Render filters as the nodeFilter argument for vector search calls.

    Args:
        filters (dict): Property name -> expected value pairs.

    Returns:
        str: A ", nodeFilter: ..." fragment; a single condition is inlined,
        multiple conditions are wrapped in an andAll block.
    """
    conditions = [
        f"{{equals:{{property: '{key}', value: '{value}'}}}}" for key, value in filters.items()
    ]
    if len(conditions) == 1:
        return f", nodeFilter: {conditions[0]}"
    return f"""
        , nodeFilter: {{andAll: [ {", ".join(conditions)} ]}}
        """
@staticmethod
def _process_success_message(response, context):
    """Validate vector-operation responses, logging the first failure seen.

    Stops at the first row that either lacks a "success" field or reports
    anything other than True; successful responses are silent.

    Args:
        response: Iterable of row dicts from a vector insert/update.
        context (str): Human-readable action name used in log messages.
    """
    for row in response:
        if "success" not in row:
            logger.error(f"Query execution status is absent on action: [{context}]")
            break
        if row["success"] is not True:
            logger.error(f"Abnormal response status on action: [{context}] with message: [{row['success']}] ")
            break
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/vector_stores/neptune_analytics.py",
"license": "Apache License 2.0",
"lines": 372,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mem0ai/mem0:tests/vector_stores/test_neptune_analytics.py | import logging
import os
import sys
import pytest
from dotenv import load_dotenv
from mem0.utils.factory import VectorStoreFactory
load_dotenv()
# Configure logging
logging.getLogger("mem0.vector.neptune.main").setLevel(logging.INFO)
logging.getLogger("mem0.vector.neptune.base").setLevel(logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Include %(asctime)s so the datefmt below actually takes effect; previously
# datefmt was passed but the format string never referenced the timestamp,
# making the option dead configuration.
logging.basicConfig(
    format="%(asctime)s %(levelname)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    stream=sys.stdout,
)
# Test constants
EMBEDDING_MODEL_DIMS = 1024  # dimensionality expected by the store under test
VECTOR_1 = [-0.1] * EMBEDDING_MODEL_DIMS
VECTOR_2 = [-0.2] * EMBEDDING_MODEL_DIMS
VECTOR_3 = [-0.3] * EMBEDDING_MODEL_DIMS
SAMPLE_PAYLOADS = [
    {"test_text": "text_value", "another_field": "field_2_value"},
    {"test_text": "text_value_BBBB"},
    {"test_text": "text_value_CCCC"}
]
@pytest.mark.skipif(not os.getenv("RUN_TEST_NEPTUNE_ANALYTICS"), reason="Only run with RUN_TEST_NEPTUNE_ANALYTICS is true")
class TestNeptuneAnalyticsOperations:
    """Test basic CRUD operations."""

    @pytest.fixture
    def na_instance(self):
        """Create Neptune Analytics vector store instance for testing."""
        config = {
            "endpoint": f"neptune-graph://{os.getenv('GRAPH_ID')}",
            "collection_name": "test",
        }
        return VectorStoreFactory.create("neptune", config)

    @staticmethod
    def _seed(store, count=3):
        """Reset the store, then insert the first `count` sample vectors (ids A, B, C)."""
        store.reset()
        store.insert(
            vectors=[VECTOR_1, VECTOR_2, VECTOR_3][:count],
            ids=["A", "B", "C"][:count],
            payloads=SAMPLE_PAYLOADS[:count],
        )

    def test_insert_and_list(self, na_instance):
        """Inserted vectors come back from list() without the internal label."""
        self._seed(na_instance)
        rows = na_instance.list()[0]
        assert len(rows) == 3
        assert "label" not in rows[0].payload

    def test_get(self, na_instance):
        """get() returns the stored payload for a known ID."""
        self._seed(na_instance, count=1)
        record = na_instance.get("A")
        assert record.id == "A"
        assert record.score is None
        assert record.payload["test_text"] == "text_value"
        assert record.payload["another_field"] == "field_2_value"
        assert "label" not in record.payload

    def test_update(self, na_instance):
        """update() replaces a vector's payload."""
        self._seed(na_instance, count=1)
        na_instance.update(vector_id="A", payload={"updated_payload_str": "update_str"})
        record = na_instance.get("A")
        assert record.id == "A"
        assert record.score is None
        assert record.payload["updated_payload_str"] == "update_str"
        assert "label" not in record.payload

    def test_delete(self, na_instance):
        """delete() removes exactly the targeted vector."""
        self._seed(na_instance, count=1)
        assert len(na_instance.list()[0]) == 1
        na_instance.delete("A")
        assert len(na_instance.list()[0]) == 0

    def test_search(self, na_instance):
        """search() returns the nearest stored vector."""
        self._seed(na_instance)
        hits = na_instance.search(query="", vectors=VECTOR_1, limit=1)
        assert len(hits) == 1
        assert "label" not in hits[0].payload

    def test_reset(self, na_instance):
        """reset() empties a populated collection."""
        self._seed(na_instance)
        assert len(na_instance.list()[0]) == 3
        na_instance.reset()
        assert len(na_instance.list()[0]) == 0

    def test_delete_col(self, na_instance):
        """delete_col() removes every vector in the collection."""
        self._seed(na_instance)
        assert len(na_instance.list()[0]) == 3
        na_instance.delete_col()
        assert len(na_instance.list()[0]) == 0

    def test_list_cols(self, na_instance):
        """list_cols() reports the prefixed collection label."""
        self._seed(na_instance)
        assert na_instance.list_cols() == ["MEM0_VECTOR_test"]

    def test_invalid_endpoint_format(self):
        """A non neptune-graph:// endpoint is rejected."""
        config = {
            "endpoint": f"xxx://{os.getenv('GRAPH_ID')}",
            "collection_name": "test",
        }
        with pytest.raises(ValueError):
            VectorStoreFactory.create("neptune", config)
| {
"repo_id": "mem0ai/mem0",
"file_path": "tests/vector_stores/test_neptune_analytics.py",
"license": "Apache License 2.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mem0ai/mem0:mem0/configs/vector_stores/valkey.py | from pydantic import BaseModel
class ValkeyConfig(BaseModel):
    """Configuration for Valkey vector store.

    Mirrors the constructor arguments of ``ValkeyDB``; values are validated
    by pydantic before the store is instantiated.
    """

    valkey_url: str  # Connection URL passed to valkey.from_url()
    collection_name: str  # Index name; keys live under the "mem0:<name>" prefix
    embedding_model_dims: int  # Dimensionality of stored embedding vectors
    timezone: str = "UTC"  # Timezone used when formatting stored timestamps
    index_type: str = "hnsw"  # Default to HNSW, can be 'hnsw' or 'flat'
    # HNSW specific parameters with recommended defaults
    hnsw_m: int = 16  # Number of connections per layer (default from Valkey docs)
    hnsw_ef_construction: int = 200  # Search width during construction
    hnsw_ef_runtime: int = 10  # Search width during queries
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/vector_stores/valkey.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:mem0/vector_stores/valkey.py | import json
import logging
from datetime import datetime
from typing import Dict
import numpy as np
import pytz
import valkey
from pydantic import BaseModel
from valkey.exceptions import ResponseError
from mem0.memory.utils import extract_json
from mem0.vector_stores.base import VectorStoreBase
logger = logging.getLogger(__name__)
# Default fields for the Valkey index
# NOTE(review): this declarative schema mirrors the FT.CREATE arguments built
# in ValkeyDB._build_index_schema, but does not appear to be referenced by the
# visible code - confirm it is still needed.
DEFAULT_FIELDS = [
    {"name": "memory_id", "type": "tag"},
    {"name": "hash", "type": "tag"},
    {"name": "agent_id", "type": "tag"},
    {"name": "run_id", "type": "tag"},
    {"name": "user_id", "type": "tag"},
    {"name": "memory", "type": "tag"},  # Using TAG instead of TEXT for Valkey compatibility
    {"name": "metadata", "type": "tag"},  # Using TAG instead of TEXT for Valkey compatibility
    {"name": "created_at", "type": "numeric"},
    {"name": "updated_at", "type": "numeric"},
    {
        "name": "embedding",
        "type": "vector",
        "attrs": {"distance_metric": "cosine", "algorithm": "flat", "datatype": "float32"},
    },
]
# Payload keys that are stored as dedicated hash fields (or derived data)
# rather than serialized into the JSON "metadata" field.
excluded_keys = {"user_id", "agent_id", "run_id", "hash", "data", "created_at", "updated_at"}
class OutputData(BaseModel):
    """Single search/get result returned by ValkeyDB."""

    # Memory/vector identifier (the stored "memory_id" hash field).
    id: str
    # Similarity score from KNN search; get() returns 0.0 as a placeholder.
    score: float
    # Decoded document fields plus deserialized metadata.
    payload: Dict
class ValkeyDB(VectorStoreBase):
def __init__(
    self,
    valkey_url: str,
    collection_name: str,
    embedding_model_dims: int,
    timezone: str = "UTC",
    index_type: str = "hnsw",
    hnsw_m: int = 16,
    hnsw_ef_construction: int = 200,
    hnsw_ef_runtime: int = 10,
):
    """Initialize the Valkey vector store.

    Connects to the given Valkey instance and ensures the search index for
    ``collection_name`` exists.

    Args:
        valkey_url (str): Valkey connection URL.
        collection_name (str): Collection (index) name.
        embedding_model_dims (int): Embedding vector dimensionality.
        timezone (str, optional): Timezone for timestamps. Defaults to "UTC".
        index_type (str, optional): Index type ('hnsw' or 'flat'). Defaults to "hnsw".
        hnsw_m (int, optional): HNSW connections per node. Defaults to 16.
        hnsw_ef_construction (int, optional): HNSW build-time search width. Defaults to 200.
        hnsw_ef_runtime (int, optional): HNSW query-time search width. Defaults to 10.

    Raises:
        ValueError: If index_type is neither 'hnsw' nor 'flat'.
    """
    # Validate up front, before any connection attempt.
    normalized_index_type = index_type.lower()
    if normalized_index_type not in ("hnsw", "flat"):
        raise ValueError(f"Invalid index_type: {index_type}. Must be 'hnsw' or 'flat'")
    self.embedding_model_dims = embedding_model_dims
    self.collection_name = collection_name
    self.prefix = f"mem0:{collection_name}"
    self.timezone = timezone
    self.index_type = normalized_index_type
    self.hnsw_m = hnsw_m
    self.hnsw_ef_construction = hnsw_ef_construction
    self.hnsw_ef_runtime = hnsw_ef_runtime
    try:
        self.client = valkey.from_url(valkey_url)
        logger.debug(f"Successfully connected to Valkey at {valkey_url}")
    except Exception as e:
        logger.exception(f"Failed to connect to Valkey at {valkey_url}: {e}")
        raise
    # Create the index schema (no-op if it already exists).
    self._create_index(embedding_model_dims)
def _build_index_schema(self, collection_name, embedding_dims, distance_metric, prefix):
    """Assemble the FT.CREATE command for this store's schema.

    Args:
        collection_name (str): Name of the collection/index.
        embedding_dims (int): Vector embedding dimensions.
        distance_metric (str): Distance metric (e.g. "COSINE", "L2", "IP").
        prefix (str): Key prefix the index should cover.

    Returns:
        list: Complete FT.CREATE command as a list of arguments.

    Raises:
        ValueError: If self.index_type is neither 'hnsw' nor 'flat'
            (unreachable in practice thanks to constructor validation).
    """
    # Attributes shared by both vector algorithms.
    base_vector_attrs = ["TYPE", "FLOAT32", "DIM", str(embedding_dims), "DISTANCE_METRIC", distance_metric]
    if self.index_type == "hnsw":
        # The leading "12" is the attribute count and must match the number
        # of entries that follow it.
        vector_config = ["embedding", "VECTOR", "HNSW", "12"] + base_vector_attrs + [
            "M",
            str(self.hnsw_m),
            "EF_CONSTRUCTION",
            str(self.hnsw_ef_construction),
            "EF_RUNTIME",
            str(self.hnsw_ef_runtime),
        ]
    elif self.index_type == "flat":
        # "6" attributes: TYPE, FLOAT32, DIM, dims, DISTANCE_METRIC, metric.
        vector_config = ["embedding", "VECTOR", "FLAT", "6"] + base_vector_attrs
    else:
        raise ValueError(f"Unsupported index_type: {self.index_type}. Must be 'hnsw' or 'flat'")
    # TAG fields use the default comma separator.
    schema = []
    for tag_field in ("memory_id", "hash", "agent_id", "run_id", "user_id", "memory", "metadata"):
        schema += [tag_field, "TAG"]
    for numeric_field in ("created_at", "updated_at"):
        schema += [numeric_field, "NUMERIC"]
    return (
        ["FT.CREATE", collection_name, "ON", "HASH", "PREFIX", "1", prefix, "SCHEMA"]
        + schema
        + vector_config
    )
def _create_index(self, embedding_model_dims):
    """
    Create the search index with the specified schema.

    Idempotent: returns early if the index already exists. Verifies the
    search module is loaded before attempting creation.

    Args:
        embedding_model_dims (int): Dimensions for the vector embeddings.

    Raises:
        ValueError: If the search module is not available.
        Exception: For other errors during index creation.
    """
    # Check if the search module is available
    try:
        # FT._LIST is a cheap probe: it fails with "unknown command" when
        # the valkey-search module is not loaded.
        self.client.execute_command("FT._LIST")
    except ResponseError as e:
        if "unknown command" in str(e).lower():
            raise ValueError(
                "Valkey search module is not available. Please ensure Valkey is running with the search module enabled. "
                "The search module can be loaded using the --loadmodule option with the valkey-search library. "
                "For installation and setup instructions, refer to the Valkey Search documentation."
            )
        else:
            logger.exception(f"Error checking search module: {e}")
            raise
    # Check if the index already exists; "not found" means we should create
    # it, any other ResponseError is a real failure.
    try:
        self.client.ft(self.collection_name).info()
        return
    except ResponseError as e:
        if "not found" not in str(e).lower():
            logger.exception(f"Error checking index existence: {e}")
            raise
    # Build and execute the index creation command
    cmd = self._build_index_schema(
        self.collection_name,
        embedding_model_dims,
        "COSINE",  # Fixed distance metric for initialization
        self.prefix,
    )
    try:
        self.client.execute_command(*cmd)
        logger.info(f"Successfully created {self.index_type.upper()} index {self.collection_name}")
    except Exception as e:
        logger.exception(f"Error creating index {self.collection_name}: {e}")
        raise
def create_col(self, name=None, vector_size=None, distance=None):
    """Create (or recreate) a collection index in Valkey.

    Any existing index with the same name is dropped first. When ``name``
    is supplied, the instance switches to the new collection on success.

    Args:
        name (str, optional): Collection name; defaults to the current one.
        vector_size (int, optional): Embedding dimensions; defaults to the
            instance's embedding_model_dims.
        distance (str, optional): Distance metric; defaults to "COSINE".

    Returns:
        The search-index handle for the created collection.
    """
    target_name = name or self.collection_name
    target_dims = vector_size or self.embedding_model_dims
    target_metric = distance or "COSINE"
    target_prefix = f"mem0:{target_name}"
    # Silently drop any stale index before creating a fresh one.
    self._drop_index(target_name, log_level="silent")
    create_cmd = self._build_index_schema(target_name, target_dims, target_metric, target_prefix)
    try:
        self.client.execute_command(*create_cmd)
        logger.info(f"Successfully created {self.index_type.upper()} index {target_name}")
        if name:
            # Adopt the newly created collection as the active one.
            self.collection_name = target_name
            self.prefix = target_prefix
        return self.client.ft(target_name)
    except Exception as e:
        logger.exception(f"Error creating collection {target_name}: {e}")
        raise
def insert(self, vectors: list, payloads: list = None, ids: list = None):
    """Insert vectors and their payloads into the index.

    Previously, omitting ``payloads`` or ``ids`` (both declared optional)
    crashed with a TypeError inside zip(); now missing payloads default to
    empty dicts and missing ids raise a clear ValueError.

    Args:
        vectors (list): List of vectors to insert.
        payloads (list, optional): Payloads for the vectors; an empty dict
            is used for each vector when omitted.
        ids (list, optional): IDs for the vectors; required in practice,
            since the Valkey keys are derived from them.

    Raises:
        ValueError: If ids is not provided.
    """
    if ids is None:
        raise ValueError("insert() requires 'ids'; Valkey keys are derived from them")
    if payloads is None:
        # Make the optional parameter genuinely optional.
        payloads = [{} for _ in vectors]
    for vector, payload, vec_id in zip(vectors, payloads, ids):
        try:
            key = f"{self.prefix}:{vec_id}"
            # Ensure created_at is present
            if "created_at" not in payload:
                payload["created_at"] = datetime.now(pytz.timezone(self.timezone)).isoformat()
            # Prepare the hash data
            hash_data = {
                "memory_id": vec_id,
                "hash": payload.get("hash", f"hash_{vec_id}"),  # default hash if not provided
                "memory": payload.get("data", f"data_{vec_id}"),  # default data if not provided
                "created_at": int(datetime.fromisoformat(payload["created_at"]).timestamp()),
                "embedding": np.array(vector, dtype=np.float32).tobytes(),
            }
            # Add optional identity fields only when present.
            for field in ["agent_id", "run_id", "user_id"]:
                if field in payload:
                    hash_data[field] = payload[field]
            # Everything else is serialized into the JSON metadata field.
            hash_data["metadata"] = json.dumps({k: v for k, v in payload.items() if k not in excluded_keys})
            # Store in Valkey
            self.client.hset(key, mapping=hash_data)
            logger.debug(f"Successfully inserted vector with ID {vec_id}")
        except KeyError as e:
            logger.error(f"Error inserting vector with ID {vec_id}: Missing required field {e}")
        except Exception as e:
            logger.exception(f"Error inserting vector with ID {vec_id}: {e}")
            raise
def _build_search_query(self, knn_part, filters=None):
    """Compose the full search query string from a KNN clause and filters.

    Args:
        knn_part (str): The "[KNN ...]" portion of the query.
        filters (dict, optional): Tag filters; each non-None entry becomes
            "@key:{value}". Values are passed through literally (wildcards,
            lists, etc.), and multiple filters are ANDed (space-separated).

    Returns:
        str: "<filter_expr> =>[KNN...]", or "*=>[KNN...]" when no usable
        filters are supplied.
    """
    # Drop None-valued entries; they contribute nothing to the filter.
    active = {key: value for key, value in (filters or {}).items() if value is not None}
    if not active:
        return f"*=>{knn_part}"
    filter_expr = " ".join(f"@{key}:{{{value}}}" for key, value in active.items())
    return f"{filter_expr} =>{knn_part}"
def _execute_search(self, query, params):
    """Run a prepared search query against the collection's index.

    Args:
        query (str): Full search query string.
        params (dict): Bound query parameters (e.g. the vector bytes).

    Returns:
        The raw search results from the client.
    """
    try:
        index = self.client.ft(self.collection_name)
        return index.search(query, query_params=params)
    except ResponseError as e:
        logger.error(f"Search failed with query '{query}': {e}")
        raise
def _process_search_results(self, results):
    """Convert raw Valkey search results into OutputData objects.

    Args:
        results: Search results object exposing a ``docs`` iterable.

    Returns:
        list: OutputData entries with score, core fields, and parsed metadata.
    """
    parsed = []
    for doc in results.docs:
        # KNN queries alias the distance to "vector_score".
        # NOTE(review): score may be None here while OutputData.score is
        # typed float - confirm this path is never hit without a KNN clause.
        score = float(doc.vector_score) if hasattr(doc, "vector_score") else None
        payload = {
            "hash": doc.hash,
            "data": doc.memory,
            "created_at": self._format_timestamp(int(doc.created_at), self.timezone),
        }
        if hasattr(doc, "updated_at"):
            payload["updated_at"] = self._format_timestamp(int(doc.updated_at), self.timezone)
        for field in ["agent_id", "run_id", "user_id"]:
            if hasattr(doc, field):
                payload[field] = getattr(doc, field)
        if hasattr(doc, "metadata"):
            try:
                payload.update(json.loads(extract_json(doc.metadata)))
            except (json.JSONDecodeError, TypeError) as e:
                logger.warning(f"Failed to parse metadata: {e}")
        parsed.append(OutputData(id=doc.memory_id, score=score, payload=payload))
    return parsed
def search(self, query: str, vectors: list, limit: int = 5, filters: dict = None, ef_runtime: int = None):
    """Find the stored vectors most similar to ``vectors``.

    Args:
        query (str): The textual search query (not used for KNN matching).
        vectors (list): Query embedding.
        limit (int, optional): Maximum number of results. Defaults to 5.
        filters (dict, optional): Tag filters. Defaults to None.
        ef_runtime (int, optional): Per-query HNSW search width; ignored for
            FLAT indexes. Defaults to None.

    Returns:
        list: OutputData results ordered by similarity.
    """
    vector_bytes = np.array(vectors, dtype=np.float32).tobytes()
    # EF_RUNTIME is only meaningful for HNSW indexes.
    if self.index_type == "hnsw" and ef_runtime is not None:
        knn_part = f"[KNN {limit} @embedding $vec_param EF_RUNTIME {ef_runtime} AS vector_score]"
    else:
        knn_part = f"[KNN {limit} @embedding $vec_param AS vector_score]"
    full_query = self._build_search_query(knn_part, filters)
    logger.debug(f"Valkey search query: {full_query}")
    raw_results = self._execute_search(full_query, {"vec_param": vector_bytes})
    return self._process_search_results(raw_results)
def delete(self, vector_id):
    """Remove a vector's hash entry from Valkey.

    Args:
        vector_id (str): ID of the vector to delete.
    """
    try:
        self.client.delete(f"{self.prefix}:{vector_id}")
        logger.debug(f"Successfully deleted vector with ID {vector_id}")
    except Exception as e:
        logger.exception(f"Error deleting vector with ID {vector_id}: {e}")
        raise
def update(self, vector_id=None, vector=None, payload=None):
    """Update a vector's hash entry in place.

    Both parts are genuinely optional now: when ``payload`` is None the
    bookkeeping fields are regenerated from defaults, and when ``vector``
    is None the stored embedding is left untouched. (Previously passing
    None for either argument raised a TypeError despite the signature.)

    Args:
        vector_id (str): ID of the vector to update.
        vector (list, optional): New embedding; skipped when None.
        payload (dict, optional): New payload data; treated as {} when None.
    """
    try:
        key = f"{self.prefix}:{vector_id}"
        if payload is None:
            payload = {}
        # Ensure created_at is present
        if "created_at" not in payload:
            payload["created_at"] = datetime.now(pytz.timezone(self.timezone)).isoformat()
        # Prepare the hash data
        hash_data = {
            "memory_id": vector_id,
            "hash": payload.get("hash", f"hash_{vector_id}"),  # default hash if not provided
            "memory": payload.get("data", f"data_{vector_id}"),  # default data if not provided
            "created_at": int(datetime.fromisoformat(payload["created_at"]).timestamp()),
        }
        if vector is not None:
            # Only overwrite the embedding when a new vector is supplied.
            hash_data["embedding"] = np.array(vector, dtype=np.float32).tobytes()
        # Add updated_at if available
        if "updated_at" in payload:
            hash_data["updated_at"] = int(datetime.fromisoformat(payload["updated_at"]).timestamp())
        # Add optional identity fields only when present.
        for field in ["agent_id", "run_id", "user_id"]:
            if field in payload:
                hash_data[field] = payload[field]
        # Everything else is serialized into the JSON metadata field.
        hash_data["metadata"] = json.dumps({k: v for k, v in payload.items() if k not in excluded_keys})
        # Update in Valkey
        self.client.hset(key, mapping=hash_data)
        logger.debug(f"Successfully updated vector with ID {vector_id}")
    except KeyError as e:
        logger.error(f"Error updating vector with ID {vector_id}: Missing required field {e}")
    except Exception as e:
        logger.exception(f"Error updating vector with ID {vector_id}: {e}")
        raise
def _format_timestamp(self, timestamp, timezone=None):
    """Render a Unix timestamp as an ISO-8601 string in the given timezone.

    Args:
        timestamp (int): Seconds since the epoch.
        timezone (str, optional): IANA timezone name; defaults to UTC.

    Returns:
        str: ISO formatted timestamp with microsecond precision.
    """
    zone = pytz.timezone(timezone if timezone else "UTC")
    return datetime.fromtimestamp(timestamp, tz=zone).isoformat(timespec="microseconds")
def _process_document_fields(self, result, vector_id):
    """
    Process document fields from a Valkey hash result.

    Decodes byte values in place, formats timestamps, renames "memory" to
    "data", and merges deserialized metadata into the payload. Missing
    required fields fall back to defaults rather than failing.

    Args:
        result (dict): The hash result from Valkey (mutated in place: byte
            values are decoded to str where possible).
        vector_id (str): The vector ID used as a fallback memory ID.

    Returns:
        dict: The processed payload.
        str: The memory ID ("memory_id" field, or vector_id if absent).
    """
    # Create the payload with error handling
    payload = {}
    # Convert bytes to string for text fields (the raw embedding stays bytes).
    for k in result:
        if k not in ["embedding"]:
            if isinstance(result[k], bytes):
                try:
                    result[k] = result[k].decode("utf-8")
                except UnicodeDecodeError:
                    # If decoding fails, keep the bytes
                    pass
    # Add required fields with error handling; created_at is re-rendered as
    # an ISO string, falling back to the raw value if it isn't numeric.
    for field in ["hash", "memory", "created_at"]:
        if field in result:
            if field == "created_at":
                try:
                    payload[field] = self._format_timestamp(int(result[field]), self.timezone)
                except (ValueError, TypeError):
                    payload[field] = result[field]
            else:
                payload[field] = result[field]
        else:
            # Use default values for missing fields ("now" for created_at).
            if field == "hash":
                payload[field] = "unknown"
            elif field == "memory":
                payload[field] = "unknown"
            elif field == "created_at":
                payload[field] = self._format_timestamp(
                    int(datetime.now(tz=pytz.timezone(self.timezone)).timestamp()), self.timezone
                )
    # Rename memory to data for consistency
    if "memory" in payload:
        payload["data"] = payload.pop("memory")
    # Add updated_at if available
    if "updated_at" in result:
        try:
            payload["updated_at"] = self._format_timestamp(int(result["updated_at"]), self.timezone)
        except (ValueError, TypeError):
            payload["updated_at"] = result["updated_at"]
    # Add optional identity fields
    for field in ["agent_id", "run_id", "user_id"]:
        if field in result:
            payload[field] = result[field]
    # Merge deserialized metadata into the payload; a parse failure is
    # logged but does not abort the read.
    if "metadata" in result:
        try:
            metadata = json.loads(extract_json(result["metadata"]))
            payload.update(metadata)
        except (json.JSONDecodeError, TypeError):
            logger.warning(f"Failed to parse metadata: {result.get('metadata')}")
    # Use memory_id from result if available, otherwise use vector_id
    memory_id = result.get("memory_id", vector_id)
    return payload, memory_id
def _convert_bytes(self, data):
    """Recursively decode bytes values to UTF-8 strings.

    Containers (dict/list/tuple) are rebuilt with converted elements;
    undecodable bytes and all other values are returned unchanged.
    """
    if isinstance(data, bytes):
        try:
            return data.decode("utf-8")
        except UnicodeDecodeError:
            return data
    if isinstance(data, dict):
        converted = {}
        for key, value in data.items():
            converted[self._convert_bytes(key)] = self._convert_bytes(value)
        return converted
    if isinstance(data, list):
        return [self._convert_bytes(element) for element in data]
    if isinstance(data, tuple):
        return tuple(self._convert_bytes(element) for element in data)
    return data
def get(self, vector_id):
    """Fetch a single vector entry by ID.

    Args:
        vector_id (str): ID of the vector to fetch.

    Returns:
        OutputData: The stored entry (score fixed at 0.0).

    Raises:
        KeyError: If no hash exists for the given ID.
    """
    try:
        raw = self.client.hgetall(f"{self.prefix}:{vector_id}")
        if not raw:
            raise KeyError(f"Vector with ID {vector_id} not found")
        # Normalize bytes keys/values before field processing.
        decoded = self._convert_bytes(raw)
        logger.debug(f"Retrieved result keys: {decoded.keys()}")
        payload, memory_id = self._process_document_fields(decoded, vector_id)
        return OutputData(id=memory_id, payload=payload, score=0.0)
    except KeyError:
        # Missing-ID errors propagate unchanged (and unlogged).
        raise
    except Exception as e:
        logger.exception(f"Error getting vector with ID {vector_id}: {e}")
        raise
def list_cols(self):
    """List all search indices known to this Valkey instance.

    Returns:
        list: Index names as reported by FT._LIST.
    """
    try:
        return self.client.execute_command("FT._LIST")
    except Exception as e:
        logger.exception(f"Error listing collections: {e}")
        raise
def _drop_index(self, collection_name, log_level="error"):
    """Drop a search index via the documented FT.DROPINDEX command.

    Args:
        collection_name (str): Name of the index to drop.
        log_level (str): How to report a missing index: "silent" (expected,
            e.g. before initial creation - no logging), "info" (log at info
            level). Any other ResponseError is always logged and re-raised.

    Returns:
        bool: True if the index was dropped, False if it did not exist.
    """
    try:
        self.client.execute_command("FT.DROPINDEX", collection_name)
        logger.info(f"Successfully deleted index {collection_name}")
        return True
    except ResponseError as e:
        if "Unknown index name" not in str(e):
            # Real search-module error - always log and raise.
            logger.error(f"Error deleting index {collection_name}: {e}")
            raise
        if log_level == "info":
            logger.info(f"Index {collection_name} doesn't exist, skipping deletion")
        # "silent" (and any other level): missing index is tolerated.
        return False
    except Exception as e:
        # Non-ResponseError exceptions - always log and raise.
        logger.error(f"Error deleting index {collection_name}: {e}")
        raise
def delete_col(self):
    """
    Delete the current collection (index).

    Returns:
        bool: True if the index was dropped, False if it did not exist.
    """
    # "info" level: a missing index during an explicit delete is worth noting
    # but is not an error.
    outcome = self._drop_index(self.collection_name, log_level="info")
    return outcome
def col_info(self, name=None):
    """
    Get information about a collection (index).

    Args:
        name (str, optional): Name of the collection. Defaults to None, which uses the current collection_name.

    Returns:
        dict: Information about the collection, as returned by FT.INFO.
    """
    # Resolve the target name BEFORE entering the try block: previously the
    # binding happened inside the try, so a failure during attribute access
    # left `collection_name` unbound and the handler raised NameError
    # instead of logging the real error.
    collection_name = name or self.collection_name
    try:
        return self.client.ft(collection_name).info()
    except Exception as e:
        logger.exception(f"Error getting collection info for {collection_name}: {e}")
        raise
def reset(self):
    """
    Reset the index by deleting and recreating it.

    Returns:
        bool: True once the index has been dropped and rebuilt.

    Raises:
        Exception: Re-raised if either the drop or the re-create step fails.
    """
    try:
        collection_name = self.collection_name
        logger.warning(f"Resetting index {collection_name}...")
        # Delete the index (idempotent: delete_col tolerates a missing index)
        self.delete_col()
        # Recreate the index with the same embedding dimensions
        self._create_index(self.embedding_model_dims)
        return True
    except Exception as e:
        logger.exception(f"Error resetting index {self.collection_name}: {e}")
        raise
def _build_list_query(self, filters=None):
"""
Build a query for listing vectors.
Args:
filters (dict, optional): Filters to apply to the list. Each key-value pair
becomes a tag filter (@key:{value}). None values are ignored.
Values are used as-is (no validation) - wildcards, lists, etc. are
passed through literally to Valkey search.
Returns:
str: The query string. Returns "*" if no valid filters provided.
"""
# Default query
q = "*"
# Add filters if provided
if filters and any(value is not None for key, value in filters.items()):
filter_conditions = []
for key, value in filters.items():
if value is not None:
filter_conditions.append(f"@{key}:{{{value}}}")
if filter_conditions:
q = " ".join(filter_conditions)
return q
def list(self, filters: dict = None, limit: int = None) -> list:
    """
    List all recent created memories from the vector store.

    Args:
        filters (dict, optional): Filters to apply to the list. Each key-value pair
            becomes a tag filter (@key:{value}). None values are ignored.
            Values are used as-is without validation; multiple filters are
            combined with AND logic.
        limit (int, optional): Maximum number of results to return. Defaults to 1000
            if not specified.

    Returns:
        list: Nested list format [[MemoryResult(), ...]] matching Redis implementation.
            Each MemoryResult contains id and payload with hash, data, timestamps, etc.
    """
    # Lightweight container matching the Redis implementation's result shape.
    class MemoryResult:
        def __init__(self, id: str, payload: dict, score: float = None):
            self.id = id
            self.payload = payload
            self.score = score

    try:
        # Valkey search requires a vector query, so listing is implemented as
        # a KNN search against a zero vector with a large K.
        zero_vector = [0.0] * self.embedding_model_dims
        effective_limit = 1000 if limit is None else limit
        hits = self.search("", zero_vector, limit=effective_limit, filters=filters)
        system_fields = ("data", "hash", "created_at", "updated_at")
        wrapped = []
        for hit in hits:
            # Well-known fields first, with safe defaults.
            payload = {
                "hash": hit.payload.get("hash", ""),
                "data": hit.payload.get("data", ""),
                "created_at": hit.payload.get("created_at"),
                "updated_at": hit.payload.get("updated_at"),
            }
            # Then copy any user metadata, skipping the system fields above.
            payload.update({k: v for k, v in hit.payload.items() if k not in system_fields})
            wrapped.append(MemoryResult(id=hit.id, payload=payload))
        # Nested list format, like the Redis backend.
        return [wrapped]
    except Exception as e:
        logger.exception(f"Error in list method: {e}")
        return [[]]  # Return empty result on error
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/vector_stores/valkey.py",
"license": "Apache License 2.0",
"lines": 695,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:tests/vector_stores/test_valkey.py | import json
from datetime import datetime
from unittest.mock import MagicMock, patch
import numpy as np
import pytest
import pytz
from valkey.exceptions import ResponseError
from mem0.vector_stores.valkey import ValkeyDB
@pytest.fixture
def mock_valkey_client():
    """Create a mock Valkey client.

    Patches ``valkey.from_url`` for the duration of the test, so any
    ``ValkeyDB`` constructed while this fixture is active receives the mock
    instead of opening a real connection.
    """
    with patch("valkey.from_url") as mock_client:
        # Mock the ft method
        mock_ft = MagicMock()
        mock_client.return_value.ft = MagicMock(return_value=mock_ft)
        mock_client.return_value.execute_command = MagicMock()
        mock_client.return_value.hset = MagicMock()
        mock_client.return_value.hgetall = MagicMock()
        mock_client.return_value.delete = MagicMock()
        # Yield inside the context manager so the patch remains active for the
        # whole test (including ValkeyDB construction in dependent fixtures).
        yield mock_client.return_value


@pytest.fixture
def valkey_db(mock_valkey_client):
    """Create a ValkeyDB instance with a mock client."""
    # Initialize the ValkeyDB with test parameters
    valkey_db = ValkeyDB(
        valkey_url="valkey://localhost:6379",
        collection_name="test_collection",
        embedding_model_dims=1536,
    )
    # Replace the client with our mock
    valkey_db.client = mock_valkey_client
    return valkey_db
def test_search_filter_syntax(valkey_db, mock_valkey_client):
    """Test that the search filter syntax is correctly formatted for Valkey."""
    # Mock search results
    mock_doc = MagicMock()
    mock_doc.memory_id = "test_id"
    mock_doc.hash = "test_hash"
    mock_doc.memory = "test_data"
    mock_doc.created_at = str(int(datetime.now().timestamp()))
    mock_doc.metadata = json.dumps({"key": "value"})
    mock_doc.vector_score = "0.5"
    mock_results = MagicMock()
    mock_results.docs = [mock_doc]
    mock_ft = mock_valkey_client.ft.return_value
    mock_ft.search.return_value = mock_results
    # Test with user_id filter
    valkey_db.search(
        query="test query",
        vectors=np.random.rand(1536).tolist(),
        limit=5,
        filters={"user_id": "test_user"},
    )
    # Check that the search was called with the correct filter syntax:
    # a tag clause "@field:{value}" followed by the KNN vector clause.
    args, kwargs = mock_ft.search.call_args
    assert "@user_id:{test_user}" in args[0]
    assert "=>[KNN" in args[0]
    # Test with multiple filters
    valkey_db.search(
        query="test query",
        vectors=np.random.rand(1536).tolist(),
        limit=5,
        filters={"user_id": "test_user", "agent_id": "test_agent"},
    )
    # Check that the search was called with the correct filter syntax
    args, kwargs = mock_ft.search.call_args
    assert "@user_id:{test_user}" in args[0]
    assert "@agent_id:{test_agent}" in args[0]
    assert "=>[KNN" in args[0]


def test_search_without_filters(valkey_db, mock_valkey_client):
    """Test search without filters."""
    # Mock search results
    mock_doc = MagicMock()
    mock_doc.memory_id = "test_id"
    mock_doc.hash = "test_hash"
    mock_doc.memory = "test_data"
    mock_doc.created_at = str(int(datetime.now().timestamp()))
    mock_doc.metadata = json.dumps({"key": "value"})
    mock_doc.vector_score = "0.5"
    mock_results = MagicMock()
    mock_results.docs = [mock_doc]
    mock_ft = mock_valkey_client.ft.return_value
    mock_ft.search.return_value = mock_results
    # Test without filters
    results = valkey_db.search(
        query="test query",
        vectors=np.random.rand(1536).tolist(),
        limit=5,
    )
    # Check that the search was called with the wildcard prefix (no tag filter)
    args, kwargs = mock_ft.search.call_args
    assert "*=>[KNN" in args[0]
    # Check that results are processed correctly
    assert len(results) == 1
    assert results[0].id == "test_id"
    assert results[0].payload["hash"] == "test_hash"
    assert results[0].payload["data"] == "test_data"
    assert "created_at" in results[0].payload
def test_insert(valkey_db, mock_valkey_client):
    """Test inserting vectors."""
    # Prepare test data
    vectors = [np.random.rand(1536).tolist()]
    payloads = [{"hash": "test_hash", "data": "test_data", "user_id": "test_user"}]
    ids = ["test_id"]
    # Call insert
    valkey_db.insert(vectors=vectors, payloads=payloads, ids=ids)
    # Check that hset was called with the correct arguments
    mock_valkey_client.hset.assert_called_once()
    args, kwargs = mock_valkey_client.hset.call_args
    # Keys are namespaced as "mem0:<collection>:<id>"
    assert args[0] == "mem0:test_collection:test_id"
    assert "memory_id" in kwargs["mapping"]
    assert kwargs["mapping"]["memory_id"] == "test_id"
    assert kwargs["mapping"]["hash"] == "test_hash"
    # The payload's "data" field is persisted under the "memory" hash field
    assert kwargs["mapping"]["memory"] == "test_data"
    assert kwargs["mapping"]["user_id"] == "test_user"
    assert "created_at" in kwargs["mapping"]
    assert "embedding" in kwargs["mapping"]


def test_insert_handles_missing_created_at(valkey_db, mock_valkey_client):
    """Test inserting vectors with missing created_at field."""
    # Prepare test data
    vectors = [np.random.rand(1536).tolist()]
    payloads = [{"hash": "test_hash", "data": "test_data"}]  # No created_at
    ids = ["test_id"]
    # Call insert
    valkey_db.insert(vectors=vectors, payloads=payloads, ids=ids)
    # Check that hset was called with the correct arguments
    mock_valkey_client.hset.assert_called_once()
    args, kwargs = mock_valkey_client.hset.call_args
    assert "created_at" in kwargs["mapping"]  # Should be added automatically
def test_delete(valkey_db, mock_valkey_client):
    """Test deleting a vector."""
    # Call delete
    valkey_db.delete("test_id")
    # Check that delete was called with the correct namespaced key
    mock_valkey_client.delete.assert_called_once_with("mem0:test_collection:test_id")


def test_update(valkey_db, mock_valkey_client):
    """Test updating a vector."""
    # Prepare test data
    vector = np.random.rand(1536).tolist()
    payload = {
        "hash": "test_hash",
        "data": "updated_data",
        "created_at": datetime.now(pytz.timezone("UTC")).isoformat(),
        "user_id": "test_user",
    }
    # Call update
    valkey_db.update(vector_id="test_id", vector=vector, payload=payload)
    # Check that hset was called with the correct arguments
    mock_valkey_client.hset.assert_called_once()
    args, kwargs = mock_valkey_client.hset.call_args
    assert args[0] == "mem0:test_collection:test_id"
    assert kwargs["mapping"]["memory_id"] == "test_id"
    # The payload's "data" field is persisted under the "memory" hash field
    assert kwargs["mapping"]["memory"] == "updated_data"


def test_update_handles_missing_created_at(valkey_db, mock_valkey_client):
    """Test updating vectors with missing created_at field."""
    # Prepare test data
    vector = np.random.rand(1536).tolist()
    payload = {"hash": "test_hash", "data": "updated_data"}  # No created_at
    # Call update
    valkey_db.update(vector_id="test_id", vector=vector, payload=payload)
    # Check that hset was called with the correct arguments
    mock_valkey_client.hset.assert_called_once()
    args, kwargs = mock_valkey_client.hset.call_args
    assert "created_at" in kwargs["mapping"]  # Should be added automatically
def test_get(valkey_db, mock_valkey_client):
    """Test getting a vector."""
    # Mock hgetall to return a vector
    mock_valkey_client.hgetall.return_value = {
        "memory_id": "test_id",
        "hash": "test_hash",
        "memory": "test_data",
        "created_at": str(int(datetime.now().timestamp())),
        "metadata": json.dumps({"key": "value"}),
        "user_id": "test_user",
    }
    # Call get
    result = valkey_db.get("test_id")
    # Check that hgetall was called with the correct key
    mock_valkey_client.hgetall.assert_called_once_with("mem0:test_collection:test_id")
    # Check the result: "memory" is renamed to "data" and the metadata JSON
    # is flattened into the payload.
    assert result.id == "test_id"
    assert result.payload["hash"] == "test_hash"
    assert result.payload["data"] == "test_data"
    assert "created_at" in result.payload
    assert result.payload["key"] == "value"  # From metadata
    assert result.payload["user_id"] == "test_user"


def test_get_not_found(valkey_db, mock_valkey_client):
    """Test getting a vector that doesn't exist."""
    # Mock hgetall to return empty dict (not found)
    mock_valkey_client.hgetall.return_value = {}
    # Call get should raise KeyError
    with pytest.raises(KeyError, match="Vector with ID test_id not found"):
        valkey_db.get("test_id")
def test_list_cols(valkey_db, mock_valkey_client):
    """Test listing collections."""
    # Reset the mock to clear previous calls (index creation in the fixture)
    mock_valkey_client.execute_command.reset_mock()
    # Mock execute_command to return list of indices
    mock_valkey_client.execute_command.return_value = ["test_collection", "another_collection"]
    # Call list_cols
    result = valkey_db.list_cols()
    # Check that execute_command was called with the correct command
    mock_valkey_client.execute_command.assert_called_with("FT._LIST")
    # Check the result
    assert result == ["test_collection", "another_collection"]


def test_delete_col(valkey_db, mock_valkey_client):
    """Test deleting a collection."""
    # Reset the mock to clear previous calls
    mock_valkey_client.execute_command.reset_mock()
    # Test successful deletion
    result = valkey_db.delete_col()
    assert result is True
    # Check that execute_command was called with the correct command
    mock_valkey_client.execute_command.assert_called_once_with("FT.DROPINDEX", "test_collection")
    # Test error handling - real errors should still raise
    mock_valkey_client.execute_command.side_effect = ResponseError("Error dropping index")
    with pytest.raises(ResponseError, match="Error dropping index"):
        valkey_db.delete_col()
    # Test idempotent behavior - "Unknown index name" should return False, not raise
    mock_valkey_client.execute_command.side_effect = ResponseError("Unknown index name")
    result = valkey_db.delete_col()
    assert result is False


def test_context_aware_logging(valkey_db, mock_valkey_client):
    """Test that _drop_index handles different log levels correctly.

    A missing index must never raise, regardless of log level.
    """
    # Mock "Unknown index name" error
    mock_valkey_client.execute_command.side_effect = ResponseError("Unknown index name")
    # Test silent mode - should not log anything (we can't easily test log output, but ensure no exception)
    result = valkey_db._drop_index("test_collection", log_level="silent")
    assert result is False
    # Test info mode - should not raise exception
    result = valkey_db._drop_index("test_collection", log_level="info")
    assert result is False
    # Test default mode - should not raise exception
    result = valkey_db._drop_index("test_collection")
    assert result is False
def test_col_info(valkey_db, mock_valkey_client):
    """Test getting collection info."""
    # Mock ft().info() to return index info
    mock_ft = mock_valkey_client.ft.return_value
    # Reset the mock to clear previous calls
    mock_ft.info.reset_mock()
    mock_ft.info.return_value = {"index_name": "test_collection", "num_docs": 100}
    # Call col_info
    result = valkey_db.col_info()
    # Check that ft().info() was called
    assert mock_ft.info.called
    # Check the result
    assert result["index_name"] == "test_collection"
    assert result["num_docs"] == 100


def test_create_col(valkey_db, mock_valkey_client):
    """Test creating a new collection."""
    # Call create_col
    valkey_db.create_col(name="new_collection", vector_size=768, distance="IP")
    # Check that execute_command was called to create the index
    assert mock_valkey_client.execute_command.called
    args = mock_valkey_client.execute_command.call_args[0]
    assert args[0] == "FT.CREATE"
    assert args[1] == "new_collection"
    # Check that the distance metric was set correctly (flat FT.CREATE arg
    # list: the value follows its keyword token)
    distance_metric_index = args.index("DISTANCE_METRIC")
    assert args[distance_metric_index + 1] == "IP"
    # Check that the vector size was set correctly
    dim_index = args.index("DIM")
    assert args[dim_index + 1] == "768"
def test_list(valkey_db, mock_valkey_client):
    """Test listing vectors.

    list() is implemented as a dummy-vector KNN search, so the generated
    query must contain both the tag filter and the KNN clause.
    """
    # Mock search results
    mock_doc = MagicMock()
    mock_doc.memory_id = "test_id"
    mock_doc.hash = "test_hash"
    mock_doc.memory = "test_data"
    mock_doc.created_at = str(int(datetime.now().timestamp()))
    mock_doc.metadata = json.dumps({"key": "value"})
    mock_doc.vector_score = "0.5"  # Required by the search result parser
    mock_results = MagicMock()
    mock_results.docs = [mock_doc]
    mock_ft = mock_valkey_client.ft.return_value
    mock_ft.search.return_value = mock_results
    # Call list
    results = valkey_db.list(filters={"user_id": "test_user"}, limit=10)
    # Check that search was called with the correct arguments
    mock_ft.search.assert_called_once()
    args, kwargs = mock_ft.search.call_args
    # Expects full search query with KNN part due to dummy vector approach
    assert "@user_id:{test_user}" in args[0]
    assert "=>[KNN" in args[0]
    # Check the results (nested list format: [[MemoryResult, ...]]).
    # Note: the original test asserted these same conditions twice; the
    # duplicate block has been removed.
    assert len(results) == 1  # One list of results
    assert len(results[0]) == 1  # One result in the list
    assert results[0][0].id == "test_id"
    assert results[0][0].payload["hash"] == "test_hash"
    assert results[0][0].payload["data"] == "test_data"
def test_search_error_handling(valkey_db, mock_valkey_client):
    """Test search error handling when query fails."""
    # Mock search to fail with an error
    mock_ft = mock_valkey_client.ft.return_value
    mock_ft.search.side_effect = ResponseError("Invalid filter expression")
    # Call search should raise the error
    with pytest.raises(ResponseError, match="Invalid filter expression"):
        valkey_db.search(
            query="test query",
            vectors=np.random.rand(1536).tolist(),
            limit=5,
            filters={"user_id": "test_user"},
        )
    # Check that search was called once (no internal retries)
    assert mock_ft.search.call_count == 1


def test_drop_index_error_handling(valkey_db, mock_valkey_client):
    """Test error handling when dropping an index."""
    # Reset the mock to clear previous calls
    mock_valkey_client.execute_command.reset_mock()
    # Test 1: Real error (not "Unknown index name") should raise
    mock_valkey_client.execute_command.side_effect = ResponseError("Error dropping index")
    with pytest.raises(ResponseError, match="Error dropping index"):
        valkey_db._drop_index("test_collection")
    # Test 2: "Unknown index name" with default log_level should return False
    mock_valkey_client.execute_command.side_effect = ResponseError("Unknown index name")
    result = valkey_db._drop_index("test_collection")
    assert result is False
    # Test 3: "Unknown index name" with silent log_level should return False
    mock_valkey_client.execute_command.side_effect = ResponseError("Unknown index name")
    result = valkey_db._drop_index("test_collection", log_level="silent")
    assert result is False
    # Test 4: "Unknown index name" with info log_level should return False
    mock_valkey_client.execute_command.side_effect = ResponseError("Unknown index name")
    result = valkey_db._drop_index("test_collection", log_level="info")
    assert result is False
    # Test 5: Successful deletion should return True
    mock_valkey_client.execute_command.side_effect = None  # Reset to success
    result = valkey_db._drop_index("test_collection")
    assert result is True
def test_reset(valkey_db, mock_valkey_client):
    """Test resetting an index."""
    # Mock delete_col and _create_index
    with (
        patch.object(valkey_db, "delete_col", return_value=True) as mock_delete_col,
        patch.object(valkey_db, "_create_index") as mock_create_index,
    ):
        # Call reset
        result = valkey_db.reset()
        # Check that delete_col and _create_index were called
        mock_delete_col.assert_called_once()
        # The index must be rebuilt with the original embedding dimensions
        mock_create_index.assert_called_once_with(1536)
        # Check the result
        assert result is True


def test_build_list_query(valkey_db):
    """Test building a list query with and without filters."""
    # Test without filters
    query = valkey_db._build_list_query(None)
    assert query == "*"
    # Test with empty filters
    query = valkey_db._build_list_query({})
    assert query == "*"
    # Test with filters
    query = valkey_db._build_list_query({"user_id": "test_user"})
    assert query == "@user_id:{test_user}"
    # Test with multiple filters
    query = valkey_db._build_list_query({"user_id": "test_user", "agent_id": "test_agent"})
    assert "@user_id:{test_user}" in query
    assert "@agent_id:{test_agent}" in query
def test_process_document_fields(valkey_db):
    """Test processing document fields from hash results."""
    # Create a mock result with all fields
    result = {
        "memory_id": "test_id",
        "hash": "test_hash",
        "memory": "test_data",
        "created_at": "1625097600",  # 2021-07-01 00:00:00 UTC
        "updated_at": "1625184000",  # 2021-07-02 00:00:00 UTC
        "user_id": "test_user",
        "agent_id": "test_agent",
        "metadata": json.dumps({"key": "value"}),
    }
    # Process the document fields
    payload, memory_id = valkey_db._process_document_fields(result, "default_id")
    # Check the results
    assert memory_id == "test_id"
    assert payload["hash"] == "test_hash"
    assert payload["data"] == "test_data"  # memory renamed to data
    assert "created_at" in payload
    assert "updated_at" in payload
    assert payload["user_id"] == "test_user"
    assert payload["agent_id"] == "test_agent"
    assert payload["key"] == "value"  # From metadata
    # Test with missing fields: the helper must fall back to the supplied
    # default ID and synthesize defaults for data/created_at.
    result = {
        # No memory_id
        "hash": "test_hash",
        # No memory
        # No created_at
    }
    # Process the document fields
    payload, memory_id = valkey_db._process_document_fields(result, "default_id")
    # Check the results
    assert memory_id == "default_id"  # Should use default_id
    assert payload["hash"] == "test_hash"
    assert "data" in payload  # Should have default value
    assert "created_at" in payload  # Should have default value
def test_init_connection_error():
    """Test that initialization handles connection errors."""
    # Mock the from_url to raise an exception
    with patch("valkey.from_url") as mock_from_url:
        mock_from_url.side_effect = Exception("Connection failed")
        # Initialize ValkeyDB should raise the exception
        with pytest.raises(Exception, match="Connection failed"):
            ValkeyDB(
                valkey_url="valkey://localhost:6379",
                collection_name="test_collection",
                embedding_model_dims=1536,
            )


def test_build_search_query(valkey_db):
    """Test building search queries with different filter scenarios."""
    # Test with no filters: wildcard prefix before the KNN clause
    knn_part = "[KNN 5 @embedding $vec_param AS vector_score]"
    query = valkey_db._build_search_query(knn_part)
    assert query == f"*=>{knn_part}"
    # Test with empty filters
    query = valkey_db._build_search_query(knn_part, {})
    assert query == f"*=>{knn_part}"
    # Test with None values in filters
    query = valkey_db._build_search_query(knn_part, {"user_id": None})
    assert query == f"*=>{knn_part}"
    # Test with single filter
    query = valkey_db._build_search_query(knn_part, {"user_id": "test_user"})
    assert query == f"@user_id:{{test_user}} =>{knn_part}"
    # Test with multiple filters
    query = valkey_db._build_search_query(knn_part, {"user_id": "test_user", "agent_id": "test_agent"})
    assert "@user_id:{test_user}" in query
    assert "@agent_id:{test_agent}" in query
    assert f"=>{knn_part}" in query
def test_get_error_handling(valkey_db, mock_valkey_client):
    """Test error handling in the get method."""
    # Mock hgetall to raise an exception
    mock_valkey_client.hgetall.side_effect = Exception("Unexpected error")
    # Call get should raise the exception
    with pytest.raises(Exception, match="Unexpected error"):
        valkey_db.get("test_id")


def test_list_error_handling(valkey_db, mock_valkey_client):
    """Test error handling in the list method."""
    # Mock search to raise an exception
    mock_ft = mock_valkey_client.ft.return_value
    mock_ft.search.side_effect = Exception("Unexpected error")
    # Call list should return empty result on error (it swallows exceptions)
    results = valkey_db.list(filters={"user_id": "test_user"})
    # Check that the result is the empty nested-list shape
    assert results == [[]]
def test_create_index_other_error():
    """Test that initialization handles other errors during index creation."""
    # Mock the execute_command to raise a different error
    with patch("valkey.from_url") as mock_client:
        mock_client.return_value.execute_command.side_effect = ResponseError("Some other error")
        mock_client.return_value.ft = MagicMock()
        # "not found" on FT.INFO triggers the index-creation path
        mock_client.return_value.ft.return_value.info.side_effect = ResponseError("not found")
        # Initialize ValkeyDB should raise the exception
        with pytest.raises(ResponseError, match="Some other error"):
            ValkeyDB(
                valkey_url="valkey://localhost:6379",
                collection_name="test_collection",
                embedding_model_dims=1536,
            )


def test_create_col_error(valkey_db, mock_valkey_client):
    """Test error handling in create_col method."""
    # Mock execute_command to raise an exception
    mock_valkey_client.execute_command.side_effect = Exception("Failed to create index")
    # Call create_col should raise the exception
    with pytest.raises(Exception, match="Failed to create index"):
        valkey_db.create_col(name="new_collection", vector_size=768)


def test_list_cols_error(valkey_db, mock_valkey_client):
    """Test error handling in list_cols method."""
    # Reset the mock to clear previous calls
    mock_valkey_client.execute_command.reset_mock()
    # Mock execute_command to raise an exception
    mock_valkey_client.execute_command.side_effect = Exception("Failed to list indices")
    # Call list_cols should raise the exception
    with pytest.raises(Exception, match="Failed to list indices"):
        valkey_db.list_cols()


def test_col_info_error(valkey_db, mock_valkey_client):
    """Test error handling in col_info method."""
    # Mock ft().info() to raise an exception
    mock_ft = mock_valkey_client.ft.return_value
    mock_ft.info.side_effect = Exception("Failed to get index info")
    # Call col_info should raise the exception
    with pytest.raises(Exception, match="Failed to get index info"):
        valkey_db.col_info()
# Additional tests to improve coverage
def test_invalid_index_type():
    """Test validation of invalid index type."""
    with pytest.raises(ValueError, match="Invalid index_type: invalid. Must be 'hnsw' or 'flat'"):
        ValkeyDB(
            valkey_url="valkey://localhost:6379",
            collection_name="test_collection",
            embedding_model_dims=1536,
            index_type="invalid",
        )


def test_index_existence_check_error(mock_valkey_client):
    """Test error handling when checking index existence."""
    # Mock ft().info() to raise a ResponseError that's not "not found";
    # such errors must propagate instead of triggering index creation.
    mock_ft = MagicMock()
    mock_ft.info.side_effect = ResponseError("Some other error")
    mock_valkey_client.ft.return_value = mock_ft
    with patch("valkey.from_url", return_value=mock_valkey_client):
        with pytest.raises(ResponseError):
            ValkeyDB(
                valkey_url="valkey://localhost:6379",
                collection_name="test_collection",
                embedding_model_dims=1536,
            )
def test_flat_index_creation(mock_valkey_client):
    """Test creation of FLAT index type."""
    mock_ft = MagicMock()
    # Mock the info method to raise ResponseError with "not found" to trigger index creation
    mock_ft.info.side_effect = ResponseError("Index not found")
    mock_valkey_client.ft.return_value = mock_ft
    with patch("valkey.from_url", return_value=mock_valkey_client):
        # Mock the execute_command to avoid the actual exception
        mock_valkey_client.execute_command.return_value = None
        ValkeyDB(
            valkey_url="valkey://localhost:6379",
            collection_name="test_collection",
            embedding_model_dims=1536,
            index_type="flat",
        )
        # Verify that execute_command was called (index creation)
        assert mock_valkey_client.execute_command.called


def test_index_creation_error(mock_valkey_client):
    """Test error handling during index creation."""
    mock_ft = MagicMock()
    mock_ft.info.side_effect = ResponseError("Unknown index name")  # Index doesn't exist
    mock_valkey_client.ft.return_value = mock_ft
    # FT.CREATE itself fails; the constructor must propagate the error
    mock_valkey_client.execute_command.side_effect = Exception("Failed to create index")
    with patch("valkey.from_url", return_value=mock_valkey_client):
        with pytest.raises(Exception, match="Failed to create index"):
            ValkeyDB(
                valkey_url="valkey://localhost:6379",
                collection_name="test_collection",
                embedding_model_dims=1536,
            )
def test_insert_missing_required_field(valkey_db, mock_valkey_client):
    """Test error handling when inserting vector with missing required field."""
    # Mock hset to raise KeyError (missing required field)
    mock_valkey_client.hset.side_effect = KeyError("missing_field")
    # This should not raise an exception but should log the error
    # (KeyError is treated as best-effort; only general errors propagate).
    valkey_db.insert(vectors=[np.random.rand(1536).tolist()], payloads=[{"memory": "test"}], ids=["test_id"])


def test_insert_general_error(valkey_db, mock_valkey_client):
    """Test error handling for general exceptions during insert."""
    # Mock hset to raise a general exception
    mock_valkey_client.hset.side_effect = Exception("Database error")
    with pytest.raises(Exception, match="Database error"):
        valkey_db.insert(vectors=[np.random.rand(1536).tolist()], payloads=[{"memory": "test"}], ids=["test_id"])
def test_search_with_invalid_metadata(valkey_db, mock_valkey_client):
    """Test search with invalid JSON metadata."""
    # Mock search results with invalid JSON metadata
    mock_doc = MagicMock()
    mock_doc.memory_id = "test_id"
    mock_doc.hash = "test_hash"
    mock_doc.memory = "test_data"
    mock_doc.created_at = str(int(datetime.now().timestamp()))
    mock_doc.metadata = "invalid_json"  # Invalid JSON
    mock_doc.vector_score = "0.5"
    mock_result = MagicMock()
    mock_result.docs = [mock_doc]
    mock_valkey_client.ft.return_value.search.return_value = mock_result
    # Should handle invalid JSON gracefully (result still returned)
    results = valkey_db.search(query="test query", vectors=np.random.rand(1536).tolist(), limit=5)
    assert len(results) == 1


def test_search_with_hnsw_ef_runtime(valkey_db, mock_valkey_client):
    """Test search with HNSW ef_runtime parameter."""
    valkey_db.index_type = "hnsw"
    valkey_db.hnsw_ef_runtime = 20
    mock_result = MagicMock()
    mock_result.docs = []
    mock_valkey_client.ft.return_value.search.return_value = mock_result
    valkey_db.search(query="test query", vectors=np.random.rand(1536).tolist(), limit=5)
    # Verify the search was called
    assert mock_valkey_client.ft.return_value.search.called
def test_delete_error(valkey_db, mock_valkey_client):
    """Test error handling during vector deletion."""
    mock_valkey_client.delete.side_effect = Exception("Delete failed")
    with pytest.raises(Exception, match="Delete failed"):
        valkey_db.delete("test_id")


def test_update_missing_required_field(valkey_db, mock_valkey_client):
    """Test error handling when updating vector with missing required field."""
    mock_valkey_client.hset.side_effect = KeyError("missing_field")
    # This should not raise an exception but should log the error
    valkey_db.update(vector_id="test_id", vector=np.random.rand(1536).tolist(), payload={"memory": "updated"})


def test_update_general_error(valkey_db, mock_valkey_client):
    """Test error handling for general exceptions during update."""
    mock_valkey_client.hset.side_effect = Exception("Update failed")
    with pytest.raises(Exception, match="Update failed"):
        valkey_db.update(vector_id="test_id", vector=np.random.rand(1536).tolist(), payload={"memory": "updated"})
def test_get_with_binary_data_and_unicode_error(valkey_db, mock_valkey_client):
    """Test get method with binary data that fails UTF-8 decoding."""
    # Mock result with binary data that can't be decoded
    mock_result = {
        "memory_id": "test_id",
        "hash": b"\xff\xfe",  # Invalid UTF-8 bytes
        "memory": "test_memory",
        "created_at": "1234567890",
        "updated_at": "invalid_timestamp",
        "metadata": "{}",
        "embedding": b"binary_embedding_data",
    }
    mock_valkey_client.hgetall.return_value = mock_result
    result = valkey_db.get("test_id")
    # Should handle binary data gracefully
    assert result.id == "test_id"
    assert result.payload["data"] == "test_memory"


def test_get_with_invalid_timestamps(valkey_db, mock_valkey_client):
    """Test get method with invalid timestamp values."""
    mock_result = {
        "memory_id": "test_id",
        "hash": "test_hash",
        "memory": "test_memory",
        "created_at": "invalid_timestamp",
        "updated_at": "also_invalid",
        "metadata": "{}",
        "embedding": b"binary_data",
    }
    mock_valkey_client.hgetall.return_value = mock_result
    result = valkey_db.get("test_id")
    # Should handle invalid timestamps gracefully (a created_at value is
    # still present in the payload)
    assert result.id == "test_id"
    assert "created_at" in result.payload


def test_get_with_invalid_metadata_json(valkey_db, mock_valkey_client):
    """Test get method with invalid JSON metadata."""
    mock_result = {
        "memory_id": "test_id",
        "hash": "test_hash",
        "memory": "test_memory",
        "created_at": "1234567890",
        "updated_at": "1234567890",
        "metadata": "invalid_json{",  # Invalid JSON
        "embedding": b"binary_data",
    }
    mock_valkey_client.hgetall.return_value = mock_result
    result = valkey_db.get("test_id")
    # Should handle invalid JSON gracefully
    assert result.id == "test_id"
def test_list_with_missing_fields_and_defaults(valkey_db, mock_valkey_client):
    """list() handles search documents that only carry the core fields."""
    now = str(int(datetime.now().timestamp()))
    doc = MagicMock()
    doc.memory_id = "fallback_id"
    doc.hash = "test_hash"
    doc.memory = "test_memory"
    doc.created_at = now
    doc.updated_at = now
    doc.metadata = json.dumps({"key": "value"})
    doc.vector_score = "0.5"
    search_result = MagicMock()
    search_result.docs = [doc]
    mock_valkey_client.ft.return_value.search.return_value = search_result
    results = valkey_db.list()
    # list() wraps one page of results in an outer list.
    assert len(results) == 1
    (page,) = results
    assert len(page) == 1
    item = page[0]
    assert item.id == "fallback_id"
    assert "hash" in item.payload
    assert "data" in item.payload  # "memory" is exposed under the "data" key
| {
"repo_id": "mem0ai/mem0",
"file_path": "tests/vector_stores/test_valkey.py",
"license": "Apache License 2.0",
"lines": 665,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mem0ai/mem0:mem0/configs/vector_stores/s3_vectors.py | from typing import Any, Dict, Optional
from pydantic import BaseModel, ConfigDict, Field, model_validator
class S3VectorsConfig(BaseModel):
    """Configuration for the S3 Vectors vector-store backend."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    vector_bucket_name: str = Field(description="Name of the S3 Vector bucket")
    collection_name: str = Field("mem0", description="Name of the vector index")
    embedding_model_dims: int = Field(1536, description="Dimension of the embedding vector")
    distance_metric: str = Field(
        "cosine",
        description="Distance metric for similarity search. Options: 'cosine', 'euclidean'",
    )
    region_name: Optional[str] = Field(None, description="AWS region for the S3 Vectors client")

    @model_validator(mode="before")
    @classmethod
    def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Reject any input keys that are not declared model fields."""
        declared = set(cls.model_fields.keys())
        unknown = set(values.keys()) - declared
        if unknown:
            raise ValueError(
                f"Extra fields not allowed: {', '.join(unknown)}. "
                f"Please input only the following fields: {', '.join(declared)}"
            )
        return values
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/vector_stores/s3_vectors.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:mem0/vector_stores/s3_vectors.py | import json
import logging
from typing import Dict, List, Optional
from pydantic import BaseModel
from mem0.vector_stores.base import VectorStoreBase
try:
import boto3
from botocore.exceptions import ClientError
except ImportError:
raise ImportError("The 'boto3' library is required. Please install it using 'pip install boto3'.")
logger = logging.getLogger(__name__)
class OutputData(BaseModel):
    # Normalized result row returned by S3Vectors query/get/list helpers.
    id: Optional[str]  # the vector's key in the index
    score: Optional[float]  # distance from query_vectors; None for get/list
    payload: Optional[Dict]  # metadata stored alongside the vector
class S3Vectors(VectorStoreBase):
    """mem0 vector store backed by the AWS S3 Vectors service ("s3vectors" boto3 client)."""

    def __init__(
        self,
        vector_bucket_name: str,
        collection_name: str,
        embedding_model_dims: int,
        distance_metric: str = "cosine",
        region_name: Optional[str] = None,
    ):
        """Create the client and ensure both the bucket and the index exist.

        Args:
            vector_bucket_name: Name of the S3 Vector bucket.
            collection_name: Name of the vector index inside the bucket.
            embedding_model_dims: Dimension of stored embeddings.
            distance_metric: "cosine" or "euclidean".
            region_name: AWS region; None falls back to the default session region.
        """
        self.client = boto3.client("s3vectors", region_name=region_name)
        self.vector_bucket_name = vector_bucket_name
        self.collection_name = collection_name
        self.embedding_model_dims = embedding_model_dims
        self.distance_metric = distance_metric
        self._ensure_bucket_exists()
        self.create_col(self.collection_name, self.embedding_model_dims, self.distance_metric)

    def _ensure_bucket_exists(self):
        """Create the vector bucket if it does not already exist (idempotent)."""
        try:
            self.client.get_vector_bucket(vectorBucketName=self.vector_bucket_name)
            logger.info(f"Vector bucket '{self.vector_bucket_name}' already exists.")
        except ClientError as e:
            # Only a missing bucket is recoverable; anything else is re-raised.
            if e.response["Error"]["Code"] == "NotFoundException":
                logger.info(f"Vector bucket '{self.vector_bucket_name}' not found. Creating it.")
                self.client.create_vector_bucket(vectorBucketName=self.vector_bucket_name)
                logger.info(f"Vector bucket '{self.vector_bucket_name}' created.")
            else:
                raise

    def create_col(self, name, vector_size, distance="cosine"):
        """Create index `name` with the given dimension/metric if missing (idempotent)."""
        try:
            self.client.get_index(vectorBucketName=self.vector_bucket_name, indexName=name)
            logger.info(f"Index '{name}' already exists in bucket '{self.vector_bucket_name}'.")
        except ClientError as e:
            if e.response["Error"]["Code"] == "NotFoundException":
                logger.info(f"Index '{name}' not found in bucket '{self.vector_bucket_name}'. Creating it.")
                self.client.create_index(
                    vectorBucketName=self.vector_bucket_name,
                    indexName=name,
                    dataType="float32",
                    dimension=vector_size,
                    distanceMetric=distance,
                )
                logger.info(f"Index '{name}' created.")
            else:
                raise

    def _parse_output(self, vectors: List[Dict]) -> List[OutputData]:
        """Convert raw API vector dicts into OutputData records."""
        results = []
        for v in vectors:
            payload = v.get("metadata", {})
            # Boto3 might return metadata as a JSON string
            if isinstance(payload, str):
                try:
                    payload = json.loads(payload)
                except json.JSONDecodeError:
                    logger.warning(f"Failed to parse metadata for key {v.get('key')}")
                    payload = {}
            results.append(OutputData(id=v.get("key"), score=v.get("distance"), payload=payload))
        return results

    def insert(self, vectors, payloads=None, ids=None):
        """Insert (or overwrite) vectors in the index.

        Fixes over the previous version: `ids` may now be omitted (UUID4 keys
        are minted, matching the base-class default of ids=None), and a None
        payload entry no longer sends `metadata=None` to the API.
        """
        import uuid  # local import: only needed when ids must be auto-generated

        if ids is None:
            ids = [str(uuid.uuid4()) for _ in vectors]
        vectors_to_put = []
        for i, vec in enumerate(vectors):
            metadata = payloads[i] if payloads and payloads[i] is not None else {}
            vectors_to_put.append(
                {
                    "key": ids[i],
                    "data": {"float32": vec},
                    "metadata": metadata,
                }
            )
        self.client.put_vectors(
            vectorBucketName=self.vector_bucket_name,
            indexName=self.collection_name,
            vectors=vectors_to_put,
        )

    def search(self, query, vectors, limit=5, filters=None):
        """Query the index for the `limit` nearest neighbors of `vectors`."""
        params = {
            "vectorBucketName": self.vector_bucket_name,
            "indexName": self.collection_name,
            "queryVector": {"float32": vectors},
            "topK": limit,
            "returnMetadata": True,
            "returnDistance": True,
        }
        if filters:
            params["filter"] = filters
        response = self.client.query_vectors(**params)
        return self._parse_output(response.get("vectors", []))

    def delete(self, vector_id):
        """Delete a single vector by key."""
        self.client.delete_vectors(
            vectorBucketName=self.vector_bucket_name,
            indexName=self.collection_name,
            keys=[vector_id],
        )

    def update(self, vector_id, vector=None, payload=None):
        # S3 Vectors has no partial update; put_vectors overwrites the key.
        self.insert(vectors=[vector], payloads=[payload], ids=[vector_id])

    def get(self, vector_id) -> Optional[OutputData]:
        """Fetch a single vector's metadata by key; returns None when absent."""
        response = self.client.get_vectors(
            vectorBucketName=self.vector_bucket_name,
            indexName=self.collection_name,
            keys=[vector_id],
            returnData=False,
            returnMetadata=True,
        )
        vectors = response.get("vectors", [])
        if not vectors:
            return None
        return self._parse_output(vectors)[0]

    def list_cols(self):
        """Return the names of all indexes in the bucket."""
        response = self.client.list_indexes(vectorBucketName=self.vector_bucket_name)
        return [idx["indexName"] for idx in response.get("indexes", [])]

    def delete_col(self):
        """Drop the current index."""
        self.client.delete_index(vectorBucketName=self.vector_bucket_name, indexName=self.collection_name)

    def col_info(self):
        """Return the index description from the service."""
        response = self.client.get_index(vectorBucketName=self.vector_bucket_name, indexName=self.collection_name)
        return response.get("index", {})

    def list(self, filters=None, limit=None):
        """List vectors in the index (metadata only, no embeddings).

        Note: list_vectors does not support metadata filtering, so `filters`
        is ignored with a warning. `limit` now caps the TOTAL number of
        returned vectors; previously it only set the page size while every
        page was still accumulated, so more than `limit` could be returned.
        """
        if filters:
            logger.warning("S3 Vectors `list` does not support metadata filtering. Ignoring filters.")
        params = {
            "vectorBucketName": self.vector_bucket_name,
            "indexName": self.collection_name,
            "returnData": False,
            "returnMetadata": True,
        }
        if limit:
            params["maxResults"] = limit
        paginator = self.client.get_paginator("list_vectors")
        pages = paginator.paginate(**params)
        all_vectors = []
        for page in pages:
            all_vectors.extend(page.get("vectors", []))
            # Bug fix: stop paginating once the requested cap is reached.
            if limit and len(all_vectors) >= limit:
                all_vectors = all_vectors[:limit]
                break
        return [self._parse_output(all_vectors)]

    def reset(self):
        """Drop and recreate the index, discarding all stored vectors."""
        logger.warning(f"Resetting index {self.collection_name}...")
        self.delete_col()
        self.create_col(self.collection_name, self.embedding_model_dims, self.distance_metric)
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/vector_stores/s3_vectors.py",
"license": "Apache License 2.0",
"lines": 151,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:tests/vector_stores/test_s3_vectors.py | from mem0.configs.vector_stores.s3_vectors import S3VectorsConfig
import pytest
from botocore.exceptions import ClientError
from mem0.memory.main import Memory
from mem0.vector_stores.s3_vectors import S3Vectors
BUCKET_NAME = "test-bucket"
INDEX_NAME = "test-index"
EMBEDDING_DIMS = 1536
REGION = "us-east-1"
@pytest.fixture
def mock_boto_client(mocker):
    """Patch boto3.client so S3Vectors talks to a MagicMock instead of AWS."""
    client = mocker.MagicMock()
    mocker.patch("boto3.client", return_value=client)
    return client
@pytest.fixture
def mock_embedder(mocker):
    """Stub the embedder factory with a fixed 3-dimensional embedding."""
    embedder = mocker.MagicMock()
    embedder.return_value.embed.return_value = [0.1, 0.2, 0.3]
    mocker.patch("mem0.utils.factory.EmbedderFactory.create", embedder)
    return embedder
@pytest.fixture
def mock_llm(mocker):
    """Stub the LLM factory and the SQLite history store."""
    llm = mocker.MagicMock()
    mocker.patch("mem0.utils.factory.LlmFactory.create", llm)
    mocker.patch("mem0.memory.storage.SQLiteManager", mocker.MagicMock())
    return llm
def test_initialization_creates_resources(mock_boto_client):
    """A missing bucket and index are both created during construction."""
    missing = ClientError({"Error": {"Code": "NotFoundException"}}, "OperationName")
    mock_boto_client.get_vector_bucket.side_effect = missing
    mock_boto_client.get_index.side_effect = missing
    S3Vectors(
        vector_bucket_name=BUCKET_NAME,
        collection_name=INDEX_NAME,
        embedding_model_dims=EMBEDDING_DIMS,
        region_name=REGION,
    )
    mock_boto_client.create_vector_bucket.assert_called_once_with(vectorBucketName=BUCKET_NAME)
    mock_boto_client.create_index.assert_called_once_with(
        vectorBucketName=BUCKET_NAME,
        indexName=INDEX_NAME,
        dataType="float32",
        dimension=EMBEDDING_DIMS,
        distanceMetric="cosine",
    )
def test_initialization_uses_existing_resources(mock_boto_client):
    """No create calls are issued when the bucket and index already exist."""
    mock_boto_client.get_vector_bucket.return_value = {}
    mock_boto_client.get_index.return_value = {}
    S3Vectors(
        vector_bucket_name=BUCKET_NAME,
        collection_name=INDEX_NAME,
        embedding_model_dims=EMBEDDING_DIMS,
        region_name=REGION,
    )
    # Existing resources short-circuit creation entirely.
    mock_boto_client.create_vector_bucket.assert_not_called()
    mock_boto_client.create_index.assert_not_called()
def test_memory_initialization_with_config(mock_boto_client, mock_llm, mock_embedder):
    """Memory.from_config wires up an S3Vectors store without AttributeError."""
    mock_boto_client.get_vector_bucket.return_value = {}
    mock_boto_client.get_index.return_value = {}
    store_settings = {
        "vector_bucket_name": BUCKET_NAME,
        "collection_name": INDEX_NAME,
        "embedding_model_dims": EMBEDDING_DIMS,
        "distance_metric": "cosine",
        "region_name": REGION,
    }
    config = {
        "vector_store": {
            "provider": "s3_vectors",
            "config": store_settings,
        }
    }
    try:
        memory = Memory.from_config(config)
        assert memory.vector_store is not None
        assert isinstance(memory.vector_store, S3Vectors)
        assert isinstance(memory.config.vector_store.config, S3VectorsConfig)
    except AttributeError:
        pytest.fail("Memory initialization failed")
def test_insert(mock_boto_client):
    """insert() forwards keys, float32 data and metadata to put_vectors."""
    store = S3Vectors(
        vector_bucket_name=BUCKET_NAME,
        collection_name=INDEX_NAME,
        embedding_model_dims=EMBEDDING_DIMS,
    )
    store.insert(
        [[0.1, 0.2], [0.3, 0.4]],
        [{"meta": "data1"}, {"meta": "data2"}],
        ["id1", "id2"],
    )
    expected_vectors = [
        {
            "key": "id1",
            "data": {"float32": [0.1, 0.2]},
            "metadata": {"meta": "data1"},
        },
        {
            "key": "id2",
            "data": {"float32": [0.3, 0.4]},
            "metadata": {"meta": "data2"},
        },
    ]
    mock_boto_client.put_vectors.assert_called_once_with(
        vectorBucketName=BUCKET_NAME,
        indexName=INDEX_NAME,
        vectors=expected_vectors,
    )
def test_search(mock_boto_client):
    """search() queries the index and parses key/score from the response."""
    mock_boto_client.query_vectors.return_value = {
        "vectors": [{"key": "id1", "distance": 0.9, "metadata": {"meta": "data1"}}]
    }
    store = S3Vectors(
        vector_bucket_name=BUCKET_NAME,
        collection_name=INDEX_NAME,
        embedding_model_dims=EMBEDDING_DIMS,
    )
    hits = store.search(query="test", vectors=[0.1, 0.2], limit=1)
    mock_boto_client.query_vectors.assert_called_once()
    assert len(hits) == 1
    assert (hits[0].id, hits[0].score) == ("id1", 0.9)
def test_get(mock_boto_client):
    """get() fetches a single key with metadata only (no raw data)."""
    mock_boto_client.get_vectors.return_value = {
        "vectors": [{"key": "id1", "metadata": {"meta": "data1"}}]
    }
    store = S3Vectors(
        vector_bucket_name=BUCKET_NAME,
        collection_name=INDEX_NAME,
        embedding_model_dims=EMBEDDING_DIMS,
    )
    fetched = store.get("id1")
    mock_boto_client.get_vectors.assert_called_once_with(
        vectorBucketName=BUCKET_NAME,
        indexName=INDEX_NAME,
        keys=["id1"],
        returnData=False,
        returnMetadata=True,
    )
    assert fetched.id == "id1"
    assert fetched.payload["meta"] == "data1"
def test_delete(mock_boto_client):
    """delete() removes exactly the requested key from the index."""
    vs = S3Vectors(
        vector_bucket_name=BUCKET_NAME,
        collection_name=INDEX_NAME,
        embedding_model_dims=EMBEDDING_DIMS,
    )
    vs.delete("id1")
    mock_boto_client.delete_vectors.assert_called_once_with(
        vectorBucketName=BUCKET_NAME, indexName=INDEX_NAME, keys=["id1"]
    )
def test_reset(mock_boto_client):
    """reset() drops the index and recreates it."""
    # GIVEN an absent index, so init creates it once
    mock_boto_client.get_index.side_effect = ClientError(
        {"Error": {"Code": "NotFoundException"}}, "OperationName"
    )
    # WHEN the store is constructed
    vs = S3Vectors(
        vector_bucket_name=BUCKET_NAME,
        collection_name=INDEX_NAME,
        embedding_model_dims=EMBEDDING_DIMS,
    )
    # THEN exactly one create happened during init
    assert mock_boto_client.create_index.call_count == 1
    # WHEN the store is reset
    vs.reset()
    # THEN the index was deleted and created a second time
    mock_boto_client.delete_index.assert_called_once_with(
        vectorBucketName=BUCKET_NAME, indexName=INDEX_NAME
    )
    assert mock_boto_client.create_index.call_count == 2
| {
"repo_id": "mem0ai/mem0",
"file_path": "tests/vector_stores/test_s3_vectors.py",
"license": "Apache License 2.0",
"lines": 182,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mem0ai/mem0:openmemory/api/app/routers/backup.py | from datetime import UTC, datetime
import io
import json
import gzip
import zipfile
from typing import Optional, List, Dict, Any
from uuid import UUID
from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Query, Form
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from sqlalchemy.orm import Session, joinedload
from sqlalchemy import and_
from app.database import get_db
from app.models import (
User, App, Memory, MemoryState, Category, memory_categories,
MemoryStatusHistory, AccessControl
)
from app.utils.memory import get_memory_client
from uuid import uuid4
# All backup endpoints below are mounted under /api/v1/backup.
router = APIRouter(prefix="/api/v1/backup", tags=["backup"])
class ExportRequest(BaseModel):
    # Parameters selecting which memories to export.
    user_id: str  # external user identifier (User.user_id, not the row UUID)
    app_id: Optional[UUID] = None  # restrict the export to a single app
    from_date: Optional[int] = None  # unix timestamp lower bound on created_at
    to_date: Optional[int] = None  # unix timestamp upper bound on created_at
    include_vectors: bool = True  # NOTE(review): not read by the visible export code -- confirm intended
def _iso(dt: Optional[datetime]) -> Optional[str]:
if isinstance(dt, datetime):
try:
return dt.astimezone(UTC).isoformat()
except:
return dt.replace(tzinfo=UTC).isoformat()
return None
def _parse_iso(dt: Optional[str]) -> Optional[datetime]:
if not dt:
return None
try:
return datetime.fromisoformat(dt)
except Exception:
try:
return datetime.fromisoformat(dt.replace("Z", "+00:00"))
except Exception:
return None
def _export_sqlite(db: Session, req: ExportRequest) -> Dict[str, Any]:
    """Build a JSON-serializable snapshot of the user's relational data.

    Collects the user row plus the memories matching the request filters and
    everything reachable from them: apps, categories, memory<->category link
    rows, status history, and app-scoped access controls.

    Raises:
        HTTPException(404): if no user with req.user_id exists.
    """
    user = db.query(User).filter(User.user_id == req.user_id).first()
    if not user:
        raise HTTPException(status_code=404, detail="User not found")
    # Optional created_at window, supplied as unix timestamps on the request.
    time_filters = []
    if req.from_date:
        time_filters.append(Memory.created_at >= datetime.fromtimestamp(req.from_date, tz=UTC))
    if req.to_date:
        time_filters.append(Memory.created_at <= datetime.fromtimestamp(req.to_date, tz=UTC))
    # Eager-load categories and app so the dict build below issues no lazy queries.
    mem_q = (
        db.query(Memory)
        .options(joinedload(Memory.categories), joinedload(Memory.app))
        .filter(
            Memory.user_id == user.id,
            *(time_filters or []),
            * ( [Memory.app_id == req.app_id] if req.app_id else [] ),
        )
    )
    memories = mem_q.all()
    memory_ids = [m.id for m in memories]
    # Related rows are fetched only for ids actually referenced by the export.
    app_ids = sorted({m.app_id for m in memories if m.app_id})
    apps = db.query(App).filter(App.id.in_(app_ids)).all() if app_ids else []
    cats = sorted({c for m in memories for c in m.categories}, key = lambda c: str(c.id))
    mc_rows = db.execute(
        memory_categories.select().where(memory_categories.c.memory_id.in_(memory_ids))
    ).fetchall() if memory_ids else []
    history = db.query(MemoryStatusHistory).filter(MemoryStatusHistory.memory_id.in_(memory_ids)).all() if memory_ids else []
    acls = db.query(AccessControl).filter(
        AccessControl.subject_type == "app",
        AccessControl.subject_id.in_(app_ids) if app_ids else False
    ).all() if app_ids else []
    # Everything below uses plain JSON types: UUIDs become strings and
    # datetimes become ISO-8601 strings via _iso().
    return {
        "user": {
            "id": str(user.id),
            "user_id": user.user_id,
            "name": user.name,
            "email": user.email,
            "metadata": user.metadata_,
            "created_at": _iso(user.created_at),
            "updated_at": _iso(user.updated_at)
        },
        "apps": [
            {
                "id": str(a.id),
                "owner_id": str(a.owner_id),
                "name": a.name,
                "description": a.description,
                "metadata": a.metadata_,
                "is_active": a.is_active,
                "created_at": _iso(a.created_at),
                "updated_at": _iso(a.updated_at),
            }
            for a in apps
        ],
        "categories": [
            {
                "id": str(c.id),
                "name": c.name,
                "description": c.description,
                "created_at": _iso(c.created_at),
                "updated_at": _iso(c.updated_at),
            }
            for c in cats
        ],
        "memories": [
            {
                "id": str(m.id),
                "user_id": str(m.user_id),
                "app_id": str(m.app_id) if m.app_id else None,
                "content": m.content,
                "metadata": m.metadata_,
                "state": m.state.value,
                "created_at": _iso(m.created_at),
                "updated_at": _iso(m.updated_at),
                "archived_at": _iso(m.archived_at),
                "deleted_at": _iso(m.deleted_at),
                "category_ids": [str(c.id) for c in m.categories], #TODO: figure out a way to add category names simply to this
            }
            for m in memories
        ],
        "memory_categories": [
            {"memory_id": str(r.memory_id), "category_id": str(r.category_id)}
            for r in mc_rows
        ],
        "status_history": [
            {
                "id": str(h.id),
                "memory_id": str(h.memory_id),
                "changed_by": str(h.changed_by),
                "old_state": h.old_state.value,
                "new_state": h.new_state.value,
                "changed_at": _iso(h.changed_at),
            }
            for h in history
        ],
        "access_controls": [
            {
                "id": str(ac.id),
                "subject_type": ac.subject_type,
                "subject_id": str(ac.subject_id) if ac.subject_id else None,
                "object_type": ac.object_type,
                "object_id": str(ac.object_id) if ac.object_id else None,
                "effect": ac.effect,
                "created_at": _iso(ac.created_at),
            }
            for ac in acls
        ],
        "export_meta": {
            "app_id_filter": str(req.app_id) if req.app_id else None,
            "from_date": req.from_date,
            "to_date": req.to_date,
            "version": "1",
            "generated_at": datetime.now(UTC).isoformat(),
        },
    }
def _export_logical_memories_gz(
    db: Session,
    *,
    user_id: str,
    app_id: Optional[UUID] = None,
    from_date: Optional[int] = None,
    to_date: Optional[int] = None
) -> bytes:
    """
    Export a provider-agnostic backup of memories so they can be restored to any vector DB
    by re-embedding content. One JSON object per line, gzip-compressed.
    Schema (per line):
    {
      "id": "<uuid>",
      "content": "<text>",
      "metadata": {...},
      "created_at": "<iso8601 or null>",
      "updated_at": "<iso8601 or null>",
      "state": "active|paused|archived|deleted",
      "app": "<app name or null>",
      "categories": ["catA", "catB", ...]
    }

    Raises:
        HTTPException(404): if no user with `user_id` exists.
    """
    user = db.query(User).filter(User.user_id == user_id).first()
    if not user:
        raise HTTPException(status_code=404, detail="User not found")
    # Same created_at window semantics as _export_sqlite (unix timestamps).
    time_filters = []
    if from_date:
        time_filters.append(Memory.created_at >= datetime.fromtimestamp(from_date, tz=UTC))
    if to_date:
        time_filters.append(Memory.created_at <= datetime.fromtimestamp(to_date, tz=UTC))
    q = (
        db.query(Memory)
        .options(joinedload(Memory.categories), joinedload(Memory.app))
        .filter(
            Memory.user_id == user.id,
            *(time_filters or []),
        )
    )
    if app_id:
        q = q.filter(Memory.app_id == app_id)
    # Stream one JSONL record per memory straight into an in-memory gzip blob.
    buf = io.BytesIO()
    with gzip.GzipFile(fileobj=buf, mode="wb") as gz:
        for m in q.all():
            record = {
                "id": str(m.id),
                "content": m.content,
                "metadata": m.metadata_ or {},
                "created_at": _iso(m.created_at),
                "updated_at": _iso(m.updated_at),
                "state": m.state.value,
                "app": m.app.name if m.app else None,
                "categories": [c.name for c in m.categories],
            }
            gz.write((json.dumps(record) + "\n").encode("utf-8"))
    return buf.getvalue()
@router.post("/export")
async def export_backup(req: ExportRequest, db: Session = Depends(get_db)):
    """Bundle the relational snapshot and the logical memory dump into one zip."""
    snapshot = _export_sqlite(db=db, req=req)
    logical_dump = _export_logical_memories_gz(
        db=db,
        user_id=req.user_id,
        app_id=req.app_id,
        from_date=req.from_date,
        to_date=req.to_date,
    )
    #TODO: add vector store specific exports in future for speed
    archive = io.BytesIO()
    with zipfile.ZipFile(archive, "w", compression=zipfile.ZIP_DEFLATED) as zf:
        zf.writestr("memories.json", json.dumps(snapshot, indent=2))
        zf.writestr("memories.jsonl.gz", logical_dump)
    archive.seek(0)
    disposition = f'attachment; filename="memories_export_{req.user_id}.zip"'
    return StreamingResponse(
        archive,
        media_type="application/zip",
        headers={"Content-Disposition": disposition},
    )
@router.post("/import")
async def import_backup(
    file: UploadFile = File(..., description="Zip with memories.json and memories.jsonl.gz"),
    user_id: str = Form(..., description="Import memories into this user_id"),
    mode: str = Query("overwrite"),
    db: Session = Depends(get_db)
):
    """Restore a backup zip produced by /export into `user_id`.

    mode="skip" leaves rows that already exist untouched; mode="overwrite"
    treats the imported data as ground truth for this user's existing rows.
    Vectors are re-embedded and upserted only when a memory client with an
    embedding model is available.
    """
    if not file.filename.endswith(".zip"):
        raise HTTPException(status_code=400, detail="Expected a zip file.")
    if mode not in {"skip", "overwrite"}:
        raise HTTPException(status_code=400, detail="Invalid mode. Must be 'skip' or 'overwrite'.")
    user = db.query(User).filter(User.user_id == user_id).first()
    if not user:
        raise HTTPException(status_code=404, detail="User not found")
    content = await file.read()
    # NOTE(review): an HTTPException raised inside this try (e.g. the missing
    # memories.json case) is caught below and re-reported as "Invalid zip
    # file" -- confirm that is intended.
    try:
        with zipfile.ZipFile(io.BytesIO(content), "r") as zf:
            names = zf.namelist()
            def find_member(filename: str) -> Optional[str]:
                # Locate a member by basename regardless of folder nesting.
                for name in names:
                    # Skip directory entries
                    if name.endswith('/'):
                        continue
                    if name.rsplit('/', 1)[-1] == filename:
                        return name
                return None
            sqlite_member = find_member("memories.json")
            if not sqlite_member:
                raise HTTPException(status_code=400, detail="memories.json missing in zip")
            memories_member = find_member("memories.jsonl.gz")
            sqlite_data = json.loads(zf.read(sqlite_member))
            memories_blob = zf.read(memories_member) if memories_member else None
    except Exception:
        raise HTTPException(status_code=400, detail="Invalid zip file")
    # All imported memories land in the user's "openmemory" app.
    default_app = db.query(App).filter(App.owner_id == user.id, App.name == "openmemory").first()
    if not default_app:
        default_app = App(owner_id=user.id, name="openmemory", is_active=True, metadata_={})
        db.add(default_app)
        db.commit()
        db.refresh(default_app)
    # Map exported category ids to local category rows, creating by name.
    cat_id_map: Dict[str, UUID] = {}
    for c in sqlite_data.get("categories", []):
        cat = db.query(Category).filter(Category.name == c["name"]).first()
        if not cat:
            cat = Category(name=c["name"], description=c.get("description"))
            db.add(cat)
            db.commit()
            db.refresh(cat)
        cat_id_map[c["id"]] = cat.id
    # Map exported memory ids to the ids actually used locally.
    old_to_new_id: Dict[str, UUID] = {}
    for m in sqlite_data.get("memories", []):
        incoming_id = UUID(m["id"])
        existing = db.query(Memory).filter(Memory.id == incoming_id).first()
        # Cross-user collision: always mint a new UUID and import as a new memory
        if existing and existing.user_id != user.id:
            target_id = uuid4()
        else:
            target_id = incoming_id
        old_to_new_id[m["id"]] = target_id
        # Same-user collision + skip mode: leave existing row untouched
        if existing and (existing.user_id == user.id) and mode == "skip":
            continue
        # Same-user collision + overwrite mode: treat import as ground truth
        if existing and (existing.user_id == user.id) and mode == "overwrite":
            incoming_state = m.get("state", "active")
            existing.user_id = user.id
            existing.app_id = default_app.id
            existing.content = m.get("content") or ""
            existing.metadata_ = m.get("metadata") or {}
            try:
                existing.state = MemoryState(incoming_state)
            except Exception:
                existing.state = MemoryState.active
            # Update state-related timestamps from import (ground truth)
            existing.archived_at = _parse_iso(m.get("archived_at"))
            existing.deleted_at = _parse_iso(m.get("deleted_at"))
            existing.created_at = _parse_iso(m.get("created_at")) or existing.created_at
            existing.updated_at = _parse_iso(m.get("updated_at")) or existing.updated_at
            db.add(existing)
            db.commit()
            continue
        # Fresh insert (new id, or a cross-user collision under a minted id).
        new_mem = Memory(
            id=target_id,
            user_id=user.id,
            app_id=default_app.id,
            content=m.get("content") or "",
            metadata_=m.get("metadata") or {},
            state=MemoryState(m.get("state", "active")) if m.get("state") else MemoryState.active,
            created_at=_parse_iso(m.get("created_at")) or datetime.now(UTC),
            updated_at=_parse_iso(m.get("updated_at")) or datetime.now(UTC),
            archived_at=_parse_iso(m.get("archived_at")),
            deleted_at=_parse_iso(m.get("deleted_at")),
        )
        db.add(new_mem)
        db.commit()
    # Recreate memory<->category links, skipping ones already present.
    for link in sqlite_data.get("memory_categories", []):
        mid = old_to_new_id.get(link["memory_id"])
        cid = cat_id_map.get(link["category_id"])
        if not (mid and cid):
            continue
        exists = db.execute(
            memory_categories.select().where(
                (memory_categories.c.memory_id == mid) & (memory_categories.c.category_id == cid)
            )
        ).first()
        if not exists:
            db.execute(memory_categories.insert().values(memory_id=mid, category_id=cid))
            db.commit()
    # Replay status history; attribution is rewritten to the importing user.
    for h in sqlite_data.get("status_history", []):
        hid = UUID(h["id"])
        mem_id = old_to_new_id.get(h["memory_id"], UUID(h["memory_id"]))
        exists = db.query(MemoryStatusHistory).filter(MemoryStatusHistory.id == hid).first()
        if exists and mode == "skip":
            continue
        rec = exists if exists else MemoryStatusHistory(id=hid)
        rec.memory_id = mem_id
        rec.changed_by = user.id
        try:
            rec.old_state = MemoryState(h.get("old_state", "active"))
            rec.new_state = MemoryState(h.get("new_state", "active"))
        except Exception:
            rec.old_state = MemoryState.active
            rec.new_state = MemoryState.active
        rec.changed_at = _parse_iso(h.get("changed_at")) or datetime.now(UTC)
        db.add(rec)
        db.commit()
    # Re-embed and upsert vectors when a vector store is configured.
    memory_client = get_memory_client()
    vector_store = getattr(memory_client, "vector_store", None) if memory_client else None
    if vector_store and memory_client and hasattr(memory_client, "embedding_model"):
        def iter_logical_records():
            # Prefer the gzip JSONL dump; fall back to the relational snapshot.
            if memories_blob:
                gz_buf = io.BytesIO(memories_blob)
                with gzip.GzipFile(fileobj=gz_buf, mode="rb") as gz:
                    for raw in gz:
                        yield json.loads(raw.decode("utf-8"))
            else:
                for m in sqlite_data.get("memories", []):
                    yield {
                        "id": m["id"],
                        "content": m.get("content"),
                        "metadata": m.get("metadata") or {},
                        "created_at": m.get("created_at"),
                        "updated_at": m.get("updated_at"),
                    }
        for rec in iter_logical_records():
            old_id = rec["id"]
            new_id = old_to_new_id.get(old_id, UUID(old_id))
            content = rec.get("content") or ""
            metadata = rec.get("metadata") or {}
            created_at = rec.get("created_at")
            updated_at = rec.get("updated_at")
            if mode == "skip":
                # Best effort: leave existing vectors alone in skip mode.
                try:
                    get_fn = getattr(vector_store, "get", None)
                    if callable(get_fn) and vector_store.get(str(new_id)):
                        continue
                except Exception:
                    pass
            payload = dict(metadata)
            payload["data"] = content
            if created_at:
                payload["created_at"] = created_at
            if updated_at:
                payload["updated_at"] = updated_at
            payload["user_id"] = user_id
            payload.setdefault("source_app", "openmemory")
            try:
                vec = memory_client.embedding_model.embed(content, "add")
                vector_store.insert(vectors=[vec], payloads=[payload], ids=[str(new_id)])
            except Exception as e:
                # Vector failures are non-fatal; the relational import stands.
                print(f"Vector upsert failed for memory {new_id}: {e}")
                continue
        return {"message": f'Import completed into user "{user_id}"'}
    return {"message": f'Import completed into user "{user_id}"'}
| {
"repo_id": "mem0ai/mem0",
"file_path": "openmemory/api/app/routers/backup.py",
"license": "Apache License 2.0",
"lines": 409,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:tests/llms/test_azure_openai_structured.py | from unittest import mock
from mem0.llms.azure_openai_structured import SCOPE, AzureOpenAIStructuredLLM
class DummyAzureKwargs:
    """Minimal stand-in for the azure_kwargs object on an LLM config."""

    def __init__(
        self,
        api_key=None,
        azure_deployment="test-deployment",
        azure_endpoint="https://test-endpoint.openai.azure.com",
        api_version="2024-06-01-preview",
        default_headers=None,
    ):
        # Store every constructor argument verbatim as an attribute.
        vars(self).update(
            api_key=api_key,
            azure_deployment=azure_deployment,
            azure_endpoint=azure_endpoint,
            api_version=api_version,
            default_headers=default_headers,
        )
class DummyConfig:
    """Minimal stand-in for the structured Azure OpenAI LLM config."""

    def __init__(
        self,
        model=None,
        azure_kwargs=None,
        temperature=0.7,
        max_tokens=256,
        top_p=1.0,
        http_client=None,
    ):
        # A falsy azure_kwargs falls back to a fresh DummyAzureKwargs.
        vars(self).update(
            model=model,
            azure_kwargs=azure_kwargs if azure_kwargs else DummyAzureKwargs(),
            temperature=temperature,
            max_tokens=max_tokens,
            top_p=top_p,
            http_client=http_client,
        )
@mock.patch("mem0.llms.azure_openai_structured.AzureOpenAI")
def test_init_with_api_key(mock_azure_openai):
    """An explicit API key is forwarded and no AD token provider is used."""
    cfg = DummyConfig(model="test-model", azure_kwargs=DummyAzureKwargs(api_key="real-key"))
    llm = AzureOpenAIStructuredLLM(cfg)
    assert llm.config.model == "test-model"
    mock_azure_openai.assert_called_once()
    _, kwargs = mock_azure_openai.call_args
    assert kwargs["api_key"] == "real-key"
    assert kwargs["azure_ad_token_provider"] is None
@mock.patch("mem0.llms.azure_openai_structured.AzureOpenAI")
@mock.patch("mem0.llms.azure_openai_structured.get_bearer_token_provider")
@mock.patch("mem0.llms.azure_openai_structured.DefaultAzureCredential")
def test_init_with_default_credential(mock_credential, mock_token_provider, mock_azure_openai):
    """Without an API key the LLM falls back to DefaultAzureCredential."""
    mock_token_provider.return_value = "token-provider"
    cfg = DummyConfig(model=None, azure_kwargs=DummyAzureKwargs(api_key=None))
    llm = AzureOpenAIStructuredLLM(cfg)
    # A default model is filled in when none is configured.
    assert llm.config.model == "gpt-4.1-nano-2025-04-14"
    mock_credential.assert_called_once()
    mock_token_provider.assert_called_once_with(mock_credential.return_value, SCOPE)
    mock_azure_openai.assert_called_once()
    _, kwargs = mock_azure_openai.call_args
    assert kwargs["api_key"] is None
    assert kwargs["azure_ad_token_provider"] == "token-provider"
def test_init_with_env_vars(monkeypatch, mocker):
    """Azure endpoint settings are picked up from LLM_AZURE_* env vars."""
    mock_azure_openai = mocker.patch("mem0.llms.azure_openai_structured.AzureOpenAI")
    env = {
        "LLM_AZURE_DEPLOYMENT": "test-deployment",
        "LLM_AZURE_ENDPOINT": "https://test-endpoint.openai.azure.com",
        "LLM_AZURE_API_VERSION": "2024-06-01-preview",
    }
    for key, value in env.items():
        monkeypatch.setenv(key, value)
    cfg = DummyConfig(model="test-model", azure_kwargs=DummyAzureKwargs(api_key=None))
    AzureOpenAIStructuredLLM(cfg)
    mock_azure_openai.assert_called_once()
    _, kwargs = mock_azure_openai.call_args
    assert kwargs["api_key"] is None
    assert kwargs["azure_deployment"] == "test-deployment"
    assert kwargs["azure_endpoint"] == "https://test-endpoint.openai.azure.com"
    assert kwargs["api_version"] == "2024-06-01-preview"
@mock.patch("mem0.llms.azure_openai_structured.AzureOpenAI")
def test_init_with_placeholder_api_key_uses_default_credential(
    mock_azure_openai,
):
    """A placeholder key is treated like no key: AD credentials are used."""
    with (
        mock.patch("mem0.llms.azure_openai_structured.DefaultAzureCredential") as mock_credential,
        mock.patch("mem0.llms.azure_openai_structured.get_bearer_token_provider") as mock_token_provider,
    ):
        mock_token_provider.return_value = "token-provider"
        cfg = DummyConfig(model=None, azure_kwargs=DummyAzureKwargs(api_key="your-api-key"))
        llm = AzureOpenAIStructuredLLM(cfg)
        assert llm.config.model == "gpt-4.1-nano-2025-04-14"
        mock_credential.assert_called_once()
        mock_token_provider.assert_called_once_with(mock_credential.return_value, SCOPE)
        mock_azure_openai.assert_called_once()
        _, kwargs = mock_azure_openai.call_args
        assert kwargs["api_key"] is None
        assert kwargs["azure_ad_token_provider"] == "token-provider"
| {
"repo_id": "mem0ai/mem0",
"file_path": "tests/llms/test_azure_openai_structured.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mem0ai/mem0:mem0/configs/llms/aws_bedrock.py | import os
from typing import Any, Dict, List, Optional
from mem0.configs.llms.base import BaseLlmConfig
class AWSBedrockConfig(BaseLlmConfig):
"""
Configuration class for AWS Bedrock LLM integration.
Supports all available Bedrock models with automatic provider detection.
"""
def __init__(
self,
model: Optional[str] = None,
temperature: float = 0.1,
max_tokens: int = 2000,
top_p: float = 0.9,
top_k: int = 1,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_region: str = "",
aws_session_token: Optional[str] = None,
aws_profile: Optional[str] = None,
model_kwargs: Optional[Dict[str, Any]] = None,
**kwargs,
):
"""
Initialize AWS Bedrock configuration.
Args:
model: Bedrock model identifier (e.g., "amazon.nova-3-mini-20241119-v1:0")
temperature: Controls randomness (0.0 to 2.0)
max_tokens: Maximum tokens to generate
top_p: Nucleus sampling parameter (0.0 to 1.0)
top_k: Top-k sampling parameter (1 to 40)
aws_access_key_id: AWS access key (optional, uses env vars if not provided)
aws_secret_access_key: AWS secret key (optional, uses env vars if not provided)
aws_region: AWS region for Bedrock service
aws_session_token: AWS session token for temporary credentials
aws_profile: AWS profile name for credentials
model_kwargs: Additional model-specific parameters
**kwargs: Additional arguments passed to base class
"""
super().__init__(
model=model or "anthropic.claude-3-5-sonnet-20240620-v1:0",
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
top_k=top_k,
**kwargs,
)
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key
self.aws_region = aws_region or os.getenv("AWS_REGION", "us-west-2")
self.aws_session_token = aws_session_token
self.aws_profile = aws_profile
self.model_kwargs = model_kwargs or {}
@property
def provider(self) -> str:
"""Get the provider from the model identifier."""
if not self.model or "." not in self.model:
return "unknown"
return self.model.split(".")[0]
@property
def model_name(self) -> str:
"""Get the model name without provider prefix."""
if not self.model or "." not in self.model:
return self.model
return ".".join(self.model.split(".")[1:])
def get_model_config(self) -> Dict[str, Any]:
"""Get model-specific configuration parameters."""
base_config = {
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"top_k": self.top_k,
}
# Add custom model kwargs
base_config.update(self.model_kwargs)
return base_config
def get_aws_config(self) -> Dict[str, Any]:
"""Get AWS configuration parameters."""
config = {
"region_name": self.aws_region,
}
if self.aws_access_key_id:
config["aws_access_key_id"] = self.aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID")
if self.aws_secret_access_key:
config["aws_secret_access_key"] = self.aws_secret_access_key or os.getenv("AWS_SECRET_ACCESS_KEY")
if self.aws_session_token:
config["aws_session_token"] = self.aws_session_token or os.getenv("AWS_SESSION_TOKEN")
if self.aws_profile:
config["profile_name"] = self.aws_profile or os.getenv("AWS_PROFILE")
return config
def validate_model_format(self) -> bool:
"""
Validate that the model identifier follows Bedrock naming convention.
Returns:
True if valid, False otherwise
"""
if not self.model:
return False
# Check if model follows provider.model-name format
if "." not in self.model:
return False
provider, model_name = self.model.split(".", 1)
# Validate provider
valid_providers = [
"ai21", "amazon", "anthropic", "cohere", "meta", "mistral",
"stability", "writer", "deepseek", "gpt-oss", "perplexity",
"snowflake", "titan", "command", "j2", "llama"
]
if provider not in valid_providers:
return False
# Validate model name is not empty
if not model_name:
return False
return True
def get_supported_regions(self) -> List[str]:
"""Get list of AWS regions that support Bedrock."""
return [
"us-east-1",
"us-west-2",
"us-east-2",
"eu-west-1",
"ap-southeast-1",
"ap-northeast-1",
]
def get_model_capabilities(self) -> Dict[str, Any]:
"""Get model capabilities based on provider."""
capabilities = {
"supports_tools": False,
"supports_vision": False,
"supports_streaming": False,
"supports_multimodal": False,
}
if self.provider == "anthropic":
capabilities.update({
"supports_tools": True,
"supports_vision": True,
"supports_streaming": True,
"supports_multimodal": True,
})
elif self.provider == "amazon":
capabilities.update({
"supports_tools": True,
"supports_vision": True,
"supports_streaming": True,
"supports_multimodal": True,
})
elif self.provider == "cohere":
capabilities.update({
"supports_tools": True,
"supports_streaming": True,
})
elif self.provider == "meta":
capabilities.update({
"supports_vision": True,
"supports_streaming": True,
})
elif self.provider == "mistral":
capabilities.update({
"supports_vision": True,
"supports_streaming": True,
})
return capabilities
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/llms/aws_bedrock.py",
"license": "Apache License 2.0",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:mem0/configs/vector_stores/databricks.py | from typing import Any, Dict, Optional
from pydantic import BaseModel, ConfigDict, Field, model_validator
from databricks.sdk.service.vectorsearch import EndpointType, VectorIndexType, PipelineType
class DatabricksConfig(BaseModel):
"""Configuration for Databricks Vector Search vector store."""
workspace_url: str = Field(..., description="Databricks workspace URL")
access_token: Optional[str] = Field(None, description="Personal access token for authentication")
client_id: Optional[str] = Field(None, description="Databricks Service principal client ID")
client_secret: Optional[str] = Field(None, description="Databricks Service principal client secret")
azure_client_id: Optional[str] = Field(None, description="Azure AD application client ID (for Azure Databricks)")
azure_client_secret: Optional[str] = Field(
None, description="Azure AD application client secret (for Azure Databricks)"
)
endpoint_name: str = Field(..., description="Vector search endpoint name")
catalog: str = Field(..., description="The Unity Catalog catalog name")
schema: str = Field(..., description="The Unity Catalog schama name")
table_name: str = Field(..., description="Source Delta table name")
collection_name: str = Field("mem0", description="Vector search index name")
index_type: VectorIndexType = Field("DELTA_SYNC", description="Index type: DELTA_SYNC or DIRECT_ACCESS")
embedding_model_endpoint_name: Optional[str] = Field(
None, description="Embedding model endpoint for Databricks-computed embeddings"
)
embedding_dimension: int = Field(1536, description="Vector embedding dimensions")
endpoint_type: EndpointType = Field("STANDARD", description="Endpoint type: STANDARD or STORAGE_OPTIMIZED")
pipeline_type: PipelineType = Field("TRIGGERED", description="Sync pipeline type: TRIGGERED or CONTINUOUS")
warehouse_name: Optional[str] = Field(None, description="Databricks SQL warehouse Name")
query_type: str = Field("ANN", description="Query type: `ANN` and `HYBRID`")
@model_validator(mode="before")
@classmethod
def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
allowed_fields = set(cls.model_fields.keys())
input_fields = set(values.keys())
extra_fields = input_fields - allowed_fields
if extra_fields:
raise ValueError(
f"Extra fields not allowed: {', '.join(extra_fields)}. Please input only the following fields: {', '.join(allowed_fields)}"
)
return values
@model_validator(mode="after")
def validate_authentication(self):
"""Validate that either access_token or service principal credentials are provided."""
has_token = self.access_token is not None
has_service_principal = (self.client_id is not None and self.client_secret is not None) or (
self.azure_client_id is not None and self.azure_client_secret is not None
)
if not has_token and not has_service_principal:
raise ValueError(
"Either access_token or both client_id/client_secret or azure_client_id/azure_client_secret must be provided"
)
return self
model_config = ConfigDict(arbitrary_types_allowed=True)
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/vector_stores/databricks.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:mem0/vector_stores/databricks.py | import json
import logging
import uuid
from typing import Optional, List
from datetime import datetime, date
from databricks.sdk.service.catalog import ColumnInfo, ColumnTypeName, TableType, DataSourceFormat
from databricks.sdk.service.catalog import TableConstraint, PrimaryKeyConstraint
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.vectorsearch import (
VectorIndexType,
DeltaSyncVectorIndexSpecRequest,
DirectAccessVectorIndexSpec,
EmbeddingSourceColumn,
EmbeddingVectorColumn,
)
from pydantic import BaseModel
from mem0.memory.utils import extract_json
from mem0.vector_stores.base import VectorStoreBase
logger = logging.getLogger(__name__)
class MemoryResult(BaseModel):
id: Optional[str] = None
score: Optional[float] = None
payload: Optional[dict] = None
excluded_keys = {"user_id", "agent_id", "run_id", "hash", "data", "created_at", "updated_at"}
class Databricks(VectorStoreBase):
def __init__(
self,
workspace_url: str,
access_token: Optional[str] = None,
client_id: Optional[str] = None,
client_secret: Optional[str] = None,
azure_client_id: Optional[str] = None,
azure_client_secret: Optional[str] = None,
endpoint_name: str = None,
catalog: str = None,
schema: str = None,
table_name: str = None,
collection_name: str = "mem0",
index_type: str = "DELTA_SYNC",
embedding_model_endpoint_name: Optional[str] = None,
embedding_dimension: int = 1536,
endpoint_type: str = "STANDARD",
pipeline_type: str = "TRIGGERED",
warehouse_name: Optional[str] = None,
query_type: str = "ANN",
):
"""
Initialize the Databricks Vector Search vector store.
Args:
workspace_url (str): Databricks workspace URL.
access_token (str, optional): Personal access token for authentication.
client_id (str, optional): Service principal client ID for authentication.
client_secret (str, optional): Service principal client secret for authentication.
azure_client_id (str, optional): Azure AD application client ID (for Azure Databricks).
azure_client_secret (str, optional): Azure AD application client secret (for Azure Databricks).
endpoint_name (str): Vector search endpoint name.
catalog (str): Unity Catalog catalog name.
schema (str): Unity Catalog schema name.
table_name (str): Source Delta table name.
index_name (str, optional): Vector search index name (default: "mem0").
index_type (str, optional): Index type, either "DELTA_SYNC" or "DIRECT_ACCESS" (default: "DELTA_SYNC").
embedding_model_endpoint_name (str, optional): Embedding model endpoint for Databricks-computed embeddings.
embedding_dimension (int, optional): Vector embedding dimensions (default: 1536).
endpoint_type (str, optional): Endpoint type, either "STANDARD" or "STORAGE_OPTIMIZED" (default: "STANDARD").
pipeline_type (str, optional): Sync pipeline type, either "TRIGGERED" or "CONTINUOUS" (default: "TRIGGERED").
warehouse_name (str, optional): Databricks SQL warehouse Name (if using SQL warehouse).
query_type (str, optional): Query type, either "ANN" or "HYBRID" (default: "ANN").
"""
# Basic identifiers
self.workspace_url = workspace_url
self.endpoint_name = endpoint_name
self.catalog = catalog
self.schema = schema
self.table_name = table_name
self.fully_qualified_table_name = f"{self.catalog}.{self.schema}.{self.table_name}"
self.index_name = collection_name
self.fully_qualified_index_name = f"{self.catalog}.{self.schema}.{self.index_name}"
# Configuration
self.index_type = index_type
self.embedding_model_endpoint_name = embedding_model_endpoint_name
self.embedding_dimension = embedding_dimension
self.endpoint_type = endpoint_type
self.pipeline_type = pipeline_type
self.query_type = query_type
# Schema
self.columns = [
ColumnInfo(
name="memory_id",
type_name=ColumnTypeName.STRING,
type_text="string",
type_json='{"type":"string"}',
nullable=False,
comment="Primary key",
position=0,
),
ColumnInfo(
name="hash",
type_name=ColumnTypeName.STRING,
type_text="string",
type_json='{"type":"string"}',
comment="Hash of the memory content",
position=1,
),
ColumnInfo(
name="agent_id",
type_name=ColumnTypeName.STRING,
type_text="string",
type_json='{"type":"string"}',
comment="ID of the agent",
position=2,
),
ColumnInfo(
name="run_id",
type_name=ColumnTypeName.STRING,
type_text="string",
type_json='{"type":"string"}',
comment="ID of the run",
position=3,
),
ColumnInfo(
name="user_id",
type_name=ColumnTypeName.STRING,
type_text="string",
type_json='{"type":"string"}',
comment="ID of the user",
position=4,
),
ColumnInfo(
name="memory",
type_name=ColumnTypeName.STRING,
type_text="string",
type_json='{"type":"string"}',
comment="Memory content",
position=5,
),
ColumnInfo(
name="metadata",
type_name=ColumnTypeName.STRING,
type_text="string",
type_json='{"type":"string"}',
comment="Additional metadata",
position=6,
),
ColumnInfo(
name="created_at",
type_name=ColumnTypeName.TIMESTAMP,
type_text="timestamp",
type_json='{"type":"timestamp"}',
comment="Creation timestamp",
position=7,
),
ColumnInfo(
name="updated_at",
type_name=ColumnTypeName.TIMESTAMP,
type_text="timestamp",
type_json='{"type":"timestamp"}',
comment="Last update timestamp",
position=8,
),
]
if self.index_type == VectorIndexType.DIRECT_ACCESS:
self.columns.append(
ColumnInfo(
name="embedding",
type_name=ColumnTypeName.ARRAY,
type_text="array<float>",
type_json='{"type":"array","element":"float","element_nullable":false}',
nullable=True,
comment="Embedding vector",
position=9,
)
)
self.column_names = [col.name for col in self.columns]
# Initialize Databricks workspace client
client_config = {}
if client_id and client_secret:
client_config.update(
{
"host": workspace_url,
"client_id": client_id,
"client_secret": client_secret,
}
)
elif azure_client_id and azure_client_secret:
client_config.update(
{
"host": workspace_url,
"azure_client_id": azure_client_id,
"azure_client_secret": azure_client_secret,
}
)
elif access_token:
client_config.update({"host": workspace_url, "token": access_token})
else:
# Try automatic authentication
client_config["host"] = workspace_url
try:
self.client = WorkspaceClient(**client_config)
logger.info("Initialized Databricks workspace client")
except Exception as e:
logger.error(f"Failed to initialize Databricks workspace client: {e}")
raise
# Get the warehouse ID by name
self.warehouse_id = next((w.id for w in self.client.warehouses.list() if w.name == warehouse_name), None)
# Initialize endpoint (required in Databricks)
self._ensure_endpoint_exists()
# Check if index exists and create if needed
collections = self.list_cols()
if self.fully_qualified_index_name not in collections:
self.create_col()
def _ensure_endpoint_exists(self):
"""Ensure the vector search endpoint exists, create if it doesn't."""
try:
self.client.vector_search_endpoints.get_endpoint(endpoint_name=self.endpoint_name)
logger.info(f"Vector search endpoint '{self.endpoint_name}' already exists")
except Exception:
# Endpoint doesn't exist, create it
try:
logger.info(f"Creating vector search endpoint '{self.endpoint_name}' with type '{self.endpoint_type}'")
self.client.vector_search_endpoints.create_endpoint_and_wait(
name=self.endpoint_name, endpoint_type=self.endpoint_type
)
logger.info(f"Successfully created vector search endpoint '{self.endpoint_name}'")
except Exception as e:
logger.error(f"Failed to create vector search endpoint '{self.endpoint_name}': {e}")
raise
def _ensure_source_table_exists(self):
"""Ensure the source Delta table exists with the proper schema."""
check = self.client.tables.exists(self.fully_qualified_table_name)
if check.table_exists:
logger.info(f"Source table '{self.fully_qualified_table_name}' already exists")
else:
logger.info(f"Source table '{self.fully_qualified_table_name}' does not exist, creating it...")
self.client.tables.create(
name=self.table_name,
catalog_name=self.catalog,
schema_name=self.schema,
table_type=TableType.MANAGED,
data_source_format=DataSourceFormat.DELTA,
storage_location=None, # Use default storage location
columns=self.columns,
properties={"delta.enableChangeDataFeed": "true"},
)
logger.info(f"Successfully created source table '{self.fully_qualified_table_name}'")
self.client.table_constraints.create(
full_name_arg="logistics_dev.ai.dev_memory",
constraint=TableConstraint(
primary_key_constraint=PrimaryKeyConstraint(
name="pk_dev_memory", # Name of the primary key constraint
child_columns=["memory_id"], # Columns that make up the primary key
)
),
)
logger.info(
f"Successfully created primary key constraint on 'memory_id' for table '{self.fully_qualified_table_name}'"
)
def create_col(self, name=None, vector_size=None, distance=None):
"""
Create a new collection (index).
Args:
name (str, optional): Index name. If provided, will create a new index using the provided source_table_name.
vector_size (int, optional): Vector dimension size.
distance (str, optional): Distance metric (not directly applicable for Databricks).
Returns:
The index object.
"""
# Determine index configuration
embedding_dims = vector_size or self.embedding_dimension
embedding_source_columns = [
EmbeddingSourceColumn(
name="memory",
embedding_model_endpoint_name=self.embedding_model_endpoint_name,
)
]
logger.info(f"Creating vector search index '{self.fully_qualified_index_name}'")
# First, ensure the source Delta table exists
self._ensure_source_table_exists()
if self.index_type not in [VectorIndexType.DELTA_SYNC, VectorIndexType.DIRECT_ACCESS]:
raise ValueError("index_type must be either 'DELTA_SYNC' or 'DIRECT_ACCESS'")
try:
if self.index_type == VectorIndexType.DELTA_SYNC:
index = self.client.vector_search_indexes.create_index(
name=self.fully_qualified_index_name,
endpoint_name=self.endpoint_name,
primary_key="memory_id",
index_type=self.index_type,
delta_sync_index_spec=DeltaSyncVectorIndexSpecRequest(
source_table=self.fully_qualified_table_name,
pipeline_type=self.pipeline_type,
columns_to_sync=self.column_names,
embedding_source_columns=embedding_source_columns,
),
)
logger.info(
f"Successfully created vector search index '{self.fully_qualified_index_name}' with DELTA_SYNC type"
)
return index
elif self.index_type == VectorIndexType.DIRECT_ACCESS:
index = self.client.vector_search_indexes.create_index(
name=self.fully_qualified_index_name,
endpoint_name=self.endpoint_name,
primary_key="memory_id",
index_type=self.index_type,
direct_access_index_spec=DirectAccessVectorIndexSpec(
embedding_source_columns=embedding_source_columns,
embedding_vector_columns=[
EmbeddingVectorColumn(name="embedding", embedding_dimension=embedding_dims)
],
),
)
logger.info(
f"Successfully created vector search index '{self.fully_qualified_index_name}' with DIRECT_ACCESS type"
)
return index
except Exception as e:
logger.error(f"Error making index_type: {self.index_type} for index {self.fully_qualified_index_name}: {e}")
def _format_sql_value(self, v):
"""
Format a Python value into a safe SQL literal for Databricks.
"""
if v is None:
return "NULL"
if isinstance(v, bool):
return "TRUE" if v else "FALSE"
if isinstance(v, (int, float)):
return str(v)
if isinstance(v, (datetime, date)):
return f"'{v.isoformat()}'"
if isinstance(v, list):
# Render arrays (assume numeric or string elements)
elems = []
for x in v:
if x is None:
elems.append("NULL")
elif isinstance(x, (int, float)):
elems.append(str(x))
else:
s = str(x).replace("'", "''")
elems.append(f"'{s}'")
return f"array({', '.join(elems)})"
if isinstance(v, dict):
try:
s = json.dumps(v)
except Exception:
s = str(v)
s = s.replace("'", "''")
return f"'{s}'"
# Fallback: treat as string
s = str(v).replace("'", "''")
return f"'{s}'"
def insert(self, vectors: list, payloads: list = None, ids: list = None):
"""
Insert vectors into the index.
Args:
vectors (List[List[float]]): List of vectors to insert.
payloads (List[Dict], optional): List of payloads corresponding to vectors.
ids (List[str], optional): List of IDs corresponding to vectors.
"""
# Determine the number of items to process
num_items = len(payloads) if payloads else len(vectors) if vectors else 0
value_tuples = []
for i in range(num_items):
values = []
for col in self.columns:
if col.name == "memory_id":
val = ids[i] if ids and i < len(ids) else str(uuid.uuid4())
elif col.name == "embedding":
val = vectors[i] if vectors and i < len(vectors) else []
elif col.name == "memory":
val = payloads[i].get("data") if payloads and i < len(payloads) else None
else:
val = payloads[i].get(col.name) if payloads and i < len(payloads) else None
values.append(val)
formatted = [self._format_sql_value(v) for v in values]
value_tuples.append(f"({', '.join(formatted)})")
insert_sql = f"INSERT INTO {self.fully_qualified_table_name} ({', '.join(self.column_names)}) VALUES {', '.join(value_tuples)}"
# Execute the insert
try:
response = self.client.statement_execution.execute_statement(
statement=insert_sql, warehouse_id=self.warehouse_id, wait_timeout="30s"
)
if response.status.state.value == "SUCCEEDED":
logger.info(
f"Successfully inserted {num_items} items into Delta table {self.fully_qualified_table_name}"
)
return
else:
logger.error(f"Failed to insert items: {response.status.error}")
raise Exception(f"Insert operation failed: {response.status.error}")
except Exception as e:
logger.error(f"Insert operation failed: {e}")
raise
def search(self, query: str, vectors: list, limit: int = 5, filters: dict = None) -> List[MemoryResult]:
"""
Search for similar vectors or text using the Databricks Vector Search index.
Args:
query (str): Search query text (for text-based search).
vectors (list): Query vector (for vector-based search).
limit (int): Maximum number of results.
filters (dict): Filters to apply.
Returns:
List of MemoryResult objects.
"""
try:
filters_json = json.dumps(filters) if filters else None
# Choose query type
if self.index_type == VectorIndexType.DELTA_SYNC and query:
# Text-based search
sdk_results = self.client.vector_search_indexes.query_index(
index_name=self.fully_qualified_index_name,
columns=self.column_names,
query_text=query,
num_results=limit,
query_type=self.query_type,
filters_json=filters_json,
)
elif self.index_type == VectorIndexType.DIRECT_ACCESS and vectors:
# Vector-based search
sdk_results = self.client.vector_search_indexes.query_index(
index_name=self.fully_qualified_index_name,
columns=self.column_names,
query_vector=vectors,
num_results=limit,
query_type=self.query_type,
filters_json=filters_json,
)
else:
raise ValueError("Must provide query text for DELTA_SYNC or vectors for DIRECT_ACCESS.")
# Parse results
result_data = sdk_results.result if hasattr(sdk_results, "result") else sdk_results
data_array = result_data.data_array if getattr(result_data, "data_array", None) else []
memory_results = []
for row in data_array:
# Map columns to values
row_dict = dict(zip(self.column_names, row)) if isinstance(row, (list, tuple)) else row
score = row_dict.get("score") or (
row[-1] if isinstance(row, (list, tuple)) and len(row) > len(self.column_names) else None
)
payload = {k: row_dict.get(k) for k in self.column_names}
payload["data"] = payload.get("memory", "")
memory_id = row_dict.get("memory_id") or row_dict.get("id")
memory_results.append(MemoryResult(id=memory_id, score=score, payload=payload))
return memory_results
except Exception as e:
logger.error(f"Search failed: {e}")
raise
def delete(self, vector_id):
"""
Delete a vector by ID from the Delta table.
Args:
vector_id (str): ID of the vector to delete.
"""
try:
logger.info(f"Deleting vector with ID {vector_id} from Delta table {self.fully_qualified_table_name}")
delete_sql = f"DELETE FROM {self.fully_qualified_table_name} WHERE memory_id = '{vector_id}'"
response = self.client.statement_execution.execute_statement(
statement=delete_sql, warehouse_id=self.warehouse_id, wait_timeout="30s"
)
if response.status.state.value == "SUCCEEDED":
logger.info(f"Successfully deleted vector with ID {vector_id}")
else:
logger.error(f"Failed to delete vector with ID {vector_id}: {response.status.error}")
except Exception as e:
logger.error(f"Delete operation failed for vector ID {vector_id}: {e}")
raise
def update(self, vector_id=None, vector=None, payload=None):
"""
Update a vector and its payload in the Delta table.
Args:
vector_id (str): ID of the vector to update.
vector (list, optional): New vector values.
payload (dict, optional): New payload data.
"""
update_sql = f"UPDATE {self.fully_qualified_table_name} SET "
set_clauses = []
if not vector_id:
logger.error("vector_id is required for update operation")
return
if vector is not None:
if not isinstance(vector, list):
logger.error("vector must be a list of float values")
return
set_clauses.append(f"embedding = {vector}")
if payload:
if not isinstance(payload, dict):
logger.error("payload must be a dictionary")
return
for key, value in payload.items():
if key not in excluded_keys:
set_clauses.append(f"{key} = '{value}'")
if not set_clauses:
logger.error("No fields to update")
return
update_sql += ", ".join(set_clauses)
update_sql += f" WHERE memory_id = '{vector_id}'"
try:
logger.info(f"Updating vector with ID {vector_id} in Delta table {self.fully_qualified_table_name}")
response = self.client.statement_execution.execute_statement(
statement=update_sql, warehouse_id=self.warehouse_id, wait_timeout="30s"
)
if response.status.state.value == "SUCCEEDED":
logger.info(f"Successfully updated vector with ID {vector_id}")
else:
logger.error(f"Failed to update vector with ID {vector_id}: {response.status.error}")
except Exception as e:
logger.error(f"Update operation failed for vector ID {vector_id}: {e}")
raise
def get(self, vector_id) -> MemoryResult:
"""
Retrieve a vector by ID.
Args:
vector_id (str): ID of the vector to retrieve.
Returns:
MemoryResult: The retrieved vector.
"""
try:
# Use query with ID filter to retrieve the specific vector
filters = {"memory_id": vector_id}
filters_json = json.dumps(filters)
results = self.client.vector_search_indexes.query_index(
index_name=self.fully_qualified_index_name,
columns=self.column_names,
query_text=" ", # Empty query, rely on filters
num_results=1,
query_type=self.query_type,
filters_json=filters_json,
)
# Process results
result_data = results.result if hasattr(results, "result") else results
data_array = result_data.data_array if hasattr(result_data, "data_array") else []
if not data_array:
raise KeyError(f"Vector with ID {vector_id} not found")
result = data_array[0]
columns = columns = [col.name for col in results.manifest.columns] if results.manifest and results.manifest.columns else []
row_data = dict(zip(columns, result))
# Build payload following the standard schema
payload = {
"hash": row_data.get("hash", "unknown"),
"data": row_data.get("memory", row_data.get("data", "unknown")),
"created_at": row_data.get("created_at"),
}
# Add updated_at if available
if "updated_at" in row_data:
payload["updated_at"] = row_data.get("updated_at")
# Add optional fields
for field in ["agent_id", "run_id", "user_id"]:
if field in row_data:
payload[field] = row_data[field]
# Add metadata
if "metadata" in row_data and row_data.get('metadata'):
try:
metadata = json.loads(extract_json(row_data["metadata"]))
payload.update(metadata)
except (json.JSONDecodeError, TypeError):
logger.warning(f"Failed to parse metadata: {row_data.get('metadata')}")
memory_id = row_data.get("memory_id", row_data.get("memory_id", vector_id))
return MemoryResult(id=memory_id, payload=payload)
except Exception as e:
logger.error(f"Failed to get vector with ID {vector_id}: {e}")
raise
def list_cols(self) -> List[str]:
"""
List all collections (indexes).
Returns:
List of index names.
"""
try:
indexes = self.client.vector_search_indexes.list_indexes(endpoint_name=self.endpoint_name)
return [idx.name for idx in indexes]
except Exception as e:
logger.error(f"Failed to list collections: {e}")
raise
def delete_col(self):
"""
Delete the current collection (index).
"""
try:
# Try fully qualified first
try:
self.client.vector_search_indexes.delete_index(index_name=self.fully_qualified_index_name)
logger.info(f"Successfully deleted index '{self.fully_qualified_index_name}'")
except Exception:
self.client.vector_search_indexes.delete_index(index_name=self.index_name)
logger.info(f"Successfully deleted index '{self.index_name}' (short name)")
except Exception as e:
logger.error(f"Failed to delete index '{self.index_name}': {e}")
raise
def col_info(self, name=None):
"""
Get information about a collection (index).
Args:
name (str, optional): Index name. Defaults to current index.
Returns:
Dict: Index information.
"""
try:
index_name = name or self.index_name
index = self.client.vector_search_indexes.get_index(index_name=index_name)
return {"name": index.name, "fields": self.columns}
except Exception as e:
logger.error(f"Failed to get info for index '{name or self.index_name}': {e}")
raise
def list(self, filters: dict = None, limit: int = None) -> list[MemoryResult]:
"""
List all recent created memories from the vector store.
Args:
filters (dict, optional): Filters to apply.
limit (int, optional): Maximum number of results.
Returns:
List containing list of MemoryResult objects.
"""
try:
filters_json = json.dumps(filters) if filters else None
num_results = limit or 100
columns = self.column_names
sdk_results = self.client.vector_search_indexes.query_index(
index_name=self.fully_qualified_index_name,
columns=columns,
query_text=" ",
num_results=num_results,
query_type=self.query_type,
filters_json=filters_json,
)
result_data = sdk_results.result if hasattr(sdk_results, "result") else sdk_results
data_array = result_data.data_array if hasattr(result_data, "data_array") else []
memory_results = []
for row in data_array:
row_dict = dict(zip(columns, row)) if isinstance(row, (list, tuple)) else row
payload = {k: row_dict.get(k) for k in columns}
# Parse metadata if present
if "metadata" in payload and payload["metadata"]:
try:
payload.update(json.loads(payload["metadata"]))
except Exception:
pass
memory_id = row_dict.get("memory_id") or row_dict.get("id")
payload['data'] = payload['memory']
memory_results.append(MemoryResult(id=memory_id, payload=payload))
return [memory_results]
except Exception as e:
logger.error(f"Failed to list memories: {e}")
return []
def reset(self):
"""Reset the vector search index and underlying source table.
This will attempt to delete the existing index (both fully qualified and short name forms
for robustness), drop the backing Delta table, recreate the table with the expected schema,
and finally recreate the index. Use with caution as all existing data will be removed.
"""
fq_index = self.fully_qualified_index_name
logger.warning(f"Resetting Databricks vector search index '{fq_index}'...")
try:
# Try deleting via fully qualified name first
try:
self.client.vector_search_indexes.delete_index(index_name=fq_index)
logger.info(f"Deleted index '{fq_index}'")
except Exception as e_fq:
logger.debug(f"Failed deleting fully qualified index name '{fq_index}': {e_fq}. Trying short name...")
try:
# Fallback to existing helper which may use short name
self.delete_col()
except Exception as e_short:
logger.debug(f"Failed deleting short index name '{self.index_name}': {e_short}")
# Drop the backing table (if it exists)
try:
drop_sql = f"DROP TABLE IF EXISTS {self.fully_qualified_table_name}"
resp = self.client.statement_execution.execute_statement(
statement=drop_sql, warehouse_id=self.warehouse_id, wait_timeout="30s"
)
if getattr(resp.status, "state", None) == "SUCCEEDED":
logger.info(f"Dropped table '{self.fully_qualified_table_name}'")
else:
logger.warning(
f"Attempted to drop table '{self.fully_qualified_table_name}' but state was {getattr(resp.status, 'state', 'UNKNOWN')}: {getattr(resp.status, 'error', None)}"
)
except Exception as e_drop:
logger.warning(f"Failed to drop table '{self.fully_qualified_table_name}': {e_drop}")
# Recreate table & index
self._ensure_source_table_exists()
self.create_col()
logger.info(f"Successfully reset index '{fq_index}'")
except Exception as e:
logger.error(f"Error resetting index '{fq_index}': {e}")
raise
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/vector_stores/databricks.py",
"license": "Apache License 2.0",
"lines": 683,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:tests/vector_stores/test_databricks.py | from types import SimpleNamespace
from unittest.mock import MagicMock, patch
from databricks.sdk.service.vectorsearch import VectorIndexType, QueryVectorIndexResponse, ResultManifest, ResultData, ColumnInfo
from mem0.vector_stores.databricks import Databricks
import pytest
# ---------------------- Fixtures ---------------------- #
def _make_status(state="SUCCEEDED", error=None):
return SimpleNamespace(state=SimpleNamespace(value=state), error=error)
def _make_exec_response(state="SUCCEEDED", error=None):
return SimpleNamespace(status=_make_status(state, error))
@pytest.fixture
def mock_workspace_client():
    """Patch WorkspaceClient and provide a fully mocked client with required sub-clients.

    Yields the mocked client so tests can both override return values and
    assert on calls made during Databricks store construction.
    """
    with patch("mem0.vector_stores.databricks.WorkspaceClient") as mock_wc_cls:
        mock_wc = MagicMock(name="WorkspaceClient")
        # warehouses.list -> iterable of objects with name/id
        warehouse_obj = SimpleNamespace(name="test-warehouse", id="wh-123")
        mock_wc.warehouses.list.return_value = [warehouse_obj]
        # vector search endpoints: first get_endpoint raises ("not found") so the
        # store creates the endpoint; a second lookup then succeeds.
        mock_wc.vector_search_endpoints.get_endpoint.side_effect = [Exception("not found"), MagicMock()]
        mock_wc.vector_search_endpoints.create_endpoint_and_wait.return_value = None
        # tables.exists -> table absent by default, forcing the creation path
        exists_obj = SimpleNamespace(table_exists=False)
        mock_wc.tables.exists.return_value = exists_obj
        mock_wc.tables.create.return_value = None
        mock_wc.table_constraints.create.return_value = None
        # vector_search_indexes list/create/query/delete defaults (no indexes,
        # empty query result); individual tests override query_index as needed
        mock_wc.vector_search_indexes.list_indexes.return_value = []
        mock_wc.vector_search_indexes.create_index.return_value = SimpleNamespace(name="catalog.schema.mem0")
        mock_wc.vector_search_indexes.query_index.return_value = SimpleNamespace(result=SimpleNamespace(data_array=[]))
        mock_wc.vector_search_indexes.delete_index.return_value = None
        mock_wc.vector_search_indexes.get_index.return_value = SimpleNamespace(name="mem0")
        # statement execution -> always reports SUCCEEDED unless a test overrides it
        mock_wc.statement_execution.execute_statement.return_value = _make_exec_response()
        mock_wc_cls.return_value = mock_wc
        yield mock_wc
@pytest.fixture
def db_instance_delta(mock_workspace_client):
    """Databricks store built with a DELTA_SYNC index (server-managed embeddings)."""
    return Databricks(
        workspace_url="https://test",
        access_token="tok",
        endpoint_name="vs-endpoint",
        catalog="catalog",
        schema="schema",
        table_name="table",
        collection_name="mem0",
        warehouse_name="test-warehouse",
        index_type=VectorIndexType.DELTA_SYNC,
        embedding_model_endpoint_name="embedding-endpoint",
    )
@pytest.fixture
def db_instance_direct(mock_workspace_client):
    """Databricks store built with a DIRECT_ACCESS index (client-supplied vectors,
    fixed embedding_dimension=4)."""
    # For DIRECT_ACCESS we want table exists path to skip creation; adjust mock first
    mock_workspace_client.tables.exists.return_value = SimpleNamespace(table_exists=True)
    return Databricks(
        workspace_url="https://test",
        access_token="tok",
        endpoint_name="vs-endpoint",
        catalog="catalog",
        schema="schema",
        table_name="table",
        collection_name="mem0",
        warehouse_name="test-warehouse",
        index_type=VectorIndexType.DIRECT_ACCESS,
        embedding_dimension=4,
        embedding_model_endpoint_name="embedding-endpoint",
    )
# ---------------------- Initialization Tests ---------------------- #
def test_initialization_delta_sync(db_instance_delta, mock_workspace_client):
    """Delta-sync construction should create the endpoint, the table and the index."""
    # Endpoint ensure called (first attempt get_endpoint fails then create)
    mock_workspace_client.vector_search_endpoints.create_endpoint_and_wait.assert_called_once()
    # Table creation sequence
    mock_workspace_client.tables.create.assert_called_once()
    # Index created with expected args
    index_kwargs = mock_workspace_client.vector_search_indexes.create_index.call_args.kwargs
    assert index_kwargs["index_type"] == VectorIndexType.DELTA_SYNC
    assert index_kwargs["primary_key"] == "memory_id"
def test_initialization_direct_access(db_instance_direct, mock_workspace_client):
    """DIRECT_ACCESS construction exposes an embedding column and index type."""
    # DIRECT_ACCESS should include embedding column
    assert "embedding" in db_instance_direct.column_names
    create_kwargs = mock_workspace_client.vector_search_indexes.create_index.call_args.kwargs
    assert create_kwargs["index_type"] == VectorIndexType.DIRECT_ACCESS
def test_create_col_invalid_type(mock_workspace_client):
    """create_col must raise ValueError for an unsupported index type."""
    init_kwargs = dict(
        workspace_url="https://test",
        access_token="tok",
        endpoint_name="vs-endpoint",
        catalog="catalog",
        schema="schema",
        table_name="table",
        collection_name="mem0",
        warehouse_name="test-warehouse",
        index_type=VectorIndexType.DELTA_SYNC,
    )
    inst = Databricks(**init_kwargs)
    # Corrupt the index type after construction to hit the validation branch.
    inst.index_type = "BAD_TYPE"
    with pytest.raises(ValueError):
        inst.create_col()
# ---------------------- Insert Tests ---------------------- #
def test_insert_generates_sql(db_instance_direct, mock_workspace_client):
    """insert() should render an INSERT statement with ids and array() embeddings."""
    payload = {
        "data": "hello world",
        "user_id": "u1",
        "agent_id": "a1",
        "run_id": "r1",
        "metadata": '{"topic":"greeting"}',
        "hash": "h1",
    }
    db_instance_direct.insert(vectors=[[0.1, 0.2, 0.3, 0.4]], payloads=[payload], ids=["id1"])
    call = mock_workspace_client.statement_execution.execute_statement.call_args
    sql = call.kwargs["statement"] if "statement" in call.kwargs else call.args[0]
    assert "INSERT INTO" in sql
    assert "catalog.schema.table" in sql
    assert "id1" in sql
    # Embedding list rendered as a SQL array literal
    assert "array(0.1, 0.2, 0.3, 0.4)" in sql
# ---------------------- Search Tests ---------------------- #
def test_search_delta_sync_text(db_instance_delta, mock_workspace_client):
    """Text search on a DELTA_SYNC index maps a result row into id/score/payload."""
    row = [
        "id1", "hash1", "agent1", "run1", "user1",
        "memory text", '{"topic":"greeting"}',
        "2024-01-01T00:00:00", "2024-01-01T00:00:00",
        0.42,
    ]
    fake_response = SimpleNamespace(result=SimpleNamespace(data_array=[row]))
    mock_workspace_client.vector_search_indexes.query_index.return_value = fake_response
    results = db_instance_delta.search(query="hello", vectors=None, limit=1)
    mock_workspace_client.vector_search_indexes.query_index.assert_called_once()
    assert len(results) == 1
    first = results[0]
    assert first.id == "id1"
    assert first.score == 0.42
    assert first.payload["data"] == "memory text"
def test_search_direct_access_vector(db_instance_direct, mock_workspace_client):
    """Vector search on a DIRECT_ACCESS index returns mapped results with scores."""
    row = [
        "id2", "hash2", "agent2", "run2", "user2",
        "memory two", '{"topic":"info"}',
        "2024-01-02T00:00:00", "2024-01-02T00:00:00",
        [0.1, 0.2, 0.3, 0.4],
        0.77,
    ]
    fake_response = SimpleNamespace(result=SimpleNamespace(data_array=[row]))
    mock_workspace_client.vector_search_indexes.query_index.return_value = fake_response
    results = db_instance_direct.search(query="", vectors=[0.1, 0.2, 0.3, 0.4], limit=1)
    assert len(results) == 1
    match = results[0]
    assert match.id == "id2"
    assert match.score == 0.77
def test_search_missing_params_raises(db_instance_delta):
    """A DELTA_SYNC search without query text must raise ValueError."""
    with pytest.raises(ValueError):
        # DELTA_SYNC requires query text
        db_instance_delta.search(query="", vectors=[0.1, 0.2])
# ---------------------- Delete Tests ---------------------- #
def test_delete_vector(db_instance_delta, mock_workspace_client):
    """delete() should issue a DELETE statement targeting the given id."""
    db_instance_delta.delete("id-delete")
    call = mock_workspace_client.statement_execution.execute_statement.call_args
    sql = call.kwargs.get("statement") or call.args[0]
    assert "DELETE FROM" in sql
    assert "id-delete" in sql
# ---------------------- Update Tests ---------------------- #
def test_update_vector(db_instance_direct, mock_workspace_client):
    """update() should SET embedding and custom fields while excluding user_id."""
    payload = {"custom": "val", "user_id": "skip"}  # user_id should be excluded
    db_instance_direct.update(
        vector_id="id-upd",
        vector=[0.4, 0.5, 0.6, 0.7],
        payload=payload,
    )
    call = mock_workspace_client.statement_execution.execute_statement.call_args
    sql = call.kwargs.get("statement") or call.args[0]
    assert "UPDATE" in sql
    assert "id-upd" in sql
    assert "embedding = [0.4, 0.5, 0.6, 0.7]" in sql
    assert "custom = 'val'" in sql
    # The identity column must not be written back.
    assert "user_id" not in sql
# ---------------------- Get Tests ---------------------- #
def test_get_vector(db_instance_delta, mock_workspace_client):
    """get() should map a queried row into OutputData, merging metadata keys."""
    column_names = [
        "memory_id", "hash", "agent_id", "run_id", "user_id",
        "memory", "metadata", "created_at", "updated_at", "score",
    ]
    row = [
        "id-get", "h", "a", "r", "u",
        "some memory", '{"tag":"x"}',
        "2024-01-01T00:00:00", "2024-01-01T00:00:00", "0.99",
    ]
    mock_workspace_client.vector_search_indexes.query_index.return_value = QueryVectorIndexResponse(
        manifest=ResultManifest(columns=[ColumnInfo(name=name) for name in column_names]),
        result=ResultData(data_array=[row]),
    )
    res = db_instance_delta.get("id-get")
    assert res.id == "id-get"
    assert res.payload["data"] == "some memory"
    # Metadata JSON is flattened into the payload.
    assert res.payload["tag"] == "x"
# ---------------------- Collection Info / Listing Tests ---------------------- #
def test_list_cols(db_instance_delta, mock_workspace_client):
    """list_cols() should surface every index name returned by the SDK."""
    index_names = ["catalog.schema.mem0", "catalog.schema.other"]
    mock_workspace_client.vector_search_indexes.list_indexes.return_value = [
        SimpleNamespace(name=name) for name in index_names
    ]
    cols = db_instance_delta.list_cols()
    for name in index_names:
        assert name in cols
def test_col_info(db_instance_delta):
    """col_info() should report the collection name and include the primary key field."""
    info = db_instance_delta.col_info()
    assert info["name"] == "mem0"
    field_names = [col.name for col in info["fields"]]
    assert "memory_id" in field_names
def test_list_memories(db_instance_delta, mock_workspace_client):
    """list() should wrap the mapped rows in a single-element outer list."""
    column_names = [
        "memory_id", "hash", "agent_id", "run_id", "user_id",
        "memory", "metadata", "created_at", "updated_at", "score",
    ]
    row = [
        "id-get", "h", "a", "r", "u",
        "some memory", '{"tag":"x"}',
        "2024-01-01T00:00:00", "2024-01-01T00:00:00", "0.99",
    ]
    mock_workspace_client.vector_search_indexes.query_index.return_value = QueryVectorIndexResponse(
        manifest=ResultManifest(columns=[ColumnInfo(name=name) for name in column_names]),
        result=ResultData(data_array=[row]),
    )
    res = db_instance_delta.list(limit=1)
    assert isinstance(res, list)
    assert len(res[0]) == 1
    assert res[0][0].id == "id-get"
# ---------------------- Reset Tests ---------------------- #
def test_reset(db_instance_delta, mock_workspace_client):
    """reset() should survive a failed fully-qualified delete and recreate the index."""
    # Make delete raise to exercise fallback path then allow recreation
    delete_mock = mock_workspace_client.vector_search_indexes.delete_index
    delete_mock.side_effect = [Exception("fail fq"), None, None]
    with patch.object(db_instance_delta, "create_col", wraps=db_instance_delta.create_col) as create_spy:
        db_instance_delta.reset()
    assert create_spy.called
| {
"repo_id": "mem0ai/mem0",
"file_path": "tests/vector_stores/test_databricks.py",
"license": "Apache License 2.0",
"lines": 281,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mem0ai/mem0:mem0/memory/kuzu_memory.py | import logging
from mem0.memory.utils import format_entities
try:
import kuzu
except ImportError:
raise ImportError("kuzu is not installed. Please install it using pip install kuzu")
try:
from rank_bm25 import BM25Okapi
except ImportError:
raise ImportError("rank_bm25 is not installed. Please install it using pip install rank-bm25")
from mem0.graphs.tools import (
DELETE_MEMORY_STRUCT_TOOL_GRAPH,
DELETE_MEMORY_TOOL_GRAPH,
EXTRACT_ENTITIES_STRUCT_TOOL,
EXTRACT_ENTITIES_TOOL,
RELATIONS_STRUCT_TOOL,
RELATIONS_TOOL,
)
from mem0.graphs.utils import EXTRACT_RELATIONS_PROMPT, get_delete_messages
from mem0.utils.factory import EmbedderFactory, LlmFactory
logger = logging.getLogger(__name__)
class MemoryGraph:
    """Graph memory backend backed by an embedded Kuzu database.

    Entities are stored as ``Entity`` nodes carrying a fixed-size embedding,
    and relations as ``CONNECTED_TO`` edges. An LLM extracts entities and
    relationships from free text; cosine similarity over embeddings matches
    new entities against existing nodes before merging.
    """

    def __init__(self, config):
        """Initialize the embedder, Kuzu connection, schema, and LLM.

        Args:
            config: mem0 configuration object exposing ``embedder``,
                ``vector_store``, ``graph_store`` and ``llm`` sections.

        Raises:
            ValueError: If the embedder does not report a positive
                ``embedding_dims`` (required for Kuzu's FLOAT[n] casts).
        """
        self.config = config
        self.embedding_model = EmbedderFactory.create(
            self.config.embedder.provider,
            self.config.embedder.config,
            self.config.vector_store.config,
        )
        self.embedding_dims = self.embedding_model.config.embedding_dims
        if self.embedding_dims is None or self.embedding_dims <= 0:
            raise ValueError(f"embedding_dims must be a positive integer. Given: {self.embedding_dims}")
        self.db = kuzu.Database(self.config.graph_store.config.db)
        self.graph = kuzu.Connection(self.db)
        # Labels carry the leading ':' so they can be spliced directly into
        # Cypher patterns such as "(n :Entity)".
        self.node_label = ":Entity"
        self.rel_label = ":CONNECTED_TO"
        self.kuzu_create_schema()
        # Default to openai if no specific provider is configured
        self.llm_provider = "openai"
        if self.config.llm and self.config.llm.provider:
            self.llm_provider = self.config.llm.provider
        # graph_store-specific LLM settings override the global LLM provider.
        if self.config.graph_store and self.config.graph_store.llm and self.config.graph_store.llm.provider:
            self.llm_provider = self.config.graph_store.llm.provider
        # Get LLM config with proper null checks
        llm_config = None
        if self.config.graph_store and self.config.graph_store.llm and hasattr(self.config.graph_store.llm, "config"):
            llm_config = self.config.graph_store.llm.config
        elif hasattr(self.config.llm, "config"):
            llm_config = self.config.llm.config
        self.llm = LlmFactory.create(self.llm_provider, llm_config)
        self.user_id = None
        # Use threshold from graph_store config, default to 0.7 for backward compatibility
        self.threshold = self.config.graph_store.threshold if hasattr(self.config.graph_store, 'threshold') else 0.7

    def kuzu_create_schema(self):
        """Create the Entity node table and CONNECTED_TO rel table if absent."""
        self.kuzu_execute(
            """
            CREATE NODE TABLE IF NOT EXISTS Entity(
                id SERIAL PRIMARY KEY,
                user_id STRING,
                agent_id STRING,
                run_id STRING,
                name STRING,
                mentions INT64,
                created TIMESTAMP,
                embedding FLOAT[]);
            """
        )
        self.kuzu_execute(
            """
            CREATE REL TABLE IF NOT EXISTS CONNECTED_TO(
                FROM Entity TO Entity,
                name STRING,
                mentions INT64,
                created TIMESTAMP,
                updated TIMESTAMP
            );
            """
        )

    def kuzu_execute(self, query, parameters=None):
        """Run a Cypher query against Kuzu and return rows as a list of dicts."""
        results = self.graph.execute(query, parameters)
        return list(results.rows_as_dict())

    def add(self, data, filters):
        """
        Adds data to the graph.

        Args:
            data (str): The data to add to the graph.
            filters (dict): A dictionary containing filters to be applied during the addition.

        Returns:
            dict: ``{"deleted_entities": ..., "added_entities": ...}`` holding
            the per-item results of the delete and add Cypher statements.
        """
        entity_type_map = self._retrieve_nodes_from_data(data, filters)
        to_be_added = self._establish_nodes_relations_from_data(data, filters, entity_type_map)
        search_output = self._search_graph_db(node_list=list(entity_type_map.keys()), filters=filters)
        # Existing relations that the new information contradicts get removed first.
        to_be_deleted = self._get_delete_entities_from_search_output(search_output, data, filters)
        deleted_entities = self._delete_entities(to_be_deleted, filters)
        added_entities = self._add_entities(to_be_added, filters, entity_type_map)
        return {"deleted_entities": deleted_entities, "added_entities": added_entities}

    def search(self, query, filters, limit=5):
        """
        Search for memories and related graph data.

        Args:
            query (str): Query to search for.
            filters (dict): A dictionary containing filters to be applied during the search.
            limit (int): The maximum number of reranked results to return. Defaults to 5.

        Returns:
            list: BM25-reranked results, each a dict with "source",
            "relationship" and "destination" keys; empty list when the
            graph search finds nothing.
        """
        entity_type_map = self._retrieve_nodes_from_data(query, filters)
        search_output = self._search_graph_db(node_list=list(entity_type_map.keys()), filters=filters)
        if not search_output:
            return []
        search_outputs_sequence = [
            [item["source"], item["relationship"], item["destination"]] for item in search_output
        ]
        # Each (source, relationship, destination) triple is treated as a BM25
        # "document"; the whitespace-split query supplies the tokens.
        bm25 = BM25Okapi(search_outputs_sequence)
        tokenized_query = query.split(" ")
        reranked_results = bm25.get_top_n(tokenized_query, search_outputs_sequence, n=limit)
        search_results = []
        for item in reranked_results:
            search_results.append({"source": item[0], "relationship": item[1], "destination": item[2]})
        logger.info(f"Returned {len(search_results)} search results")
        return search_results

    def delete_all(self, filters):
        """Detach-delete every node matching the user/agent/run filters."""
        # Build node properties for filtering
        node_props = ["user_id: $user_id"]
        if filters.get("agent_id"):
            node_props.append("agent_id: $agent_id")
        if filters.get("run_id"):
            node_props.append("run_id: $run_id")
        node_props_str = ", ".join(node_props)
        cypher = f"""
        MATCH (n {self.node_label} {{{node_props_str}}})
        DETACH DELETE n
        """
        params = {"user_id": filters["user_id"]}
        if filters.get("agent_id"):
            params["agent_id"] = filters["agent_id"]
        if filters.get("run_id"):
            params["run_id"] = filters["run_id"]
        self.kuzu_execute(cypher, parameters=params)

    def get_all(self, filters, limit=100):
        """
        Retrieves all nodes and relationships from the graph database based on optional filtering criteria.

        Args:
            filters (dict): A dictionary containing filters to be applied during the retrieval.
            limit (int): The maximum number of nodes and relationships to retrieve. Defaults to 100.

        Returns:
            list: A list of dicts, each with "source", "relationship" and
            "target" keys describing one stored relationship.
        """
        params = {
            "user_id": filters["user_id"],
            "limit": limit,
        }
        # Build node properties based on filters
        node_props = ["user_id: $user_id"]
        if filters.get("agent_id"):
            node_props.append("agent_id: $agent_id")
            params["agent_id"] = filters["agent_id"]
        if filters.get("run_id"):
            node_props.append("run_id: $run_id")
            params["run_id"] = filters["run_id"]
        node_props_str = ", ".join(node_props)
        # Both endpoints must satisfy the same ownership filters.
        query = f"""
        MATCH (n {self.node_label} {{{node_props_str}}})-[r]->(m {self.node_label} {{{node_props_str}}})
        RETURN
          n.name AS source,
          r.name AS relationship,
          m.name AS target
        LIMIT $limit
        """
        results = self.kuzu_execute(query, parameters=params)
        final_results = []
        for result in results:
            final_results.append(
                {
                    "source": result["source"],
                    "relationship": result["relationship"],
                    "target": result["target"],
                }
            )
        logger.info(f"Retrieved {len(final_results)} relationships")
        return final_results

    def _retrieve_nodes_from_data(self, data, filters):
        """Extracts all the entities mentioned in the query.

        Returns a dict mapping entity name -> entity type, both normalized
        to lower snake case.
        """
        _tools = [EXTRACT_ENTITIES_TOOL]
        if self.llm_provider in ["azure_openai_structured", "openai_structured"]:
            _tools = [EXTRACT_ENTITIES_STRUCT_TOOL]
        search_results = self.llm.generate_response(
            messages=[
                {
                    "role": "system",
                    "content": f"You are a smart assistant who understands entities and their types in a given text. If user message contains self reference such as 'I', 'me', 'my' etc. then use {filters['user_id']} as the source entity. Extract all the entities from the text. ***DO NOT*** answer the question itself if the given text is a question.",
                },
                {"role": "user", "content": data},
            ],
            tools=_tools,
        )
        entity_type_map = {}
        try:
            for tool_call in search_results["tool_calls"]:
                if tool_call["name"] != "extract_entities":
                    continue
                for item in tool_call["arguments"]["entities"]:
                    entity_type_map[item["entity"]] = item["entity_type"]
        except Exception as e:
            # Extraction failure is non-fatal: proceed with an empty map.
            logger.exception(
                f"Error in search tool: {e}, llm_provider={self.llm_provider}, search_results={search_results}"
            )
        # Normalize to lower snake case so graph matching is case/space-insensitive.
        entity_type_map = {k.lower().replace(" ", "_"): v.lower().replace(" ", "_") for k, v in entity_type_map.items()}
        logger.debug(f"Entity type map: {entity_type_map}\n search_results={search_results}")
        return entity_type_map

    def _establish_nodes_relations_from_data(self, data, filters, entity_type_map):
        """Establish relations among the extracted nodes.

        Returns a list of {"source", "relationship", "destination"} dicts
        (normalized to lower snake case) proposed by the LLM.
        """
        # Compose user identification string for prompt
        user_identity = f"user_id: {filters['user_id']}"
        if filters.get("agent_id"):
            user_identity += f", agent_id: {filters['agent_id']}"
        if filters.get("run_id"):
            user_identity += f", run_id: {filters['run_id']}"
        if self.config.graph_store.custom_prompt:
            system_content = EXTRACT_RELATIONS_PROMPT.replace("USER_ID", user_identity)
            # Add the custom prompt line if configured
            system_content = system_content.replace("CUSTOM_PROMPT", f"4. {self.config.graph_store.custom_prompt}")
            messages = [
                {"role": "system", "content": system_content},
                {"role": "user", "content": data},
            ]
        else:
            system_content = EXTRACT_RELATIONS_PROMPT.replace("USER_ID", user_identity)
            messages = [
                {"role": "system", "content": system_content},
                {"role": "user", "content": f"List of entities: {list(entity_type_map.keys())}. \n\nText: {data}"},
            ]
        _tools = [RELATIONS_TOOL]
        if self.llm_provider in ["azure_openai_structured", "openai_structured"]:
            _tools = [RELATIONS_STRUCT_TOOL]
        extracted_entities = self.llm.generate_response(
            messages=messages,
            tools=_tools,
        )
        entities = []
        if extracted_entities.get("tool_calls"):
            entities = extracted_entities["tool_calls"][0].get("arguments", {}).get("entities", [])
        entities = self._remove_spaces_from_entities(entities)
        logger.debug(f"Extracted entities: {entities}")
        return entities

    def _search_graph_db(self, node_list, filters, limit=100, threshold=None):
        """Search similar nodes among and their respective incoming and outgoing relations.

        For each name in ``node_list`` the node is embedded, matched against
        stored embeddings by cosine similarity (>= threshold), and both
        outgoing and incoming relations of the matches are collected.
        """
        result_relations = []
        params = {
            "threshold": threshold if threshold else self.threshold,
            "user_id": filters["user_id"],
            "limit": limit,
        }
        # Build node properties for filtering
        node_props = ["user_id: $user_id"]
        if filters.get("agent_id"):
            node_props.append("agent_id: $agent_id")
            params["agent_id"] = filters["agent_id"]
        if filters.get("run_id"):
            node_props.append("run_id: $run_id")
            params["run_id"] = filters["run_id"]
        node_props_str = ", ".join(node_props)
        for node in node_list:
            n_embedding = self.embedding_model.embed(node)
            # params is reused across iterations; only the embedding changes.
            params["n_embedding"] = n_embedding
            results = []
            # Two fragments: matched node as relation source, then as destination.
            for match_fragment in [
                f"(n)-[r]->(m {self.node_label} {{{node_props_str}}}) WITH n as src, r, m as dst, similarity",
                f"(m {self.node_label} {{{node_props_str}}})-[r]->(n) WITH m as src, r, n as dst, similarity"
            ]:
                results.extend(self.kuzu_execute(
                    f"""
                    MATCH (n {self.node_label} {{{node_props_str}}})
                    WHERE n.embedding IS NOT NULL
                    WITH n, array_cosine_similarity(n.embedding, CAST($n_embedding,'FLOAT[{self.embedding_dims}]')) AS similarity
                    WHERE similarity >= CAST($threshold, 'DOUBLE')
                    MATCH {match_fragment}
                    RETURN
                        src.name AS source,
                        id(src) AS source_id,
                        r.name AS relationship,
                        id(r) AS relation_id,
                        dst.name AS destination,
                        id(dst) AS destination_id,
                        similarity
                    LIMIT $limit
                    """,
                    parameters=params))
            # Kuzu does not support sort/limit over unions. Do it manually for now.
            result_relations.extend(sorted(results, key=lambda x: x["similarity"], reverse=True)[:limit])
        return result_relations

    def _get_delete_entities_from_search_output(self, search_output, data, filters):
        """Get the entities to be deleted from the search output."""
        search_output_string = format_entities(search_output)
        # Compose user identification string for prompt
        user_identity = f"user_id: {filters['user_id']}"
        if filters.get("agent_id"):
            user_identity += f", agent_id: {filters['agent_id']}"
        if filters.get("run_id"):
            user_identity += f", run_id: {filters['run_id']}"
        system_prompt, user_prompt = get_delete_messages(search_output_string, data, user_identity)
        _tools = [DELETE_MEMORY_TOOL_GRAPH]
        if self.llm_provider in ["azure_openai_structured", "openai_structured"]:
            _tools = [
                DELETE_MEMORY_STRUCT_TOOL_GRAPH,
            ]
        memory_updates = self.llm.generate_response(
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            tools=_tools,
        )
        to_be_deleted = []
        for item in memory_updates.get("tool_calls", []):
            if item.get("name") == "delete_graph_memory":
                to_be_deleted.append(item.get("arguments"))
        # Clean entities formatting
        to_be_deleted = self._remove_spaces_from_entities(to_be_deleted)
        logger.debug(f"Deleted relationships: {to_be_deleted}")
        return to_be_deleted

    def _delete_entities(self, to_be_deleted, filters):
        """Delete the entities from the graph.

        Removes only the named relationship between each source/destination
        pair; the nodes themselves are kept.
        """
        user_id = filters["user_id"]
        agent_id = filters.get("agent_id", None)
        run_id = filters.get("run_id", None)
        results = []
        for item in to_be_deleted:
            source = item["source"]
            destination = item["destination"]
            relationship = item["relationship"]
            params = {
                "source_name": source,
                "dest_name": destination,
                "user_id": user_id,
                "relationship_name": relationship,
            }
            # Build node properties for filtering
            source_props = ["name: $source_name", "user_id: $user_id"]
            dest_props = ["name: $dest_name", "user_id: $user_id"]
            if agent_id:
                source_props.append("agent_id: $agent_id")
                dest_props.append("agent_id: $agent_id")
                params["agent_id"] = agent_id
            if run_id:
                source_props.append("run_id: $run_id")
                dest_props.append("run_id: $run_id")
                params["run_id"] = run_id
            source_props_str = ", ".join(source_props)
            dest_props_str = ", ".join(dest_props)
            # Delete the specific relationship between nodes
            cypher = f"""
            MATCH (n {self.node_label} {{{source_props_str}}})
                  -[r {self.rel_label} {{name: $relationship_name}}]->
                  (m {self.node_label} {{{dest_props_str}}})
            DELETE r
            RETURN
                n.name AS source,
                r.name AS relationship,
                m.name AS target
            """
            result = self.kuzu_execute(cypher, parameters=params)
            results.append(result)
        return results

    def _add_entities(self, to_be_added, filters, entity_type_map):
        """Add the new entities to the graph. Merge the nodes if they already exist.

        Four cases per item, depending on which endpoints already have an
        embedding-similar node: only source exists, only destination exists,
        both exist (matched by internal id), or neither (MERGE both).
        """
        user_id = filters["user_id"]
        agent_id = filters.get("agent_id", None)
        run_id = filters.get("run_id", None)
        results = []
        for item in to_be_added:
            # entities
            source = item["source"]
            source_label = self.node_label
            destination = item["destination"]
            destination_label = self.node_label
            relationship = item["relationship"]
            relationship_label = self.rel_label
            # embeddings
            source_embedding = self.embedding_model.embed(source)
            dest_embedding = self.embedding_model.embed(destination)
            # search for the nodes with the closest embeddings
            source_node_search_result = self._search_source_node(source_embedding, filters, threshold=self.threshold)
            destination_node_search_result = self._search_destination_node(dest_embedding, filters, threshold=self.threshold)
            if not destination_node_search_result and source_node_search_result:
                # Source exists: bump its mention count, MERGE the destination.
                params = {
                    "table_id": source_node_search_result[0]["id"]["table"],
                    "offset_id": source_node_search_result[0]["id"]["offset"],
                    "destination_name": destination,
                    "destination_embedding": dest_embedding,
                    "relationship_name": relationship,
                    "user_id": user_id,
                }
                # Build source MERGE properties
                merge_props = ["name: $destination_name", "user_id: $user_id"]
                if agent_id:
                    merge_props.append("agent_id: $agent_id")
                    params["agent_id"] = agent_id
                if run_id:
                    merge_props.append("run_id: $run_id")
                    params["run_id"] = run_id
                merge_props_str = ", ".join(merge_props)
                cypher = f"""
                MATCH (source)
                WHERE id(source) = internal_id($table_id, $offset_id)
                SET source.mentions = coalesce(source.mentions, 0) + 1
                WITH source
                MERGE (destination {destination_label} {{{merge_props_str}}})
                ON CREATE SET
                    destination.created = current_timestamp(),
                    destination.mentions = 1,
                    destination.embedding = CAST($destination_embedding,'FLOAT[{self.embedding_dims}]')
                ON MATCH SET
                    destination.mentions = coalesce(destination.mentions, 0) + 1,
                    destination.embedding = CAST($destination_embedding,'FLOAT[{self.embedding_dims}]')
                WITH source, destination
                MERGE (source)-[r {relationship_label} {{name: $relationship_name}}]->(destination)
                ON CREATE SET
                    r.created = current_timestamp(),
                    r.mentions = 1
                ON MATCH SET
                    r.mentions = coalesce(r.mentions, 0) + 1
                RETURN
                    source.name AS source,
                    r.name AS relationship,
                    destination.name AS target
                """
            elif destination_node_search_result and not source_node_search_result:
                # Destination exists: bump its mention count, MERGE the source.
                params = {
                    "table_id": destination_node_search_result[0]["id"]["table"],
                    "offset_id": destination_node_search_result[0]["id"]["offset"],
                    "source_name": source,
                    "source_embedding": source_embedding,
                    "user_id": user_id,
                    "relationship_name": relationship,
                }
                # Build source MERGE properties
                merge_props = ["name: $source_name", "user_id: $user_id"]
                if agent_id:
                    merge_props.append("agent_id: $agent_id")
                    params["agent_id"] = agent_id
                if run_id:
                    merge_props.append("run_id: $run_id")
                    params["run_id"] = run_id
                merge_props_str = ", ".join(merge_props)
                cypher = f"""
                MATCH (destination)
                WHERE id(destination) = internal_id($table_id, $offset_id)
                SET destination.mentions = coalesce(destination.mentions, 0) + 1
                WITH destination
                MERGE (source {source_label} {{{merge_props_str}}})
                ON CREATE SET
                    source.created = current_timestamp(),
                    source.mentions = 1,
                    source.embedding = CAST($source_embedding,'FLOAT[{self.embedding_dims}]')
                ON MATCH SET
                    source.mentions = coalesce(source.mentions, 0) + 1,
                    source.embedding = CAST($source_embedding,'FLOAT[{self.embedding_dims}]')
                WITH source, destination
                MERGE (source)-[r {relationship_label} {{name: $relationship_name}}]->(destination)
                ON CREATE SET
                    r.created = current_timestamp(),
                    r.mentions = 1
                ON MATCH SET
                    r.mentions = coalesce(r.mentions, 0) + 1
                RETURN
                    source.name AS source,
                    r.name AS relationship,
                    destination.name AS target
                """
            elif source_node_search_result and destination_node_search_result:
                # Both exist: address both by internal id, MERGE only the edge.
                cypher = f"""
                MATCH (source)
                WHERE id(source) = internal_id($src_table, $src_offset)
                SET source.mentions = coalesce(source.mentions, 0) + 1
                WITH source
                MATCH (destination)
                WHERE id(destination) = internal_id($dst_table, $dst_offset)
                SET destination.mentions = coalesce(destination.mentions, 0) + 1
                MERGE (source)-[r {relationship_label} {{name: $relationship_name}}]->(destination)
                ON CREATE SET
                    r.created = current_timestamp(),
                    r.updated = current_timestamp(),
                    r.mentions = 1
                ON MATCH SET r.mentions = coalesce(r.mentions, 0) + 1
                RETURN
                    source.name AS source,
                    r.name AS relationship,
                    destination.name AS target
                """
                params = {
                    "src_table": source_node_search_result[0]["id"]["table"],
                    "src_offset": source_node_search_result[0]["id"]["offset"],
                    "dst_table": destination_node_search_result[0]["id"]["table"],
                    "dst_offset": destination_node_search_result[0]["id"]["offset"],
                    "relationship_name": relationship,
                }
            else:
                # Neither exists: MERGE both nodes and the edge in one statement.
                params = {
                    "source_name": source,
                    "dest_name": destination,
                    "relationship_name": relationship,
                    "source_embedding": source_embedding,
                    "dest_embedding": dest_embedding,
                    "user_id": user_id,
                }
                # Build dynamic MERGE props for both source and destination
                source_props = ["name: $source_name", "user_id: $user_id"]
                dest_props = ["name: $dest_name", "user_id: $user_id"]
                if agent_id:
                    source_props.append("agent_id: $agent_id")
                    dest_props.append("agent_id: $agent_id")
                    params["agent_id"] = agent_id
                if run_id:
                    source_props.append("run_id: $run_id")
                    dest_props.append("run_id: $run_id")
                    params["run_id"] = run_id
                source_props_str = ", ".join(source_props)
                dest_props_str = ", ".join(dest_props)
                cypher = f"""
                MERGE (source {source_label} {{{source_props_str}}})
                ON CREATE SET
                    source.created = current_timestamp(),
                    source.mentions = 1,
                    source.embedding = CAST($source_embedding,'FLOAT[{self.embedding_dims}]')
                ON MATCH SET
                    source.mentions = coalesce(source.mentions, 0) + 1,
                    source.embedding = CAST($source_embedding,'FLOAT[{self.embedding_dims}]')
                WITH source
                MERGE (destination {destination_label} {{{dest_props_str}}})
                ON CREATE SET
                    destination.created = current_timestamp(),
                    destination.mentions = 1,
                    destination.embedding = CAST($dest_embedding,'FLOAT[{self.embedding_dims}]')
                ON MATCH SET
                    destination.mentions = coalesce(destination.mentions, 0) + 1,
                    destination.embedding = CAST($dest_embedding,'FLOAT[{self.embedding_dims}]')
                WITH source, destination
                MERGE (source)-[rel {relationship_label} {{name: $relationship_name}}]->(destination)
                ON CREATE SET
                    rel.created = current_timestamp(),
                    rel.mentions = 1
                ON MATCH SET
                    rel.mentions = coalesce(rel.mentions, 0) + 1
                RETURN
                    source.name AS source,
                    rel.name AS relationship,
                    destination.name AS target
                """
            result = self.kuzu_execute(cypher, parameters=params)
            results.append(result)
        return results

    def _remove_spaces_from_entities(self, entity_list):
        """Normalize source/relationship/destination names to lower snake case (in place)."""
        for item in entity_list:
            item["source"] = item["source"].lower().replace(" ", "_")
            item["relationship"] = item["relationship"].lower().replace(" ", "_")
            item["destination"] = item["destination"].lower().replace(" ", "_")
        return entity_list

    def _search_source_node(self, source_embedding, filters, threshold=0.9):
        """Find up to two existing nodes most similar to the source embedding.

        Returns rows with the node's internal ``id`` and its similarity,
        ordered by similarity descending.
        """
        params = {
            "source_embedding": source_embedding,
            "user_id": filters["user_id"],
            "threshold": threshold,
        }
        where_conditions = ["source_candidate.embedding IS NOT NULL", "source_candidate.user_id = $user_id"]
        if filters.get("agent_id"):
            where_conditions.append("source_candidate.agent_id = $agent_id")
            params["agent_id"] = filters["agent_id"]
        if filters.get("run_id"):
            where_conditions.append("source_candidate.run_id = $run_id")
            params["run_id"] = filters["run_id"]
        where_clause = " AND ".join(where_conditions)
        cypher = f"""
        MATCH (source_candidate {self.node_label})
        WHERE {where_clause}
        WITH source_candidate,
             array_cosine_similarity(source_candidate.embedding, CAST($source_embedding,'FLOAT[{self.embedding_dims}]')) AS source_similarity
        WHERE source_similarity >= $threshold
        WITH source_candidate, source_similarity
        ORDER BY source_similarity DESC
        LIMIT 2
        RETURN id(source_candidate) as id, source_similarity
        """
        return self.kuzu_execute(cypher, parameters=params)

    def _search_destination_node(self, destination_embedding, filters, threshold=0.9):
        """Find up to two existing nodes most similar to the destination embedding.

        Mirror of ``_search_source_node`` for the relation's destination side.
        """
        params = {
            "destination_embedding": destination_embedding,
            "user_id": filters["user_id"],
            "threshold": threshold,
        }
        where_conditions = ["destination_candidate.embedding IS NOT NULL", "destination_candidate.user_id = $user_id"]
        if filters.get("agent_id"):
            where_conditions.append("destination_candidate.agent_id = $agent_id")
            params["agent_id"] = filters["agent_id"]
        if filters.get("run_id"):
            where_conditions.append("destination_candidate.run_id = $run_id")
            params["run_id"] = filters["run_id"]
        where_clause = " AND ".join(where_conditions)
        cypher = f"""
        MATCH (destination_candidate {self.node_label})
        WHERE {where_clause}
        WITH destination_candidate,
             array_cosine_similarity(destination_candidate.embedding, CAST($destination_embedding,'FLOAT[{self.embedding_dims}]')) AS destination_similarity
        WHERE destination_similarity >= $threshold
        WITH destination_candidate, destination_similarity
        ORDER BY destination_similarity DESC
        LIMIT 2
        RETURN id(destination_candidate) as id, destination_similarity
        """
        return self.kuzu_execute(cypher, parameters=params)

    # Reset is not defined in base.py
    def reset(self):
        """Reset the graph by clearing all nodes and relationships."""
        logger.warning("Clearing graph...")
        cypher_query = """
        MATCH (n) DETACH DELETE n
        """
        return self.kuzu_execute(cypher_query)
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/memory/kuzu_memory.py",
"license": "Apache License 2.0",
"lines": 621,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:tests/memory/test_kuzu.py | import numpy as np
import pytest
from unittest.mock import Mock, patch
from mem0.memory.kuzu_memory import MemoryGraph
class TestKuzu:
    """Test that Kuzu memory works correctly"""

    # Create distinct embeddings that won't match with threshold=0.7
    # Each embedding is mostly zeros with ones in different positions to ensure low similarity
    alice_emb = np.zeros(384)
    alice_emb[0:96] = 1.0
    bob_emb = np.zeros(384)
    bob_emb[96:192] = 1.0
    charlie_emb = np.zeros(384)
    charlie_emb[192:288] = 1.0
    dave_emb = np.zeros(384)
    dave_emb[288:384] = 1.0
    # Name -> embedding lookup consumed by the mocked embedder fixture below.
    embeddings = {
        "alice": alice_emb.tolist(),
        "bob": bob_emb.tolist(),
        "charlie": charlie_emb.tolist(),
        "dave": dave_emb.tolist(),
    }

    @pytest.fixture
    def mock_config(self):
        """Create a mock configuration for testing"""
        config = Mock()
        # Mock embedder config
        config.embedder.provider = "mock_embedder"
        config.embedder.config = {"model": "mock_model"}
        config.vector_store.config = {"dimensions": 384}
        # Mock graph store config (in-memory Kuzu database, no on-disk state)
        config.graph_store.config.db = ":memory:"
        config.graph_store.threshold = 0.7
        # Mock LLM config
        config.llm.provider = "mock_llm"
        config.llm.config = {"api_key": "test_key"}
        return config

    @pytest.fixture
    def mock_embedding_model(self):
        """Create a mock embedding model"""
        mock_model = Mock()
        mock_model.config.embedding_dims = 384

        def mock_embed(text):
            # Return the fixed per-name embedding defined on the class.
            return self.embeddings[text]

        mock_model.embed.side_effect = mock_embed
        return mock_model

    @pytest.fixture
    def mock_llm(self):
        """Create a mock LLM"""
        mock_llm = Mock()
        mock_llm.generate_response.return_value = {
            "tool_calls": [
                {
                    "name": "extract_entities",
                    "arguments": {"entities": [{"entity": "test_entity", "entity_type": "test_type"}]},
                }
            ]
        }
        return mock_llm

    @patch("mem0.memory.kuzu_memory.EmbedderFactory")
    @patch("mem0.memory.kuzu_memory.LlmFactory")
    def test_kuzu_memory_initialization(
        self, mock_llm_factory, mock_embedder_factory, mock_config, mock_embedding_model, mock_llm
    ):
        """Test that Kuzu memory initializes correctly"""
        # Setup mocks
        mock_embedder_factory.create.return_value = mock_embedding_model
        mock_llm_factory.create.return_value = mock_llm
        # Create instance
        kuzu_memory = MemoryGraph(mock_config)
        # Verify initialization
        assert kuzu_memory.config == mock_config
        assert kuzu_memory.embedding_model == mock_embedding_model
        assert kuzu_memory.embedding_dims == 384
        assert kuzu_memory.llm == mock_llm
        assert kuzu_memory.threshold == 0.7

    @pytest.mark.parametrize(
        "embedding_dims",
        [None, 0, -1],
    )
    @patch("mem0.memory.kuzu_memory.EmbedderFactory")
    def test_kuzu_memory_initialization_invalid_embedding_dims(
        self, mock_embedder_factory, embedding_dims, mock_config
    ):
        """Test that Kuzu memory raises ValueError when initialized with invalid embedding_dims"""
        # Setup mocks
        mock_embedding_model = Mock()
        mock_embedding_model.config.embedding_dims = embedding_dims
        mock_embedder_factory.create.return_value = mock_embedding_model
        with pytest.raises(ValueError, match="must be a positive"):
            MemoryGraph(mock_config)

    @patch("mem0.memory.kuzu_memory.EmbedderFactory")
    @patch("mem0.memory.kuzu_memory.LlmFactory")
    def test_kuzu(self, mock_llm_factory, mock_embedder_factory, mock_config, mock_embedding_model, mock_llm):
        """Test adding memory to the graph"""
        mock_embedder_factory.create.return_value = mock_embedding_model
        mock_llm_factory.create.return_value = mock_llm
        kuzu_memory = MemoryGraph(mock_config)
        filters = {"user_id": "test_user", "agent_id": "test_agent", "run_id": "test_run"}
        data1 = [
            {"source": "alice", "destination": "bob", "relationship": "knows"},
            {"source": "bob", "destination": "charlie", "relationship": "knows"},
            {"source": "charlie", "destination": "alice", "relationship": "knows"},
        ]
        data2 = [
            {"source": "charlie", "destination": "alice", "relationship": "likes"},
        ]
        # First batch: three new nodes, three new edges.
        result = kuzu_memory._add_entities(data1, filters, {})
        assert result[0] == [{"source": "alice", "relationship": "knows", "target": "bob"}]
        assert result[1] == [{"source": "bob", "relationship": "knows", "target": "charlie"}]
        assert result[2] == [{"source": "charlie", "relationship": "knows", "target": "alice"}]
        assert get_node_count(kuzu_memory) == 3
        assert get_edge_count(kuzu_memory) == 3
        # Second batch reuses existing nodes, so only an edge is added.
        result = kuzu_memory._add_entities(data2, filters, {})
        assert result[0] == [{"source": "charlie", "relationship": "likes", "target": "alice"}]
        assert get_node_count(kuzu_memory) == 3
        assert get_edge_count(kuzu_memory) == 4
        data3 = [
            {"source": "dave", "destination": "alice", "relationship": "admires"}
        ]
        result = kuzu_memory._add_entities(data3, filters, {})
        assert result[0] == [{"source": "dave", "relationship": "admires", "target": "alice"}]
        assert get_node_count(kuzu_memory) == 4  # dave is new
        assert get_edge_count(kuzu_memory) == 5
        results = kuzu_memory.get_all(filters)
        assert set([f"{result['source']}_{result['relationship']}_{result['target']}" for result in results]) == set([
            "alice_knows_bob",
            "bob_knows_charlie",
            "charlie_likes_alice",
            "charlie_knows_alice",
            "dave_admires_alice"
        ])
        # Similarity search around "bob" should surface only bob-adjacent edges
        # (embeddings are constructed to be dissimilar at threshold=0.8).
        results = kuzu_memory._search_graph_db(["bob"], filters, threshold=0.8)
        assert set([f"{result['source']}_{result['relationship']}_{result['destination']}" for result in results]) == set([
            "alice_knows_bob",
            "bob_knows_charlie",
        ])
        # Deleting relationships removes edges but keeps the nodes.
        result = kuzu_memory._delete_entities(data2, filters)
        assert result[0] == [{"source": "charlie", "relationship": "likes", "target": "alice"}]
        assert get_node_count(kuzu_memory) == 4
        assert get_edge_count(kuzu_memory) == 4
        result = kuzu_memory._delete_entities(data1, filters)
        assert result[0] == [{"source": "alice", "relationship": "knows", "target": "bob"}]
        assert result[1] == [{"source": "bob", "relationship": "knows", "target": "charlie"}]
        assert result[2] == [{"source": "charlie", "relationship": "knows", "target": "alice"}]
        assert get_node_count(kuzu_memory) == 4
        assert get_edge_count(kuzu_memory) == 1
        # delete_all wipes nodes as well as edges.
        result = kuzu_memory.delete_all(filters)
        assert get_node_count(kuzu_memory) == 0
        assert get_edge_count(kuzu_memory) == 0
        result = kuzu_memory._add_entities(data2, filters, {})
        assert result[0] == [{"source": "charlie", "relationship": "likes", "target": "alice"}]
        assert get_node_count(kuzu_memory) == 2
        assert get_edge_count(kuzu_memory) == 1
        # reset() clears the whole graph regardless of filters.
        result = kuzu_memory.reset()
        assert get_node_count(kuzu_memory) == 0
        assert get_edge_count(kuzu_memory) == 0
def get_node_count(kuzu_memory):
    """Return the total number of nodes currently stored in the graph."""
    query = """
        MATCH (n)
        RETURN COUNT(n) as count
        """
    rows = kuzu_memory.kuzu_execute(query)
    return int(rows[0]["count"])
def get_edge_count(kuzu_memory):
    """Return the total number of relationships currently stored in the graph."""
    query = """
        MATCH (n)-[e]->(m)
        RETURN COUNT(e) as count
        """
    rows = kuzu_memory.kuzu_execute(query)
    return int(rows[0]["count"])
| {
"repo_id": "mem0ai/mem0",
"file_path": "tests/memory/test_kuzu.py",
"license": "Apache License 2.0",
"lines": 173,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mem0ai/mem0:tests/memory/test_neo4j_cypher_syntax.py | import os
from unittest.mock import Mock, patch
class TestNeo4jCypherSyntaxFix:
"""Test that Neo4j Cypher syntax fixes work correctly"""
def test_get_all_generates_valid_cypher_with_agent_id(self):
"""Test that get_all method generates valid Cypher with agent_id"""
# Mock the langchain_neo4j module to avoid import issues
with patch.dict('sys.modules', {'langchain_neo4j': Mock()}):
from mem0.memory.graph_memory import MemoryGraph
# Create instance (will fail on actual connection, but that's fine for syntax testing)
try:
_ = MemoryGraph(url="bolt://localhost:7687", username="test", password="test")
except Exception:
# Expected to fail on connection, just test the class exists
assert MemoryGraph is not None
return
def test_cypher_syntax_validation(self):
"""Test that our Cypher fixes don't contain problematic patterns"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Ensure the old buggy pattern is not present
assert "AND n.agent_id = $agent_id AND m.agent_id = $agent_id" not in content
assert "WHERE 1=1 {agent_filter}" not in content
# Ensure proper node property syntax is present
assert "node_props" in content
assert "agent_id: $agent_id" in content
# Ensure run_id follows the same pattern
# Check for absence of problematic run_id patterns
assert "AND n.run_id = $run_id AND m.run_id = $run_id" not in content
assert "WHERE 1=1 {run_id_filter}" not in content
def test_no_undefined_variables_in_cypher(self):
"""Test that we don't have undefined variable patterns"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Check for patterns that would cause "Variable 'm' not defined" errors
lines = content.split('\n')
for i, line in enumerate(lines):
# Look for WHERE clauses that reference variables not in MATCH
if 'WHERE' in line and 'm.agent_id' in line:
# Check if there's a MATCH clause before this that defines 'm'
preceding_lines = lines[max(0, i-10):i]
match_found = any('MATCH' in prev_line and ' m ' in prev_line for prev_line in preceding_lines)
assert match_found, f"Line {i+1}: WHERE clause references 'm' without MATCH definition"
# Also check for run_id patterns that might have similar issues
if 'WHERE' in line and 'm.run_id' in line:
# Check if there's a MATCH clause before this that defines 'm'
preceding_lines = lines[max(0, i-10):i]
match_found = any('MATCH' in prev_line and ' m ' in prev_line for prev_line in preceding_lines)
assert match_found, f"Line {i+1}: WHERE clause references 'm.run_id' without MATCH definition"
def test_agent_id_integration_syntax(self):
"""Test that agent_id is properly integrated into MATCH clauses"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Should have node property building logic
assert 'node_props = [' in content
assert 'node_props.append("agent_id: $agent_id")' in content
assert 'node_props_str = ", ".join(node_props)' in content
# Should use the node properties in MATCH clauses
assert '{{{node_props_str}}}' in content or '{node_props_str}' in content
def test_run_id_integration_syntax(self):
"""Test that run_id is properly integrated into MATCH clauses"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Should have node property building logic for run_id
assert 'node_props = [' in content
assert 'node_props.append("run_id: $run_id")' in content
assert 'node_props_str = ", ".join(node_props)' in content
# Should use the node properties in MATCH clauses
assert '{{{node_props_str}}}' in content or '{node_props_str}' in content
def test_agent_id_filter_patterns(self):
"""Test that agent_id filtering follows the correct pattern"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Check that agent_id is handled in filters
assert 'if filters.get("agent_id"):' in content
assert 'params["agent_id"] = filters["agent_id"]' in content
# Check that agent_id is used in node properties
assert 'node_props.append("agent_id: $agent_id")' in content
def test_run_id_filter_patterns(self):
"""Test that run_id filtering follows the same pattern as agent_id"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Check that run_id is handled in filters
assert 'if filters.get("run_id"):' in content
assert 'params["run_id"] = filters["run_id"]' in content
# Check that run_id is used in node properties
assert 'node_props.append("run_id: $run_id")' in content
def test_agent_id_cypher_generation(self):
"""Test that agent_id is properly included in Cypher query generation"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Check that the dynamic property building pattern exists
assert 'node_props = [' in content
assert 'node_props_str = ", ".join(node_props)' in content
# Check that agent_id is handled in the pattern
assert 'if filters.get(' in content
assert 'node_props.append(' in content
# Verify the pattern is used in MATCH clauses
assert '{{{node_props_str}}}' in content or '{node_props_str}' in content
def test_run_id_cypher_generation(self):
"""Test that run_id is properly included in Cypher query generation"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Check that the dynamic property building pattern exists
assert 'node_props = [' in content
assert 'node_props_str = ", ".join(node_props)' in content
# Check that run_id is handled in the pattern
assert 'if filters.get(' in content
assert 'node_props.append(' in content
# Verify the pattern is used in MATCH clauses
assert '{{{node_props_str}}}' in content or '{node_props_str}' in content
def test_agent_id_implementation_pattern(self):
"""Test that the code structure supports agent_id implementation"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Verify that agent_id pattern is used consistently
assert 'node_props = [' in content
assert 'node_props_str = ", ".join(node_props)' in content
assert 'if filters.get("agent_id"):' in content
assert 'node_props.append("agent_id: $agent_id")' in content
def test_run_id_implementation_pattern(self):
"""Test that the code structure supports run_id implementation"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Verify that run_id pattern is used consistently
assert 'node_props = [' in content
assert 'node_props_str = ", ".join(node_props)' in content
assert 'if filters.get("run_id"):' in content
assert 'node_props.append("run_id: $run_id")' in content
def test_user_identity_integration(self):
"""Test that both agent_id and run_id are properly integrated into user identity"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Check that user_identity building includes both agent_id and run_id
assert 'user_identity = f"user_id: {filters[\'user_id\']}"' in content
assert 'user_identity += f", agent_id: {filters[\'agent_id\']}"' in content
assert 'user_identity += f", run_id: {filters[\'run_id\']}"' in content
def test_search_methods_integration(self):
"""Test that both agent_id and run_id are properly integrated into search methods"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Check that search methods handle both agent_id and run_id
assert 'where_conditions.append("source_candidate.agent_id = $agent_id")' in content
assert 'where_conditions.append("source_candidate.run_id = $run_id")' in content
assert 'where_conditions.append("destination_candidate.agent_id = $agent_id")' in content
assert 'where_conditions.append("destination_candidate.run_id = $run_id")' in content
def test_add_entities_integration(self):
"""Test that both agent_id and run_id are properly integrated into add_entities"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Check that add_entities handles both agent_id and run_id
assert 'agent_id = filters.get("agent_id", None)' in content
assert 'run_id = filters.get("run_id", None)' in content
# Check that merge properties include both
assert 'if agent_id:' in content
assert 'if run_id:' in content
assert 'merge_props.append("agent_id: $agent_id")' in content
assert 'merge_props.append("run_id: $run_id")' in content
| {
"repo_id": "mem0ai/mem0",
"file_path": "tests/memory/test_neo4j_cypher_syntax.py",
"license": "Apache License 2.0",
"lines": 223,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mem0ai/mem0:mem0/configs/llms/anthropic.py | from typing import Optional
from mem0.configs.llms.base import BaseLlmConfig
class AnthropicConfig(BaseLlmConfig):
    """Configuration for the Anthropic LLM provider.

    Extends :class:`BaseLlmConfig` with the Anthropic-only
    ``anthropic_base_url`` setting; every other parameter is forwarded to the
    base class unchanged.
    """

    def __init__(
        self,
        # Base parameters
        model: Optional[str] = None,
        temperature: float = 0.1,
        api_key: Optional[str] = None,
        max_tokens: int = 2000,
        top_p: float = 0.1,
        top_k: int = 1,
        enable_vision: bool = False,
        vision_details: Optional[str] = "auto",
        http_client_proxies: Optional[dict] = None,
        # Anthropic-specific parameters
        anthropic_base_url: Optional[str] = None,
    ):
        """Build an Anthropic configuration.

        Args:
            model: Anthropic model to use, defaults to None
            temperature: Controls randomness, defaults to 0.1
            api_key: Anthropic API key, defaults to None
            max_tokens: Maximum tokens to generate, defaults to 2000
            top_p: Nucleus sampling parameter, defaults to 0.1
            top_k: Top-k sampling parameter, defaults to 1
            enable_vision: Enable vision capabilities, defaults to False
            vision_details: Vision detail level, defaults to "auto"
            http_client_proxies: HTTP client proxy settings, defaults to None
            anthropic_base_url: Anthropic API base URL, defaults to None
        """
        # All provider-agnostic settings are handled by the base class.
        shared = dict(
            model=model,
            temperature=temperature,
            api_key=api_key,
            max_tokens=max_tokens,
            top_p=top_p,
            top_k=top_k,
            enable_vision=enable_vision,
            vision_details=vision_details,
            http_client_proxies=http_client_proxies,
        )
        super().__init__(**shared)
        # Anthropic-specific settings
        self.anthropic_base_url = anthropic_base_url
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/llms/anthropic.py",
"license": "Apache License 2.0",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:mem0/configs/llms/azure.py | from typing import Any, Dict, Optional
from mem0.configs.base import AzureConfig
from mem0.configs.llms.base import BaseLlmConfig
class AzureOpenAIConfig(BaseLlmConfig):
    """Configuration for the Azure OpenAI LLM provider.

    Extends :class:`BaseLlmConfig` with an ``azure_kwargs`` mapping that is
    validated into an :class:`AzureConfig`; every other parameter is forwarded
    to the base class unchanged.
    """

    def __init__(
        self,
        # Base parameters
        model: Optional[str] = None,
        temperature: float = 0.1,
        api_key: Optional[str] = None,
        max_tokens: int = 2000,
        top_p: float = 0.1,
        top_k: int = 1,
        enable_vision: bool = False,
        vision_details: Optional[str] = "auto",
        http_client_proxies: Optional[dict] = None,
        # Azure OpenAI-specific parameters
        azure_kwargs: Optional[Dict[str, Any]] = None,
    ):
        """Build an Azure OpenAI configuration.

        Args:
            model: Azure OpenAI model to use, defaults to None
            temperature: Controls randomness, defaults to 0.1
            api_key: Azure OpenAI API key, defaults to None
            max_tokens: Maximum tokens to generate, defaults to 2000
            top_p: Nucleus sampling parameter, defaults to 0.1
            top_k: Top-k sampling parameter, defaults to 1
            enable_vision: Enable vision capabilities, defaults to False
            vision_details: Vision detail level, defaults to "auto"
            http_client_proxies: HTTP client proxy settings, defaults to None
            azure_kwargs: Azure-specific configuration, defaults to None
        """
        # All provider-agnostic settings are handled by the base class.
        shared = dict(
            model=model,
            temperature=temperature,
            api_key=api_key,
            max_tokens=max_tokens,
            top_p=top_p,
            top_k=top_k,
            enable_vision=enable_vision,
            vision_details=vision_details,
            http_client_proxies=http_client_proxies,
        )
        super().__init__(**shared)
        # Azure-specific settings: normalize the raw mapping into AzureConfig.
        self.azure_kwargs = AzureConfig(**(azure_kwargs or {}))
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/llms/azure.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:mem0/configs/llms/deepseek.py | from typing import Optional
from mem0.configs.llms.base import BaseLlmConfig
class DeepSeekConfig(BaseLlmConfig):
    """Configuration for the DeepSeek LLM provider.

    Extends :class:`BaseLlmConfig` with the DeepSeek-only
    ``deepseek_base_url`` setting; every other parameter is forwarded to the
    base class unchanged.
    """

    def __init__(
        self,
        # Base parameters
        model: Optional[str] = None,
        temperature: float = 0.1,
        api_key: Optional[str] = None,
        max_tokens: int = 2000,
        top_p: float = 0.1,
        top_k: int = 1,
        enable_vision: bool = False,
        vision_details: Optional[str] = "auto",
        http_client_proxies: Optional[dict] = None,
        # DeepSeek-specific parameters
        deepseek_base_url: Optional[str] = None,
    ):
        """Build a DeepSeek configuration.

        Args:
            model: DeepSeek model to use, defaults to None
            temperature: Controls randomness, defaults to 0.1
            api_key: DeepSeek API key, defaults to None
            max_tokens: Maximum tokens to generate, defaults to 2000
            top_p: Nucleus sampling parameter, defaults to 0.1
            top_k: Top-k sampling parameter, defaults to 1
            enable_vision: Enable vision capabilities, defaults to False
            vision_details: Vision detail level, defaults to "auto"
            http_client_proxies: HTTP client proxy settings, defaults to None
            deepseek_base_url: DeepSeek API base URL, defaults to None
        """
        # All provider-agnostic settings are handled by the base class.
        shared = dict(
            model=model,
            temperature=temperature,
            api_key=api_key,
            max_tokens=max_tokens,
            top_p=top_p,
            top_k=top_k,
            enable_vision=enable_vision,
            vision_details=vision_details,
            http_client_proxies=http_client_proxies,
        )
        super().__init__(**shared)
        # DeepSeek-specific settings
        self.deepseek_base_url = deepseek_base_url
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/llms/deepseek.py",
"license": "Apache License 2.0",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:mem0/configs/llms/lmstudio.py | from typing import Any, Dict, Optional
from mem0.configs.llms.base import BaseLlmConfig
class LMStudioConfig(BaseLlmConfig):
    """Configuration for the LM Studio LLM provider.

    Extends :class:`BaseLlmConfig` with the LM Studio-only base URL (defaulting
    to the local server) and an optional response-format mapping; every other
    parameter is forwarded to the base class unchanged.
    """

    def __init__(
        self,
        # Base parameters
        model: Optional[str] = None,
        temperature: float = 0.1,
        api_key: Optional[str] = None,
        max_tokens: int = 2000,
        top_p: float = 0.1,
        top_k: int = 1,
        enable_vision: bool = False,
        vision_details: Optional[str] = "auto",
        http_client_proxies: Optional[dict] = None,
        # LM Studio-specific parameters
        lmstudio_base_url: Optional[str] = None,
        lmstudio_response_format: Optional[Dict[str, Any]] = None,
    ):
        """Build an LM Studio configuration.

        Args:
            model: LM Studio model to use, defaults to None
            temperature: Controls randomness, defaults to 0.1
            api_key: LM Studio API key, defaults to None
            max_tokens: Maximum tokens to generate, defaults to 2000
            top_p: Nucleus sampling parameter, defaults to 0.1
            top_k: Top-k sampling parameter, defaults to 1
            enable_vision: Enable vision capabilities, defaults to False
            vision_details: Vision detail level, defaults to "auto"
            http_client_proxies: HTTP client proxy settings, defaults to None
            lmstudio_base_url: LM Studio base URL; None falls back to the
                default local server at http://localhost:1234/v1
            lmstudio_response_format: LM Studio response format, defaults to None
        """
        # All provider-agnostic settings are handled by the base class.
        shared = dict(
            model=model,
            temperature=temperature,
            api_key=api_key,
            max_tokens=max_tokens,
            top_p=top_p,
            top_k=top_k,
            enable_vision=enable_vision,
            vision_details=vision_details,
            http_client_proxies=http_client_proxies,
        )
        super().__init__(**shared)
        # LM Studio-specific settings; base URL defaults to the local server.
        self.lmstudio_base_url = lmstudio_base_url or "http://localhost:1234/v1"
        self.lmstudio_response_format = lmstudio_response_format
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/llms/lmstudio.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:mem0/configs/llms/ollama.py | from typing import Optional
from mem0.configs.llms.base import BaseLlmConfig
class OllamaConfig(BaseLlmConfig):
    """Configuration for the Ollama LLM provider.

    Extends :class:`BaseLlmConfig` with the Ollama-only ``ollama_base_url``
    setting; every other parameter is forwarded to the base class unchanged.
    """

    def __init__(
        self,
        # Base parameters
        model: Optional[str] = None,
        temperature: float = 0.1,
        api_key: Optional[str] = None,
        max_tokens: int = 2000,
        top_p: float = 0.1,
        top_k: int = 1,
        enable_vision: bool = False,
        vision_details: Optional[str] = "auto",
        http_client_proxies: Optional[dict] = None,
        # Ollama-specific parameters
        ollama_base_url: Optional[str] = None,
    ):
        """Build an Ollama configuration.

        Args:
            model: Ollama model to use, defaults to None
            temperature: Controls randomness, defaults to 0.1
            api_key: Ollama API key, defaults to None
            max_tokens: Maximum tokens to generate, defaults to 2000
            top_p: Nucleus sampling parameter, defaults to 0.1
            top_k: Top-k sampling parameter, defaults to 1
            enable_vision: Enable vision capabilities, defaults to False
            vision_details: Vision detail level, defaults to "auto"
            http_client_proxies: HTTP client proxy settings, defaults to None
            ollama_base_url: Ollama base URL, defaults to None
        """
        # All provider-agnostic settings are handled by the base class.
        shared = dict(
            model=model,
            temperature=temperature,
            api_key=api_key,
            max_tokens=max_tokens,
            top_p=top_p,
            top_k=top_k,
            enable_vision=enable_vision,
            vision_details=vision_details,
            http_client_proxies=http_client_proxies,
        )
        super().__init__(**shared)
        # Ollama-specific settings
        self.ollama_base_url = ollama_base_url
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/llms/ollama.py",
"license": "Apache License 2.0",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:mem0/configs/llms/openai.py | from typing import Any, Callable, List, Optional
from mem0.configs.llms.base import BaseLlmConfig
class OpenAIConfig(BaseLlmConfig):
    """Configuration for the OpenAI and OpenRouter LLM providers.

    Extends :class:`BaseLlmConfig` with OpenAI/OpenRouter routing options and
    an optional response-monitoring callback; every other parameter is
    forwarded to the base class unchanged.
    """

    def __init__(
        self,
        # Base parameters
        model: Optional[str] = None,
        temperature: float = 0.1,
        api_key: Optional[str] = None,
        max_tokens: int = 2000,
        top_p: float = 0.1,
        top_k: int = 1,
        enable_vision: bool = False,
        vision_details: Optional[str] = "auto",
        http_client_proxies: Optional[dict] = None,
        # OpenAI-specific parameters
        openai_base_url: Optional[str] = None,
        models: Optional[List[str]] = None,
        route: Optional[str] = "fallback",
        openrouter_base_url: Optional[str] = None,
        site_url: Optional[str] = None,
        app_name: Optional[str] = None,
        store: bool = False,
        # Response monitoring callback
        response_callback: Optional[Callable[[Any, dict, dict], None]] = None,
    ):
        """Build an OpenAI/OpenRouter configuration.

        Args:
            model: OpenAI model to use, defaults to None
            temperature: Controls randomness, defaults to 0.1
            api_key: OpenAI API key, defaults to None
            max_tokens: Maximum tokens to generate, defaults to 2000
            top_p: Nucleus sampling parameter, defaults to 0.1
            top_k: Top-k sampling parameter, defaults to 1
            enable_vision: Enable vision capabilities, defaults to False
            vision_details: Vision detail level, defaults to "auto"
            http_client_proxies: HTTP client proxy settings, defaults to None
            openai_base_url: OpenAI API base URL, defaults to None
            models: List of models for OpenRouter, defaults to None
            route: OpenRouter route strategy, defaults to "fallback"
            openrouter_base_url: OpenRouter base URL, defaults to None
            site_url: Site URL for OpenRouter, defaults to None
            app_name: Application name for OpenRouter, defaults to None
            store: Whether to store completions with the provider, defaults to False
            response_callback: Optional callback for monitoring LLM responses.
        """
        # All provider-agnostic settings are handled by the base class.
        shared = dict(
            model=model,
            temperature=temperature,
            api_key=api_key,
            max_tokens=max_tokens,
            top_p=top_p,
            top_k=top_k,
            enable_vision=enable_vision,
            vision_details=vision_details,
            http_client_proxies=http_client_proxies,
        )
        super().__init__(**shared)
        # OpenAI / OpenRouter-specific settings
        self.openai_base_url = openai_base_url
        self.models = models
        self.route = route
        self.openrouter_base_url = openrouter_base_url
        self.site_url = site_url
        self.app_name = app_name
        self.store = store
        # Response monitoring hook
        self.response_callback = response_callback
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/llms/openai.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:mem0/configs/llms/vllm.py | from typing import Optional
from mem0.configs.llms.base import BaseLlmConfig
class VllmConfig(BaseLlmConfig):
    """Configuration for the vLLM provider.

    Extends :class:`BaseLlmConfig` with the vLLM-only base URL (defaulting to
    the local server); every other parameter is forwarded to the base class
    unchanged.
    """

    def __init__(
        self,
        # Base parameters
        model: Optional[str] = None,
        temperature: float = 0.1,
        api_key: Optional[str] = None,
        max_tokens: int = 2000,
        top_p: float = 0.1,
        top_k: int = 1,
        enable_vision: bool = False,
        vision_details: Optional[str] = "auto",
        http_client_proxies: Optional[dict] = None,
        # vLLM-specific parameters
        vllm_base_url: Optional[str] = None,
    ):
        """Build a vLLM configuration.

        Args:
            model: vLLM model to use, defaults to None
            temperature: Controls randomness, defaults to 0.1
            api_key: vLLM API key, defaults to None
            max_tokens: Maximum tokens to generate, defaults to 2000
            top_p: Nucleus sampling parameter, defaults to 0.1
            top_k: Top-k sampling parameter, defaults to 1
            enable_vision: Enable vision capabilities, defaults to False
            vision_details: Vision detail level, defaults to "auto"
            http_client_proxies: HTTP client proxy settings, defaults to None
            vllm_base_url: vLLM base URL; None falls back to the default local
                server at http://localhost:8000/v1
        """
        # All provider-agnostic settings are handled by the base class.
        shared = dict(
            model=model,
            temperature=temperature,
            api_key=api_key,
            max_tokens=max_tokens,
            top_p=top_p,
            top_k=top_k,
            enable_vision=enable_vision,
            vision_details=vision_details,
            http_client_proxies=http_client_proxies,
        )
        super().__init__(**shared)
        # vLLM-specific settings; base URL defaults to the local server.
        self.vllm_base_url = vllm_base_url or "http://localhost:8000/v1"
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/llms/vllm.py",
"license": "Apache License 2.0",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:tests/vector_stores/test_pgvector.py | import importlib
import sys
import unittest
import uuid
from unittest.mock import MagicMock, patch
from mem0.vector_stores.pgvector import PGVector
class TestPGVector(unittest.TestCase):
def setUp(self):
    """Create shared mock connections, pools, and sample data for each test."""
    # Fake DB connection whose cursor() hands back one shared mock cursor so
    # every test can inspect the SQL that was executed.
    self.mock_conn = MagicMock()
    self.mock_cursor = MagicMock()
    self.mock_conn.cursor.return_value = self.mock_cursor
    # Mock connection pool
    # psycopg2-style pools hand out connections via getconn(); psycopg3-style
    # pools expose connection() instead.
    self.mock_pool_psycopg2 = MagicMock()
    self.mock_pool_psycopg2.getconn.return_value = self.mock_conn
    self.mock_pool_psycopg = MagicMock()
    self.mock_pool_psycopg.connection.return_value = self.mock_conn
    self.mock_get_cursor = MagicMock()
    self.mock_get_cursor.return_value = self.mock_cursor
    # Mock connection string
    self.connection_string = "postgresql://user:pass@host:5432/db"
    # Test data
    self.test_vectors = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
    self.test_payloads = [{"key": "value1"}, {"key": "value2"}]
    self.test_ids = [str(uuid.uuid4()), str(uuid.uuid4())]
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
def test_init_with_individual_params_psycopg3(self, mock_psycopg_pool):
    """Test initialization with individual parameters using psycopg3.

    The constructor should assemble a conninfo URL from the individual
    parameters and open an eagerly-connected psycopg3 pool.
    """
    # Mock psycopg3 to be available
    mock_psycopg_pool.return_value = self.mock_pool_psycopg
    self.mock_cursor.fetchall.return_value = []  # No existing collections
    pgvector = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4,
    )
    # psycopg3 pools take a single conninfo string plus min_size/max_size
    # (not psycopg2's minconn/maxconn) and open=True connects immediately.
    mock_psycopg_pool.assert_called_once_with(
        conninfo="postgresql://test_user:test_pass@localhost:5432/test_db",
        min_size=1,
        max_size=4,
        open=True,
    )
    self.assertEqual(pgvector.collection_name, "test_collection")
    self.assertEqual(pgvector.embedding_model_dims, 3)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
def test_init_with_individual_params_psycopg2(self, mock_pcycopg2_pool):
    """Test initialization with individual parameters using psycopg2.

    The constructor should assemble a DSN from the individual parameters and
    build a psycopg2-style pool with minconn/maxconn.
    """
    mock_pcycopg2_pool.return_value = self.mock_pool_psycopg2
    self.mock_cursor.fetchall.return_value = []  # No existing collections
    pgvector = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4,
    )
    # psycopg2 pools use minconn/maxconn keyword names and a dsn string.
    mock_pcycopg2_pool.assert_called_once_with(
        minconn=1,
        maxconn=4,
        dsn="postgresql://test_user:test_pass@localhost:5432/test_db",
    )
    self.assertEqual(pgvector.collection_name, "test_collection")
    self.assertEqual(pgvector.embedding_model_dims, 3)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_create_col_psycopg3(self, mock_get_cursor, mock_connection_pool):
    """Test collection creation with psycopg3.

    Constructing PGVector should enable the pgvector extension and create
    the collection table when it does not already exist.
    """
    # Set up mock pool and cursor
    mock_pool = MagicMock()
    mock_connection_pool.return_value = mock_pool
    # Configure the _get_cursor mock to return our mock cursor
    mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
    mock_get_cursor.return_value.__exit__.return_value = None
    self.mock_cursor.fetchall.return_value = []  # No existing collections
    pgvector = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4
    )
    # Verify the _get_cursor context manager was called
    mock_get_cursor.assert_called()
    # Verify vector extension and table creation
    self.mock_cursor.execute.assert_any_call("CREATE EXTENSION IF NOT EXISTS vector")
    table_creation_calls = [call for call in self.mock_cursor.execute.call_args_list
                            if "CREATE TABLE IF NOT EXISTS test_collection" in str(call)]
    self.assertTrue(len(table_creation_calls) > 0)
    # Verify pgvector instance properties
    self.assertEqual(pgvector.collection_name, "test_collection")
    self.assertEqual(pgvector.embedding_model_dims, 3)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_create_col_psycopg3_with_explicit_pool(self, mock_get_cursor, mock_connection_pool):
    """
    Test collection creation with psycopg3 when an explicit psycopg_pool.ConnectionPool is provided.

    This ensures that PGVector uses the provided pool and still performs collection creation logic.
    """
    # Set up a real (mocked) psycopg_pool.ConnectionPool instance
    explicit_pool = MagicMock(name="ExplicitPsycopgPool")
    # The patch for ConnectionPool should not be used in this case, but we patch it for isolation
    mock_connection_pool.return_value = MagicMock(name="ShouldNotBeUsed")
    # Configure the _get_cursor mock to return our mock cursor as a context manager
    mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
    mock_get_cursor.return_value.__exit__.return_value = None
    # Simulate no existing collections in the database
    self.mock_cursor.fetchall.return_value = []
    # Pass the explicit pool to PGVector
    pgvector = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4,
        connection_pool=explicit_pool
    )
    # Verify the _get_cursor context manager was called
    mock_get_cursor.assert_called()
    # An explicit pool must short-circuit pool construction entirely.
    mock_connection_pool.assert_not_called()
    # Verify vector extension and table creation
    self.mock_cursor.execute.assert_any_call("CREATE EXTENSION IF NOT EXISTS vector")
    table_creation_calls = [call for call in self.mock_cursor.execute.call_args_list
                            if "CREATE TABLE IF NOT EXISTS test_collection" in str(call)]
    self.assertTrue(len(table_creation_calls) > 0)
    # Verify pgvector instance properties
    self.assertEqual(pgvector.collection_name, "test_collection")
    self.assertEqual(pgvector.embedding_model_dims, 3)
    # Ensure the pool used is the explicit one
    self.assertIs(pgvector.connection_pool, explicit_pool)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_create_col_psycopg2_with_explicit_pool(self, mock_get_cursor, mock_connection_pool):
    """
    Test collection creation with psycopg2 when an explicit psycopg2 ThreadedConnectionPool is provided.

    This ensures that PGVector uses the provided pool and still performs collection creation logic.
    """
    # Set up a real (mocked) psycopg2 ThreadedConnectionPool instance
    explicit_pool = MagicMock(name="ExplicitPsycopg2Pool")
    # The patch for ConnectionPool should not be used in this case, but we patch it for isolation
    mock_connection_pool.return_value = MagicMock(name="ShouldNotBeUsed")
    # Configure the _get_cursor mock to return our mock cursor as a context manager
    mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
    mock_get_cursor.return_value.__exit__.return_value = None
    # Simulate no existing collections in the database
    self.mock_cursor.fetchall.return_value = []
    # Pass the explicit pool to PGVector
    pgvector = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4,
        connection_pool=explicit_pool
    )
    # Verify the _get_cursor context manager was called
    mock_get_cursor.assert_called()
    # An explicit pool must short-circuit pool construction entirely.
    mock_connection_pool.assert_not_called()
    # Verify vector extension and table creation
    self.mock_cursor.execute.assert_any_call("CREATE EXTENSION IF NOT EXISTS vector")
    table_creation_calls = [call for call in self.mock_cursor.execute.call_args_list
                            if "CREATE TABLE IF NOT EXISTS test_collection" in str(call)]
    self.assertTrue(len(table_creation_calls) > 0)
    # Verify pgvector instance properties
    self.assertEqual(pgvector.collection_name, "test_collection")
    self.assertEqual(pgvector.embedding_model_dims, 3)
    # Ensure the pool used is the explicit one
    self.assertIs(pgvector.connection_pool, explicit_pool)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_create_col_psycopg2(self, mock_get_cursor, mock_connection_pool):
    """Test collection creation with psycopg2.

    Mirror of test_create_col_psycopg3 with PSYCOPG_VERSION patched to 2.
    """
    # Set up mock pool and cursor
    mock_pool = MagicMock()
    mock_connection_pool.return_value = mock_pool
    # Configure the _get_cursor mock to return our mock cursor
    mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
    mock_get_cursor.return_value.__exit__.return_value = None
    self.mock_cursor.fetchall.return_value = []  # No existing collections
    pgvector = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4
    )
    # Verify the _get_cursor context manager was called
    mock_get_cursor.assert_called()
    # Verify vector extension and table creation
    self.mock_cursor.execute.assert_any_call("CREATE EXTENSION IF NOT EXISTS vector")
    table_creation_calls = [call for call in self.mock_cursor.execute.call_args_list
                            if "CREATE TABLE IF NOT EXISTS test_collection" in str(call)]
    self.assertTrue(len(table_creation_calls) > 0)
    # Verify pgvector instance properties
    self.assertEqual(pgvector.collection_name, "test_collection")
    self.assertEqual(pgvector.embedding_model_dims, 3)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_insert_psycopg3(self, mock_get_cursor, mock_connection_pool):
    """Test vector insertion with psycopg3.

    psycopg3 inserts are expected to go through cursor.executemany with one
    (id, vector, payload) tuple per input vector.
    """
    # Set up mock pool and cursor
    mock_connection_pool.return_value = self.mock_pool_psycopg
    # Configure the _get_cursor mock to return our mock cursor
    mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
    mock_get_cursor.return_value.__exit__.return_value = None
    self.mock_cursor.fetchall.return_value = []  # No existing collections
    pgvector = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4
    )
    pgvector.insert(self.test_vectors, self.test_payloads, self.test_ids)
    # Verify the _get_cursor context manager was called
    mock_get_cursor.assert_called()
    # Verify insert query was executed (psycopg3 uses executemany)
    insert_calls = [call for call in self.mock_cursor.executemany.call_args_list
                    if "INSERT INTO test_collection" in str(call)]
    self.assertTrue(len(insert_calls) > 0)
    # Verify data format: second positional arg of the last executemany call
    # is the batch; each row starts with the supplied id.
    call_args = self.mock_cursor.executemany.call_args
    data_arg = call_args[0][1]
    self.assertEqual(len(data_arg), 2)
    self.assertEqual(data_arg[0][0], self.test_ids[0])
    self.assertEqual(data_arg[1][0], self.test_ids[1])
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_insert_psycopg2(self, mock_get_cursor, mock_connection_pool):
    """
    Test vector insertion with psycopg2.

    This test ensures that PGVector.insert uses psycopg2.extras.execute_values for batch inserts
    and that the data passed to execute_values is correctly formatted.
    """
    # --- Setup mocks for psycopg2 and its submodules ---
    mock_execute_values = MagicMock()
    mock_pool = MagicMock()
    # Mock psycopg2.extras with execute_values
    mock_psycopg2_extras = MagicMock()
    mock_psycopg2_extras.execute_values = mock_execute_values
    mock_psycopg2_pool = MagicMock()
    mock_psycopg2_pool.ThreadedConnectionPool = mock_pool
    # Mock psycopg2 root module
    mock_psycopg2 = MagicMock()
    mock_psycopg2.extras = mock_psycopg2_extras
    mock_psycopg2.pool = mock_psycopg2_pool
    # Patch sys.modules so that imports in PGVector use our mocks
    with patch.dict('sys.modules', {
        'psycopg': None,  # Ensure psycopg3 is not available
        'psycopg_pool': None,
        'psycopg.types.json': None,
        'psycopg2': mock_psycopg2,
        'psycopg2.extras': mock_psycopg2_extras,
        'psycopg2.pool': mock_psycopg2_pool
    }):
        # Force reload of PGVector to pick up the mocked modules
        # NOTE(review): the reload refreshes the module object in sys.modules,
        # but this test still instantiates the PGVector class imported at the
        # top of the file (the pre-reload class object) — verify the reloaded
        # module is the one whose execute_values binding is exercised.
        if 'mem0.vector_stores.pgvector' in sys.modules:
            importlib.reload(sys.modules['mem0.vector_stores.pgvector'])
        mock_connection_pool.return_value = self.mock_pool_psycopg
        mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
        mock_get_cursor.return_value.__exit__.return_value = None
        self.mock_cursor.fetchall.return_value = []
        pgvector = PGVector(
            dbname="test_db",
            collection_name="test_collection",
            embedding_model_dims=3,
            user="test_user",
            password="test_pass",
            host="localhost",
            port=5432,
            diskann=False,
            hnsw=False,
            minconn=1,
            maxconn=4
        )
        pgvector.insert(self.test_vectors, self.test_payloads, self.test_ids)
        mock_get_cursor.assert_called()
        # execute_values(cursor, sql, data): sql is arg 1, batch is arg 2.
        mock_execute_values.assert_called_once()
        call_args = mock_execute_values.call_args
        self.assertIn("INSERT INTO test_collection", call_args[0][1])
        # The data argument should be a list of tuples, one per vector
        data_arg = call_args[0][2]
        self.assertEqual(len(data_arg), 2)
        self.assertEqual(data_arg[0][0], self.test_ids[0])
        self.assertEqual(data_arg[1][0], self.test_ids[1])
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_search_psycopg3(self, mock_get_cursor, mock_connection_pool):
    """Test search with psycopg3.

    search() should issue a vector-distance SELECT and map each returned
    (id, score, payload) row to a result object in order.
    """
    # Set up mock pool and cursor
    mock_pool = MagicMock()
    mock_connection_pool.return_value = mock_pool
    # Configure the _get_cursor mock to return our mock cursor
    mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
    mock_get_cursor.return_value.__exit__.return_value = None
    # Rows are (id, distance score, payload); also consumed by the
    # constructor's collection check, which tolerates them.
    self.mock_cursor.fetchall.return_value = [
        (self.test_ids[0], 0.1, {"key": "value1"}),
        (self.test_ids[1], 0.2, {"key": "value2"}),
    ]
    pgvector = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4
    )
    results = pgvector.search("test query", [0.1, 0.2, 0.3], limit=2)
    # Verify the _get_cursor context manager was called
    mock_get_cursor.assert_called()
    # Verify search query was executed
    search_calls = [call for call in self.mock_cursor.execute.call_args_list
                    if "SELECT id, vector <=" in str(call)]
    self.assertTrue(len(search_calls) > 0)
    # Verify results
    self.assertEqual(len(results), 2)
    self.assertEqual(results[0].id, self.test_ids[0])
    self.assertEqual(results[0].score, 0.1)
    self.assertEqual(results[1].id, self.test_ids[1])
    self.assertEqual(results[1].score, 0.2)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_search_psycopg2(self, cursor_cm, pool_cls):
    """Search via psycopg2 should run a distance query and map rows to results."""
    # Route the patched pool class and _get_cursor context manager to the
    # shared mocks created in setUp.
    pool_cls.return_value = MagicMock()
    cursor_cm.return_value.__enter__.return_value = self.mock_cursor
    cursor_cm.return_value.__exit__.return_value = None
    # Two fake rows: (id, distance score, payload).
    self.mock_cursor.fetchall.return_value = [
        (self.test_ids[0], 0.1, {"key": "value1"}),
        (self.test_ids[1], 0.2, {"key": "value2"}),
    ]
    store = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4,
    )
    hits = store.search("test query", [0.1, 0.2, 0.3], limit=2)
    # The cursor context manager must have been used and a vector-distance
    # SELECT issued.
    cursor_cm.assert_called()
    self.assertTrue(
        any("SELECT id, vector <=" in str(c) for c in self.mock_cursor.execute.call_args_list)
    )
    # Rows come back in order with ids and scores preserved.
    self.assertEqual(len(hits), 2)
    self.assertEqual((hits[0].id, hits[0].score), (self.test_ids[0], 0.1))
    self.assertEqual((hits[1].id, hits[1].score), (self.test_ids[1], 0.2))
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_delete_psycopg3(self, cursor_cm, pool_cls):
    """delete() via psycopg3 should issue a DELETE against the collection."""
    # Wire the patched pool class and _get_cursor context manager to the
    # shared mocks from setUp.
    pool_cls.return_value = MagicMock()
    cursor_cm.return_value.__enter__.return_value = self.mock_cursor
    cursor_cm.return_value.__exit__.return_value = None
    self.mock_cursor.fetchall.return_value = []  # constructor sees no tables
    store = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4,
    )
    store.delete(self.test_ids[0])
    # Cursor context manager used, and a DELETE statement executed.
    cursor_cm.assert_called()
    self.assertTrue(
        any("DELETE FROM test_collection" in str(c)
            for c in self.mock_cursor.execute.call_args_list)
    )
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_delete_psycopg2(self, cursor_cm, pool_cls):
    """delete() via psycopg2 should issue a DELETE against the collection."""
    # Wire the patched pool class and _get_cursor context manager to the
    # shared mocks from setUp.
    pool_cls.return_value = MagicMock()
    cursor_cm.return_value.__enter__.return_value = self.mock_cursor
    cursor_cm.return_value.__exit__.return_value = None
    self.mock_cursor.fetchall.return_value = []  # constructor sees no tables
    store = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4,
    )
    store.delete(self.test_ids[0])
    # Cursor context manager used, and a DELETE statement executed.
    cursor_cm.assert_called()
    self.assertTrue(
        any("DELETE FROM test_collection" in str(c)
            for c in self.mock_cursor.execute.call_args_list)
    )
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_update_psycopg3(self, mock_get_cursor, mock_connection_pool):
    """Test update with psycopg3.

    update() with both a new vector and payload should issue UPDATE
    statements against the collection table.
    """
    # Set up mock pool and cursor
    mock_pool = MagicMock()
    mock_connection_pool.return_value = mock_pool
    # Configure the _get_cursor mock to return our mock cursor
    mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
    mock_get_cursor.return_value.__exit__.return_value = None
    self.mock_cursor.fetchall.return_value = []  # No existing collections
    pgvector = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4
    )
    updated_vector = [0.5, 0.6, 0.7]
    updated_payload = {"updated": "value"}
    pgvector.update(self.test_ids[0], vector=updated_vector, payload=updated_payload)
    # Verify the _get_cursor context manager was called
    mock_get_cursor.assert_called()
    # Verify update queries were executed
    update_calls = [call for call in self.mock_cursor.execute.call_args_list
                    if "UPDATE test_collection" in str(call)]
    self.assertTrue(len(update_calls) > 0)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_update_psycopg2(self, cursor_cm, pool_cls):
    """update() via psycopg2 should issue UPDATE statements for vector/payload."""
    # Wire the patched pool class and _get_cursor context manager to the
    # shared mocks from setUp.
    pool_cls.return_value = MagicMock()
    cursor_cm.return_value.__enter__.return_value = self.mock_cursor
    cursor_cm.return_value.__exit__.return_value = None
    self.mock_cursor.fetchall.return_value = []  # constructor sees no tables
    store = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4,
    )
    store.update(self.test_ids[0], vector=[0.5, 0.6, 0.7], payload={"updated": "value"})
    # Cursor context manager used, and at least one UPDATE statement executed.
    cursor_cm.assert_called()
    self.assertTrue(
        any("UPDATE test_collection" in str(c)
            for c in self.mock_cursor.execute.call_args_list)
    )
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_get_psycopg3(self, mock_get_cursor, mock_connection_pool):
    """Test get with psycopg3.

    get() should select a single (id, vector, payload) row by id and wrap
    it in a result object.
    """
    # Set up mock pool and cursor
    mock_pool = MagicMock()
    mock_connection_pool.return_value = mock_pool
    # Configure the _get_cursor mock to return our mock cursor
    mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
    mock_get_cursor.return_value.__exit__.return_value = None
    self.mock_cursor.fetchall.return_value = []  # No existing collections
    # Single row returned by the id lookup.
    self.mock_cursor.fetchone.return_value = (self.test_ids[0], [0.1, 0.2, 0.3], {"key": "value1"})
    pgvector = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4
    )
    result = pgvector.get(self.test_ids[0])
    # Verify the _get_cursor context manager was called
    mock_get_cursor.assert_called()
    # Verify get query was executed
    get_calls = [call for call in self.mock_cursor.execute.call_args_list
                 if "SELECT id, vector, payload" in str(call)]
    self.assertTrue(len(get_calls) > 0)
    # Verify result
    self.assertIsNotNone(result)
    self.assertEqual(result.id, self.test_ids[0])
    self.assertEqual(result.payload, {"key": "value1"})
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_get_psycopg2(self, cursor_cm, pool_cls):
    """get() via psycopg2 should fetch one row by id and wrap it in a result."""
    # Wire the patched pool class and _get_cursor context manager to the
    # shared mocks from setUp.
    pool_cls.return_value = MagicMock()
    cursor_cm.return_value.__enter__.return_value = self.mock_cursor
    cursor_cm.return_value.__exit__.return_value = None
    self.mock_cursor.fetchall.return_value = []  # constructor sees no tables
    # Single (id, vector, payload) row for the id lookup.
    self.mock_cursor.fetchone.return_value = (self.test_ids[0], [0.1, 0.2, 0.3], {"key": "value1"})
    store = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4,
    )
    record = store.get(self.test_ids[0])
    # Cursor context manager used, and the id-lookup SELECT executed.
    cursor_cm.assert_called()
    self.assertTrue(
        any("SELECT id, vector, payload" in str(c)
            for c in self.mock_cursor.execute.call_args_list)
    )
    self.assertIsNotNone(record)
    self.assertEqual(record.id, self.test_ids[0])
    self.assertEqual(record.payload, {"key": "value1"})
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_list_cols_psycopg3(self, mock_get_cursor, mock_connection_pool):
    """Test list_cols with psycopg3.

    list_cols() should query information_schema.tables and return the table
    names in the order the database reports them.
    """
    # Set up mock pool and cursor
    mock_pool = MagicMock()
    mock_connection_pool.return_value = mock_pool
    # Configure the _get_cursor mock to return our mock cursor
    mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
    mock_get_cursor.return_value.__exit__.return_value = None
    self.mock_cursor.fetchall.return_value = [("test_collection",), ("other_table",)]
    pgvector = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4
    )
    collections = pgvector.list_cols()
    # Verify the _get_cursor context manager was called (this assertion was
    # missing here but present in the psycopg2 twin of this test).
    mock_get_cursor.assert_called()
    # Verify list_cols query was executed
    list_calls = [call for call in self.mock_cursor.execute.call_args_list
                  if "SELECT table_name FROM information_schema.tables" in str(call)]
    self.assertTrue(len(list_calls) > 0)
    # Verify result
    self.assertEqual(collections, ["test_collection", "other_table"])
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_list_cols_psycopg2(self, mock_get_cursor, mock_connection_pool):
    """Test list_cols with psycopg2.

    list_cols() should query information_schema.tables and return the table
    names in the order the database reports them.
    """
    # Set up mock pool and cursor
    mock_pool = MagicMock()
    mock_connection_pool.return_value = mock_pool
    # Configure the _get_cursor mock to return our mock cursor
    mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
    mock_get_cursor.return_value.__exit__.return_value = None
    self.mock_cursor.fetchall.return_value = [("test_collection",), ("other_table",)]
    pgvector = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4
    )
    collections = pgvector.list_cols()
    # Verify the _get_cursor context manager was called
    mock_get_cursor.assert_called()
    # Verify list_cols query was executed
    list_calls = [call for call in self.mock_cursor.execute.call_args_list
                  if "SELECT table_name FROM information_schema.tables" in str(call)]
    self.assertTrue(len(list_calls) > 0)
    # Verify result
    self.assertEqual(collections, ["test_collection", "other_table"])
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_delete_col_psycopg3(self, cursor_cm, pool_cls):
    """delete_col() via psycopg3 should drop the collection table."""
    # Wire the patched pool class and _get_cursor context manager to the
    # shared mocks from setUp.
    pool_cls.return_value = MagicMock()
    cursor_cm.return_value.__enter__.return_value = self.mock_cursor
    cursor_cm.return_value.__exit__.return_value = None
    self.mock_cursor.fetchall.return_value = []  # constructor sees no tables
    store = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4,
    )
    store.delete_col()
    # Cursor context manager used, and a DROP TABLE statement executed.
    cursor_cm.assert_called()
    self.assertTrue(
        any("DROP TABLE IF EXISTS test_collection" in str(c)
            for c in self.mock_cursor.execute.call_args_list)
    )
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_delete_col_psycopg2(self, mock_get_cursor, mock_connection_pool):
    """Test delete_col with psycopg2.

    delete_col() should drop the collection table if it exists.
    """
    # Set up mock pool and cursor
    mock_pool = MagicMock()
    mock_connection_pool.return_value = mock_pool
    # Configure the _get_cursor mock to return our mock cursor
    mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
    mock_get_cursor.return_value.__exit__.return_value = None
    self.mock_cursor.fetchall.return_value = []  # No existing collections
    pgvector = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4
    )
    pgvector.delete_col()
    # Verify the _get_cursor context manager was called
    mock_get_cursor.assert_called()
    # Verify delete_col query was executed
    delete_calls = [call for call in self.mock_cursor.execute.call_args_list
                    if "DROP TABLE IF EXISTS test_collection" in str(call)]
    self.assertTrue(len(delete_calls) > 0)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_col_info_psycopg3(self, mock_get_cursor, mock_connection_pool):
    """Test col_info with psycopg3.

    col_info() should return a dict with the table name, row count, and
    on-disk size reported by the database.
    """
    # Set up mock pool and cursor
    mock_pool = MagicMock()
    mock_connection_pool.return_value = mock_pool
    # Configure the _get_cursor mock to return our mock cursor
    mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
    mock_get_cursor.return_value.__exit__.return_value = None
    self.mock_cursor.fetchall.return_value = []  # No existing collections
    # Stats row consumed by col_info: (name, row count, pretty size).
    self.mock_cursor.fetchone.return_value = ("test_collection", 100, "1 MB")
    pgvector = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4
    )
    info = pgvector.col_info()
    # Verify the _get_cursor context manager was called
    mock_get_cursor.assert_called()
    # Verify col_info query was executed
    info_calls = [call for call in self.mock_cursor.execute.call_args_list
                  if "SELECT table_name" in str(call)]
    self.assertTrue(len(info_calls) > 0)
    # Verify result
    self.assertEqual(info["name"], "test_collection")
    self.assertEqual(info["count"], 100)
    self.assertEqual(info["size"], "1 MB")
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_col_info_psycopg2(self, cursor_cm, pool_cls):
    """col_info() via psycopg2 should report table name, row count, and size."""
    # Wire the patched pool class and _get_cursor context manager to the
    # shared mocks from setUp.
    pool_cls.return_value = MagicMock()
    cursor_cm.return_value.__enter__.return_value = self.mock_cursor
    cursor_cm.return_value.__exit__.return_value = None
    self.mock_cursor.fetchall.return_value = []  # constructor sees no tables
    # Stats row consumed by col_info: (name, row count, pretty size).
    self.mock_cursor.fetchone.return_value = ("test_collection", 100, "1 MB")
    store = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4,
    )
    stats = store.col_info()
    # Cursor context manager used, and the stats SELECT executed.
    cursor_cm.assert_called()
    self.assertTrue(
        any("SELECT table_name" in str(c)
            for c in self.mock_cursor.execute.call_args_list)
    )
    self.assertEqual(stats["name"], "test_collection")
    self.assertEqual(stats["count"], 100)
    self.assertEqual(stats["size"], "1 MB")
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_list_psycopg3(self, mock_get_cursor, mock_connection_pool):
"""Test list with psycopg3."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [
(self.test_ids[0], [0.1, 0.2, 0.3], {"key": "value1"}),
(self.test_ids[1], [0.4, 0.5, 0.6], {"key": "value2"}),
]
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
results = pgvector.list(limit=2)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify list query was executed
list_calls = [call for call in self.mock_cursor.execute.call_args_list
if "SELECT id, vector, payload" in str(call)]
self.assertTrue(len(list_calls) > 0)
# Verify result
self.assertEqual(len(results), 1) # Returns list of lists
self.assertEqual(len(results[0]), 2)
self.assertEqual(results[0][0].id, self.test_ids[0])
self.assertEqual(results[0][1].id, self.test_ids[1])
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_list_psycopg2(self, mock_get_cursor, mock_connection_pool):
"""Test list with psycopg2."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [
(self.test_ids[0], [0.1, 0.2, 0.3], {"key": "value1"}),
(self.test_ids[1], [0.4, 0.5, 0.6], {"key": "value2"}),
]
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
results = pgvector.list(limit=2)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify list query was executed
list_calls = [call for call in self.mock_cursor.execute.call_args_list
if "SELECT id, vector, payload" in str(call)]
self.assertTrue(len(list_calls) > 0)
# Verify result
self.assertEqual(len(results), 1) # Returns list of lists
self.assertEqual(len(results[0]), 2)
self.assertEqual(results[0][0].id, self.test_ids[0])
self.assertEqual(results[0][1].id, self.test_ids[1])
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_search_with_filters_psycopg3(self, mock_get_cursor, mock_connection_pool):
"""Test search with filters using psycopg3."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [
(self.test_ids[0], 0.1, {"user_id": "alice", "agent_id": "agent1", "run_id": "run1"}),
]
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
filters = {"user_id": "alice", "agent_id": "agent1", "run_id": "run1"}
results = pgvector.search("test query", [0.1, 0.2, 0.3], limit=2, filters=filters)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify search query was executed with filters
search_calls = [call for call in self.mock_cursor.execute.call_args_list
if "SELECT id, vector <=" in str(call) and "WHERE" in str(call)]
self.assertTrue(len(search_calls) > 0)
# Verify results
self.assertEqual(len(results), 1)
self.assertEqual(results[0].id, self.test_ids[0])
self.assertEqual(results[0].score, 0.1)
self.assertEqual(results[0].payload["user_id"], "alice")
self.assertEqual(results[0].payload["agent_id"], "agent1")
self.assertEqual(results[0].payload["run_id"], "run1")
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_search_with_filters_psycopg2(self, mock_get_cursor, mock_connection_pool):
"""Test search with filters using psycopg2."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [
(self.test_ids[0], 0.1, {"user_id": "alice", "agent_id": "agent1", "run_id": "run1"}),
]
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
filters = {"user_id": "alice", "agent_id": "agent1", "run_id": "run1"}
results = pgvector.search("test query", [0.1, 0.2, 0.3], limit=2, filters=filters)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify search query was executed with filters
search_calls = [call for call in self.mock_cursor.execute.call_args_list
if "SELECT id, vector <=" in str(call) and "WHERE" in str(call)]
self.assertTrue(len(search_calls) > 0)
# Verify results
self.assertEqual(len(results), 1)
self.assertEqual(results[0].id, self.test_ids[0])
self.assertEqual(results[0].score, 0.1)
self.assertEqual(results[0].payload["user_id"], "alice")
self.assertEqual(results[0].payload["agent_id"], "agent1")
self.assertEqual(results[0].payload["run_id"], "run1")
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_search_with_single_filter_psycopg3(self, mock_get_cursor, mock_connection_pool):
"""Test search with single filter using psycopg3."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [
(self.test_ids[0], 0.1, {"user_id": "alice"}),
]
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
filters = {"user_id": "alice"}
results = pgvector.search("test query", [0.1, 0.2, 0.3], limit=2, filters=filters)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify search query was executed with single filter
search_calls = [call for call in self.mock_cursor.execute.call_args_list
if "SELECT id, vector <=" in str(call) and "WHERE" in str(call)]
self.assertTrue(len(search_calls) > 0)
# Verify results
self.assertEqual(len(results), 1)
self.assertEqual(results[0].id, self.test_ids[0])
self.assertEqual(results[0].score, 0.1)
self.assertEqual(results[0].payload["user_id"], "alice")
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_search_with_single_filter_psycopg2(self, mock_get_cursor, mock_connection_pool):
"""Test search with single filter using psycopg2."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [
(self.test_ids[0], 0.1, {"user_id": "alice"}),
]
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
filters = {"user_id": "alice"}
results = pgvector.search("test query", [0.1, 0.2, 0.3], limit=2, filters=filters)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify search query was executed with single filter
search_calls = [call for call in self.mock_cursor.execute.call_args_list
if "SELECT id, vector <=" in str(call) and "WHERE" in str(call)]
self.assertTrue(len(search_calls) > 0)
# Verify results
self.assertEqual(len(results), 1)
self.assertEqual(results[0].id, self.test_ids[0])
self.assertEqual(results[0].score, 0.1)
self.assertEqual(results[0].payload["user_id"], "alice")
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_search_with_no_filters_psycopg3(self, mock_get_cursor, mock_connection_pool):
"""Test search with no filters using psycopg3."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [
(self.test_ids[0], 0.1, {"key": "value1"}),
(self.test_ids[1], 0.2, {"key": "value2"}),
]
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
results = pgvector.search("test query", [0.1, 0.2, 0.3], limit=2, filters=None)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify search query was executed without WHERE clause
search_calls = [call for call in self.mock_cursor.execute.call_args_list
if "SELECT id, vector <=" in str(call) and "WHERE" not in str(call)]
self.assertTrue(len(search_calls) > 0)
# Verify results
self.assertEqual(len(results), 2)
self.assertEqual(results[0].id, self.test_ids[0])
self.assertEqual(results[0].score, 0.1)
self.assertEqual(results[1].id, self.test_ids[1])
self.assertEqual(results[1].score, 0.2)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_search_with_no_filters_psycopg2(self, mock_get_cursor, mock_connection_pool):
"""Test search with no filters using psycopg2."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [
(self.test_ids[0], 0.1, {"key": "value1"}),
(self.test_ids[1], 0.2, {"key": "value2"}),
]
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
results = pgvector.search("test query", [0.1, 0.2, 0.3], limit=2, filters=None)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify search query was executed without WHERE clause
search_calls = [call for call in self.mock_cursor.execute.call_args_list
if "SELECT id, vector <=" in str(call) and "WHERE" not in str(call)]
self.assertTrue(len(search_calls) > 0)
# Verify results
self.assertEqual(len(results), 2)
self.assertEqual(results[0].id, self.test_ids[0])
self.assertEqual(results[0].score, 0.1)
self.assertEqual(results[1].id, self.test_ids[1])
self.assertEqual(results[1].score, 0.2)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_list_with_filters_psycopg3(self, mock_get_cursor, mock_connection_pool):
"""Test list with filters using psycopg3."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [
(self.test_ids[0], [0.1, 0.2, 0.3], {"user_id": "alice", "agent_id": "agent1"}),
]
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
filters = {"user_id": "alice", "agent_id": "agent1"}
results = pgvector.list(filters=filters, limit=2)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify list query was executed with filters
list_calls = [call for call in self.mock_cursor.execute.call_args_list
if "SELECT id, vector, payload" in str(call) and "WHERE" in str(call)]
self.assertTrue(len(list_calls) > 0)
# Verify results
self.assertEqual(len(results), 1) # Returns list of lists
self.assertEqual(len(results[0]), 1)
self.assertEqual(results[0][0].id, self.test_ids[0])
self.assertEqual(results[0][0].payload["user_id"], "alice")
self.assertEqual(results[0][0].payload["agent_id"], "agent1")
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_list_with_filters_psycopg2(self, mock_get_cursor, mock_connection_pool):
"""Test list with filters using psycopg2."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [
(self.test_ids[0], [0.1, 0.2, 0.3], {"user_id": "alice", "agent_id": "agent1"}),
]
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
filters = {"user_id": "alice", "agent_id": "agent1"}
results = pgvector.list(filters=filters, limit=2)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify list query was executed with filters
list_calls = [call for call in self.mock_cursor.execute.call_args_list
if "SELECT id, vector, payload" in str(call) and "WHERE" in str(call)]
self.assertTrue(len(list_calls) > 0)
# Verify results
self.assertEqual(len(results), 1) # Returns list of lists
self.assertEqual(len(results[0]), 1)
self.assertEqual(results[0][0].id, self.test_ids[0])
self.assertEqual(results[0][0].payload["user_id"], "alice")
self.assertEqual(results[0][0].payload["agent_id"], "agent1")
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_list_with_single_filter_psycopg3(self, mock_get_cursor, mock_connection_pool):
"""Test list with single filter using psycopg3."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [
(self.test_ids[0], [0.1, 0.2, 0.3], {"user_id": "alice"}),
]
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
filters = {"user_id": "alice"}
results = pgvector.list(filters=filters, limit=2)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify list query was executed with single filter
list_calls = [call for call in self.mock_cursor.execute.call_args_list
if "SELECT id, vector, payload" in str(call) and "WHERE" in str(call)]
self.assertTrue(len(list_calls) > 0)
# Verify results
self.assertEqual(len(results), 1) # Returns list of lists
self.assertEqual(len(results[0]), 1)
self.assertEqual(results[0][0].id, self.test_ids[0])
self.assertEqual(results[0][0].payload["user_id"], "alice")
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_list_with_single_filter_psycopg2(self, mock_get_cursor, mock_connection_pool):
"""Test list with single filter using psycopg2."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [
(self.test_ids[0], [0.1, 0.2, 0.3], {"user_id": "alice"}),
]
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
filters = {"user_id": "alice"}
results = pgvector.list(filters=filters, limit=2)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify list query was executed with single filter
list_calls = [call for call in self.mock_cursor.execute.call_args_list
if "SELECT id, vector, payload" in str(call) and "WHERE" in str(call)]
self.assertTrue(len(list_calls) > 0)
# Verify results
self.assertEqual(len(results), 1) # Returns list of lists
self.assertEqual(len(results[0]), 1)
self.assertEqual(results[0][0].id, self.test_ids[0])
self.assertEqual(results[0][0].payload["user_id"], "alice")
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_list_with_no_filters_psycopg3(self, mock_get_cursor, mock_connection_pool):
"""Test list with no filters using psycopg3."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [
(self.test_ids[0], [0.1, 0.2, 0.3], {"key": "value1"}),
(self.test_ids[1], [0.4, 0.5, 0.6], {"key": "value2"}),
]
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
results = pgvector.list(filters=None, limit=2)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify list query was executed without WHERE clause
list_calls = [call for call in self.mock_cursor.execute.call_args_list
if "SELECT id, vector, payload" in str(call) and "WHERE" not in str(call)]
self.assertTrue(len(list_calls) > 0)
# Verify results
self.assertEqual(len(results), 1) # Returns list of lists
self.assertEqual(len(results[0]), 2)
self.assertEqual(results[0][0].id, self.test_ids[0])
self.assertEqual(results[0][1].id, self.test_ids[1])
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_list_with_no_filters_psycopg2(self, mock_get_cursor, mock_connection_pool):
"""Test list with no filters using psycopg2."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [
(self.test_ids[0], [0.1, 0.2, 0.3], {"key": "value1"}),
(self.test_ids[1], [0.4, 0.5, 0.6], {"key": "value2"}),
]
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
results = pgvector.list(filters=None, limit=2)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify list query was executed without WHERE clause
list_calls = [call for call in self.mock_cursor.execute.call_args_list
if "SELECT id, vector, payload" in str(call) and "WHERE" not in str(call)]
self.assertTrue(len(list_calls) > 0)
# Verify results
self.assertEqual(len(results), 1) # Returns list of lists
self.assertEqual(len(results[0]), 2)
self.assertEqual(results[0][0].id, self.test_ids[0])
self.assertEqual(results[0][1].id, self.test_ids[1])
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_reset_psycopg3(self, mock_get_cursor, mock_connection_pool):
"""Test reset with psycopg3."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = []
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
pgvector.reset()
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify reset operations were executed
drop_calls = [call for call in self.mock_cursor.execute.call_args_list
if "DROP TABLE IF EXISTS" in str(call)]
create_calls = [call for call in self.mock_cursor.execute.call_args_list
if "CREATE TABLE IF NOT EXISTS" in str(call)]
self.assertTrue(len(drop_calls) > 0)
self.assertTrue(len(create_calls) > 0)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_reset_psycopg2(self, mock_get_cursor, mock_connection_pool):
"""Test reset with psycopg2."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = []
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
pgvector.reset()
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify reset operations were executed
drop_calls = [call for call in self.mock_cursor.execute.call_args_list
if "DROP TABLE IF EXISTS" in str(call)]
create_calls = [call for call in self.mock_cursor.execute.call_args_list
if "CREATE TABLE IF NOT EXISTS" in str(call)]
self.assertTrue(len(drop_calls) > 0)
self.assertTrue(len(create_calls) > 0)
# Enhanced Tests for JSON Serialization
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
@patch('mem0.vector_stores.pgvector.Json')
def test_update_payload_psycopg3_json_handling(self, mock_json, mock_get_cursor, mock_connection_pool):
"""Test that psycopg3 update uses Json() wrapper for payload serialization."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [] # No existing collections
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
test_payload = {"test": "data", "number": 42}
pgvector.update("test-id-123", payload=test_payload)
# Verify Json() wrapper was used for psycopg3
mock_json.assert_called_once_with(test_payload)
# Verify the update query was executed
update_calls = [call for call in self.mock_cursor.execute.call_args_list
if "UPDATE test_collection SET payload" in str(call)]
self.assertTrue(len(update_calls) > 0)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
@patch('mem0.vector_stores.pgvector.Json')
def test_update_payload_psycopg2_json_handling(self, mock_json, mock_get_cursor, mock_connection_pool):
"""Test that psycopg2 update uses psycopg2.extras.Json() wrapper for payload serialization."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [] # No existing collections
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
test_payload = {"test": "data", "number": 42}
pgvector.update("test-id-123", payload=test_payload)
# Verify psycopg2.extras.Json() wrapper was used
mock_json.assert_called_once_with(test_payload)
# Verify the update query was executed
update_calls = [call for call in self.mock_cursor.execute.call_args_list
if "UPDATE test_collection SET payload" in str(call)]
self.assertTrue(len(update_calls) > 0)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
def test_transaction_rollback_on_error_psycopg2(self, mock_connection_pool):
"""Test that psycopg2 properly rolls back transactions on errors."""
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Set up mock connection that will raise an error only on delete
mock_conn = MagicMock()
mock_cursor = MagicMock()
mock_conn.cursor.return_value = mock_cursor
mock_pool.getconn.return_value = mock_conn
# Only raise exception on the delete operation, not during setup
def execute_side_effect(*args, **kwargs):
if args and "DELETE FROM" in str(args[0]):
raise Exception("Database error")
return MagicMock()
mock_cursor.execute.side_effect = execute_side_effect
self.mock_cursor.fetchall.return_value = [] # No existing collections initially
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
# Attempt an operation that will fail
with self.assertRaises(Exception) as context:
pgvector.delete("test-id")
self.assertIn("Database error", str(context.exception))
# Verify rollback was called
mock_conn.rollback.assert_called()
# Verify connection was returned to pool
mock_pool.putconn.assert_called_with(mock_conn)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
def test_commit_on_success_psycopg2(self, mock_connection_pool):
"""Test that psycopg2 properly commits transactions on success."""
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Set up mock connection for successful operation
mock_conn = MagicMock()
mock_cursor = MagicMock()
mock_conn.cursor.return_value = mock_cursor
mock_pool.getconn.return_value = mock_conn
self.mock_cursor.fetchall.return_value = [] # No existing collections initially
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
# Perform an operation that requires commit
pgvector.delete("test-id")
# Verify commit was called
mock_conn.commit.assert_called()
# Verify connection was returned to pool
mock_pool.putconn.assert_called_with(mock_conn)
# Enhanced Tests for Error Handling
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_pool_connection_error_handling(self, mock_get_cursor, mock_connection_pool):
"""Test handling of connection pool errors."""
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Use a flag to only raise the exception after PGVector is initialized
raise_on_search = {'active': False}
def get_cursor_side_effect(*args, **kwargs):
if raise_on_search['active']:
raise Exception("Connection pool exhausted")
return self.mock_cursor
mock_get_cursor.side_effect = get_cursor_side_effect
self.mock_cursor.fetchall.return_value = []
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
# Activate the exception for search only
raise_on_search['active'] = True
with self.assertRaises(Exception) as context:
pgvector.search("test query", [0.1, 0.2, 0.3])
self.assertIn("Connection pool exhausted", str(context.exception))
# Enhanced Tests for Vector and Payload Update Combinations
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_update_vector_only_psycopg3(self, mock_get_cursor, mock_connection_pool):
    """Test updating only vector without payload."""
    pool_stub = MagicMock()
    mock_connection_pool.return_value = pool_stub
    # Route the _get_cursor context manager to the shared cursor stub.
    mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
    mock_get_cursor.return_value.__exit__.return_value = None
    self.mock_cursor.fetchall.return_value = []  # fresh database, no collections yet
    store = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4
    )
    new_embedding = [0.1, 0.2, 0.3]
    store.update("test-id", vector=new_embedding)
    # Inspect every SQL statement the cursor executed: exactly the vector
    # column must have been updated, never the payload column.
    executed = [str(c) for c in self.mock_cursor.execute.call_args_list]
    vector_updates = [q for q in executed
                      if "UPDATE test_collection SET vector" in q and "payload" not in q]
    payload_updates = [q for q in executed
                       if "UPDATE test_collection SET payload" in q]
    self.assertTrue(len(vector_updates) > 0)
    self.assertEqual(len(payload_updates), 0)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_update_both_vector_and_payload_psycopg3(self, mock_get_cursor, mock_connection_pool):
    """Test updating both vector and payload.

    When update() receives both a vector and a payload, it should issue
    one UPDATE statement for the vector column and a separate UPDATE for
    the payload column.
    """
    # Set up mock pool and cursor
    mock_pool = MagicMock()
    mock_connection_pool.return_value = mock_pool
    # Configure the _get_cursor mock to return our mock cursor
    mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
    mock_get_cursor.return_value.__exit__.return_value = None
    self.mock_cursor.fetchall.return_value = []  # No existing collections
    pgvector = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4
    )
    test_vector = [0.1, 0.2, 0.3]
    test_payload = {"updated": True}
    pgvector.update("test-id", vector=test_vector, payload=test_payload)
    # Verify both vector and payload update queries were executed
    # by scanning every statement the cursor saw.
    vector_update_calls = [call for call in self.mock_cursor.execute.call_args_list
                           if "UPDATE test_collection SET vector" in str(call)]
    payload_update_calls = [call for call in self.mock_cursor.execute.call_args_list
                            if "UPDATE test_collection SET payload" in str(call)]
    self.assertTrue(len(vector_update_calls) > 0)
    self.assertTrue(len(payload_update_calls) > 0)
# Enhanced Tests for Connection String Handling
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
def test_connection_string_with_sslmode_psycopg3(self, mock_connection_pool):
    """Test connection string handling with SSL mode.

    An explicit connection_string should take precedence over the
    individual dbname/user/host kwargs, with sslmode appended to the
    conninfo handed to ConnectionPool.
    """
    mock_pool = MagicMock()
    mock_connection_pool.return_value = mock_pool
    self.mock_cursor.fetchall.return_value = []  # No existing collections
    connection_string = "postgresql://user:pass@localhost:5432/db"
    pgvector = PGVector(
        dbname="test_db",  # Will be overridden by connection_string
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=False,
        minconn=1,
        maxconn=4,
        sslmode="require",
        connection_string=connection_string
    )
    # Verify ConnectionPool was called with the connection string including sslmode
    expected_conn_string = f"{connection_string} sslmode=require"
    mock_connection_pool.assert_called_with(
        conninfo=expected_conn_string,
        min_size=1,
        max_size=4,
        open=True
    )
    self.assertEqual(pgvector.collection_name, "test_collection")
    self.assertEqual(pgvector.embedding_model_dims, 3)
# Enhanced Test for Index Creation with DiskANN
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_create_col_with_diskann_psycopg3(self, mock_get_cursor, mock_connection_pool):
    """Test collection creation with DiskANN index.

    With diskann=True and the vectorscale extension reported installed,
    the constructor should issue a CREATE INDEX ... USING diskann query.
    """
    # Set up mock pool and cursor
    mock_pool = MagicMock()
    mock_connection_pool.return_value = mock_pool
    # Configure the _get_cursor mock to return our mock cursor
    mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
    mock_get_cursor.return_value.__exit__.return_value = None
    # Mock vectorscale extension as available
    self.mock_cursor.fetchall.return_value = []  # No existing collections
    self.mock_cursor.fetchone.return_value = ("vectorscale",)  # Extension exists
    pgvector = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=True,  # Enable DiskANN
        hnsw=False,
        minconn=1,
        maxconn=4
    )
    # Verify DiskANN index creation query was executed
    diskann_calls = [call for call in self.mock_cursor.execute.call_args_list
                     if "USING diskann" in str(call)]
    self.assertTrue(len(diskann_calls) > 0)
    self.assertEqual(pgvector.collection_name, "test_collection")
    self.assertEqual(pgvector.embedding_model_dims, 3)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_create_col_with_hnsw_psycopg3(self, mock_get_cursor, mock_connection_pool):
    """Test collection creation with HNSW index.

    With hnsw=True the constructor should issue a CREATE INDEX ...
    USING hnsw statement while creating the collection table.
    """
    # Set up mock pool and cursor
    mock_pool = MagicMock()
    mock_connection_pool.return_value = mock_pool
    # Configure the _get_cursor mock to return our mock cursor
    mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
    mock_get_cursor.return_value.__exit__.return_value = None
    self.mock_cursor.fetchall.return_value = []  # No existing collections
    pgvector = PGVector(
        dbname="test_db",
        collection_name="test_collection",
        embedding_model_dims=3,
        user="test_user",
        password="test_pass",
        host="localhost",
        port=5432,
        diskann=False,
        hnsw=True,  # Enable HNSW
        minconn=1,
        maxconn=4
    )
    # Verify HNSW index creation query was executed
    hnsw_calls = [call for call in self.mock_cursor.execute.call_args_list
                  if "USING hnsw" in str(call)]
    self.assertTrue(len(hnsw_calls) > 0)
    self.assertEqual(pgvector.collection_name, "test_collection")
    self.assertEqual(pgvector.embedding_model_dims, 3)
# Enhanced Test for Pool Cleanup
def test_pool_cleanup_psycopg3(self):
    """Test that psycopg3 pool is properly closed on object deletion."""
    with patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3), \
         patch('mem0.vector_stores.pgvector.ConnectionPool') as mock_connection_pool:
        pool_stub = MagicMock()
        mock_connection_pool.return_value = pool_stub
        self.mock_cursor.fetchall.return_value = []  # fresh database, no collections
        store = PGVector(
            dbname="test_db",
            collection_name="test_collection",
            embedding_model_dims=3,
            user="test_user",
            password="test_pass",
            host="localhost",
            port=5432,
            diskann=False,
            hnsw=False,
            minconn=1,
            maxconn=4
        )
        # Dropping the last reference triggers __del__, which must close
        # the psycopg3 pool via pool.close().
        del store
        pool_stub.close.assert_called()
def test_pool_cleanup_psycopg2(self):
    """Test that psycopg2 pool is properly closed on object deletion."""
    with patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2), \
         patch('mem0.vector_stores.pgvector.ConnectionPool') as mock_connection_pool:
        pool_stub = MagicMock()
        mock_connection_pool.return_value = pool_stub
        self.mock_cursor.fetchall.return_value = []  # fresh database, no collections
        store = PGVector(
            dbname="test_db",
            collection_name="test_collection",
            embedding_model_dims=3,
            user="test_user",
            password="test_pass",
            host="localhost",
            port=5432,
            diskann=False,
            hnsw=False,
            minconn=1,
            maxconn=4
        )
        # Dropping the last reference triggers __del__, which for psycopg2
        # must close the pool via pool.closeall().
        del store
        pool_stub.closeall.assert_called()
def tearDown(self) -> None:
    """Clean up after each test."""
    # Mocks are recreated in setUp for every test, so there is no
    # per-test state to release here.
    pass
| {
"repo_id": "mem0ai/mem0",
"file_path": "tests/vector_stores/test_pgvector.py",
"license": "Apache License 2.0",
"lines": 1860,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mem0ai/mem0:examples/misc/personalized_search.py | """
Personalized Search Agent with Mem0 + Tavily
Uses LangChain agent pattern with Tavily tools for personalized search based on user memories stored in Mem0.
"""
from dotenv import load_dotenv
from mem0 import MemoryClient
from langchain.agents import create_openai_tools_agent, AgentExecutor
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import ChatOpenAI
from langchain_tavily import TavilySearch
from langchain.schema import HumanMessage
from datetime import datetime
import logging
# Load environment variables
load_dotenv()
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# Initialize clients
# NOTE(review): MemoryClient() presumably picks up MEM0_API_KEY from the
# environment loaded above - confirm before running.
mem0_client = MemoryClient()
# Set custom instructions to infer facts and memory to understand user preferences
# (this mutates project-wide settings on the Mem0 platform at import time).
mem0_client.project.update(
    custom_instructions='''
INFER THE MEMORIES FROM USER QUERIES EVEN IF IT'S A QUESTION.
We are building the personalized search for which we need to understand about user's preferences and life
and extract facts and memories out of it accordingly.
BE IT TIME, LOCATION, USER'S PERSONAL LIFE, CHOICES, USER'S PREFERENCES, we need to store those for better personalized search.
'''
)
# Low temperature (0.2) keeps query rewriting fairly deterministic.
llm = ChatOpenAI(model="gpt-4.1-nano-2025-04-14", temperature=0.2)
def setup_user_history(user_id: str) -> None:
    """Simulate realistic user conversation history.

    Seeds Mem0 with five short conversations (location/family, dietary
    preferences, allergies, work style, hobbies) so later searches have
    memories to personalize against.

    Args:
        user_id: Mem0 user id the conversations are stored under.
    """
    conversations = [
        [
            {"role": "user", "content": "What will be the weather today at Los Angeles? I need to go to pick up my daughter from office."},
            {"role": "assistant", "content": "I'll check the weather in LA for you, so that you can plan you daughter's pickup accordingly."}
        ],
        [
            {"role": "user", "content": "I'm looking for vegan restaurants in Santa Monica"},
            {"role": "assistant", "content": "I'll find great vegan options in Santa Monica."}
        ],
        [
            {"role": "user", "content": "My 7-year-old daughter is allergic to peanuts"},
            {"role": "assistant",
             "content": "I'll remember to check for peanut-free options in future recommendations."}
        ],
        [
            {"role": "user", "content": "I work remotely and need coffee shops with good wifi"},
            {"role": "assistant", "content": "I'll find remote-work-friendly coffee shops."}
        ],
        [
            {"role": "user", "content": "We love hiking and outdoor activities on weekends"},
            {"role": "assistant", "content": "Great! I'll keep your outdoor activity preferences in mind."}
        ]
    ]
    logger.info(f"Setting up user history for {user_id}")
    # Each conversation is added separately so Mem0 extracts facts per exchange.
    for conversation in conversations:
        mem0_client.add(conversation, user_id=user_id)
def get_user_context(user_id, query):
    """Retrieve relevant user memories from Mem0"""
    try:
        # v2 search scopes results to this user via an AND filter.
        scoped_filters = {
            "AND": [
                {"user_id": user_id}
            ]
        }
        memories = mem0_client.search(
            query=query,
            version="v2",
            filters=scoped_filters
        )
        if not memories:
            logger.info(f"No relevant memories found for user {user_id}")
            return "No previous user context available."
        logger.info(f"Found {len(memories)} relevant memories for user {user_id}")
        # One bullet per remembered fact.
        return "\n".join(f"- {memory['memory']}" for memory in memories)
    except Exception as e:
        logger.error(f"Error retrieving user context: {e}")
        return "Error retrieving user context."
def create_personalized_search_agent(user_context):
    """Create a LangChain agent for personalized search using Tavily.

    Args:
        user_context: Bullet-list string of the user's memories (from
            get_user_context) that is baked into the system prompt.

    Returns:
        An AgentExecutor wired with a TavilySearch tool whose prompt
        instructs the LLM to rewrite queries using the user context.
    """
    # Create Tavily search tool
    tavily_search = TavilySearch(
        max_results=10,
        search_depth="advanced",
        include_answer=True,
        topic="general"
    )
    tools = [tavily_search]
    # Create personalized search prompt; the user context is interpolated
    # directly into the system message via the f-string.
    prompt = ChatPromptTemplate.from_messages([
        ("system", f"""You are a personalized search assistant. You help users find information that's relevant to their specific context and preferences.
USER CONTEXT AND PREFERENCES:
{user_context}
YOUR ROLE:
1. Analyze the user's query and their personal context/preferences above
2. Look for patterns in the context to understand their preferences, location, lifestyle, family situation, etc.
3. Create enhanced search queries that incorporate relevant personal context you discover
4. Use the tavily_search tool everytime with enhanced queries to find personalized results
INSTRUCTIONS:
- Study the user memories carefully to understand their situation
- If any questions ask something related to nearby, close to, etc. refer to previous user context for identifying locations and enhance search query based on that.
- If memories mention specific locations, consider them for local searches
- If memories reveal dietary preferences or restrictions, factor those in for food-related queries
- If memories show family context, consider family-friendly options
- If memories indicate work style or interests, incorporate those when relevant
- Use tavily_search tool everytime with enhanced queries (based on above context)
- Always explain which specific memories led you to personalize the search in certain ways
Do NOT assume anything not present in the user memories."""),
        MessagesPlaceholder(variable_name="messages"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ])
    # Create agent
    agent = create_openai_tools_agent(llm=llm, tools=tools, prompt=prompt)
    # return_intermediate_steps=True lets callers inspect the tool calls.
    agent_executor = AgentExecutor(
        agent=agent,
        tools=tools,
        verbose=True,
        return_intermediate_steps=True
    )
    return agent_executor
def conduct_personalized_search(user_id, query):
    """
    Personalized search workflow using LangChain agent + Tavily + Mem0.

    Args:
        user_id: Mem0 user whose memories personalize the search.
        query: Raw search query from the user.

    Returns:
        On success, a dict with "agent_response" (as before) plus the
        diagnostics that were previously computed but discarded:
        "search_queries_used", "total_results", and "duration_seconds".
        On failure, {"error": <message>}.
    """
    logger.info(f"Starting personalized search for user {user_id}: {query}")
    start_time = datetime.now()
    try:
        # Get user context from Mem0
        user_context = get_user_context(user_id, query)
        # Create personalized search agent
        agent_executor = create_personalized_search_agent(user_context)
        # Run the agent
        response = agent_executor.invoke({
            "messages": [HumanMessage(content=query)]
        })
        # Extract search details from intermediate steps
        search_queries_used = []
        total_results = 0
        for step in response.get("intermediate_steps", []):
            tool_call, tool_output = step
            if hasattr(tool_call, 'tool') and tool_call.tool == "tavily_search":
                search_queries_used.append(tool_call.tool_input.get('query', ''))
                if isinstance(tool_output, dict) and 'results' in tool_output:
                    total_results += len(tool_output.get('results', []))
        # Store this search interaction in Mem0 for user preferences
        store_search_interaction(user_id, query, response['output'])
        duration = (datetime.now() - start_time).total_seconds()
        # Fix: the collected diagnostics were previously dropped on the floor;
        # expose them alongside the answer (existing key unchanged).
        results = {
            "agent_response": response['output'],
            "search_queries_used": search_queries_used,
            "total_results": total_results,
            "duration_seconds": duration,
        }
        logger.info(f"Personalized search completed in {duration:.2f}s")
        return results
    except Exception as e:
        logger.error(f"Error in personalized search workflow: {e}")
        return {"error": str(e)}
def store_search_interaction(user_id: str, original_query: str, agent_response: str) -> None:
    """Store search interaction in Mem0 for future personalization.

    Records the query/answer pair as a two-turn conversation so future
    searches can build on what the user asked for.

    Args:
        user_id: Mem0 user id the interaction is stored under.
        original_query: The user's raw search query.
        agent_response: The agent's final answer text.
    """
    try:
        interaction = [
            {"role": "user", "content": f"Searched for: {original_query}"},
            {"role": "assistant", "content": f"Provided personalized results based on user preferences: {agent_response}"}
        ]
        mem0_client.add(messages=interaction, user_id=user_id)
        logger.info(f"Stored search interaction for user {user_id}")
    except Exception as e:
        # Best-effort: a failed write must not break the search flow.
        logger.error(f"Error storing search interaction: {e}")
def personalized_search_agent() -> None:
    """Example of the personalized search agent.

    Seeds a demo user's history, then runs two searches whose results
    should be personalized by the stored memories.
    """
    user_id = "john"
    # Setup user history
    print("\nSetting up user history from past conversations...")
    setup_user_history(user_id)  # This is one-time setup
    # Test personalized searches
    test_queries = [
        "good coffee shops nearby for working",
        "what can we gift our daughter for birthday? what's trending?"
    ]
    for i, query in enumerate(test_queries, 1):
        print(f"\n ----- {i}️⃣ PERSONALIZED SEARCH -----")
        print(f"Query: '{query}'")
        # Run personalized search
        results = conduct_personalized_search(user_id, query)
        if results.get("error"):
            print(f"Error: {results['error']}")
        else:
            print(f"Agent response: {results['agent_response']}")
# Entry point: run the end-to-end demo when executed as a script.
if __name__ == "__main__":
    personalized_search_agent()
| {
"repo_id": "mem0ai/mem0",
"file_path": "examples/misc/personalized_search.py",
"license": "Apache License 2.0",
"lines": 192,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:examples/multiagents/llamaindex_learning_system.py | """
Multi-Agent Personal Learning System: Mem0 + LlamaIndex AgentWorkflow Example
INSTALLATIONS:
!pip install llama-index-core llama-index-memory-mem0 openai
You need MEM0_API_KEY and OPENAI_API_KEY to run the example.
"""
import asyncio
import logging
from datetime import datetime
from dotenv import load_dotenv
# LlamaIndex imports
from llama_index.core.agent.workflow import AgentWorkflow, FunctionAgent
from llama_index.core.tools import FunctionTool
from llama_index.llms.openai import OpenAI
# Memory integration
from llama_index.memory.mem0 import Mem0Memory
load_dotenv()
# Configure logging: mirror everything to stdout and a local log file.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    handlers=[logging.StreamHandler(), logging.FileHandler("learning_system.log")],
)
logger = logging.getLogger(__name__)
class MultiAgentLearningSystem:
    """
    Multi-Agent Architecture:
    - TutorAgent: Main teaching and explanations
    - PracticeAgent: Exercises and skill reinforcement
    - Shared Memory: Both agents learn from student interactions

    Both agents are wired into a single AgentWorkflow that passes the same
    Mem0-backed memory object, so insights from one agent are visible to
    the other on later turns.
    """

    def __init__(self, student_id: str):
        # Low temperature keeps tutoring output fairly deterministic.
        self.student_id = student_id
        self.llm = OpenAI(model="gpt-4.1-nano-2025-04-14", temperature=0.2)
        # Memory context for this student; scopes Mem0 storage/retrieval.
        self.memory_context = {"user_id": student_id, "app": "learning_assistant"}
        self.memory = Mem0Memory.from_client(context=self.memory_context)
        self._setup_agents()

    def _setup_agents(self) -> None:
        """Setup two agents that work together and share memory"""
        # TOOLS
        async def assess_understanding(topic: str, student_response: str) -> str:
            """Assess student's understanding of a topic and save insights"""
            # Simulate assessment logic - simple keyword heuristics, no LLM call.
            if "confused" in student_response.lower() or "don't understand" in student_response.lower():
                assessment = f"STRUGGLING with {topic}: {student_response}"
                insight = f"Student needs more help with {topic}. Prefers step-by-step explanations."
            elif "makes sense" in student_response.lower() or "got it" in student_response.lower():
                assessment = f"UNDERSTANDS {topic}: {student_response}"
                insight = f"Student grasped {topic} quickly. Can move to advanced concepts."
            else:
                assessment = f"PARTIAL understanding of {topic}: {student_response}"
                insight = f"Student has basic understanding of {topic}. Needs reinforcement."
            return f"Assessment: {assessment}\nInsight saved: {insight}"

        async def track_progress(topic: str, success_rate: str) -> str:
            """Track learning progress and identify patterns"""
            progress_note = f"Progress on {topic}: {success_rate} - {datetime.now().strftime('%Y-%m-%d')}"
            return f"Progress tracked: {progress_note}"

        # Convert to FunctionTools (shared by both agents)
        tools = [
            FunctionTool.from_defaults(async_fn=assess_understanding),
            FunctionTool.from_defaults(async_fn=track_progress),
        ]
        # === AGENTS ===
        # Tutor Agent - Main teaching and explanation
        self.tutor_agent = FunctionAgent(
            name="TutorAgent",
            description="Primary instructor that explains concepts and adapts to student needs",
            system_prompt="""
            You are a patient, adaptive programming tutor. Your key strength is REMEMBERING and BUILDING on previous interactions.
            Key Behaviors:
            1. Always check what the student has learned before (use memory context)
            2. Adapt explanations based on their preferred learning style
            3. Reference previous struggles or successes
            4. Build progressively on past lessons
            5. Use assess_understanding to evaluate responses and save insights
            MEMORY-DRIVEN TEACHING:
            - "Last time you struggled with X, so let's approach Y differently..."
            - "Since you prefer visual examples, here's a diagram..."
            - "Building on the functions we covered yesterday..."
            When student shows understanding, hand off to PracticeAgent for exercises.
            """,
            tools=tools,
            llm=self.llm,
            can_handoff_to=["PracticeAgent"],
        )
        # Practice Agent - Exercises and reinforcement
        self.practice_agent = FunctionAgent(
            name="PracticeAgent",
            description="Creates practice exercises and tracks progress based on student's learning history",
            system_prompt="""
            You create personalized practice exercises based on the student's learning history and current level.
            Key Behaviors:
            1. Generate problems that match their skill level (from memory)
            2. Focus on areas they've struggled with previously
            3. Gradually increase difficulty based on their progress
            4. Use track_progress to record their performance
            5. Provide encouraging feedback that references their growth
            MEMORY-DRIVEN PRACTICE:
            - "Let's practice loops again since you wanted more examples..."
            - "Here's a harder version of the problem you solved yesterday..."
            - "You've improved a lot in functions, ready for the next level?"
            After practice, can hand back to TutorAgent for concept review if needed.
            """,
            tools=tools,
            llm=self.llm,
            can_handoff_to=["TutorAgent"],
        )
        # Create the multi-agent workflow; the tutor is the entry point.
        self.workflow = AgentWorkflow(
            agents=[self.tutor_agent, self.practice_agent],
            root_agent=self.tutor_agent.name,
            initial_state={
                "current_topic": "",
                "student_level": "beginner",
                "learning_style": "unknown",
                "session_goals": [],
            },
        )

    async def start_learning_session(self, topic: str, student_message: str = "") -> str:
        """
        Start a learning session with multi-agent memory-aware teaching
        """
        if student_message:
            request = f"I want to learn about {topic}. {student_message}"
        else:
            request = f"I want to learn about {topic}."
        # The magic happens here - multi-agent memory is automatically shared!
        response = await self.workflow.run(user_msg=request, memory=self.memory)
        return str(response)

    async def get_learning_history(self) -> str:
        """Show what the system remembers about this student"""
        try:
            # Search memory for learning patterns
            # NOTE(review): the query is hard-coded to "learning machine
            # learning" - confirm this is intentional rather than a placeholder.
            memories = self.memory.search(user_id=self.student_id, query="learning machine learning")
            if memories and len(memories):
                history = "\n".join(f"- {m['memory']}" for m in memories)
                return history
            else:
                return "No learning history found yet. Let's start building your profile!"
        except Exception as e:
            # Surface retrieval failures as text instead of raising.
            return f"Memory retrieval error: {str(e)}"
async def run_learning_agent():
    """Drive two memory-linked tutoring sessions and dump what was retained."""
    system = MultiAgentLearningSystem(student_id="Alexander")
    # First session
    logger.info("Session 1:")
    first_reply = await system.start_learning_session(
        "Vision Language Models",
        "I'm new to machine learning but I have good hold on Python and have 4 years of work experience.",
    )
    logger.info(first_reply)
    # Second session - multi-agent memory will remember the first
    logger.info("\nSession 2:")
    second_reply = await system.start_learning_session("Machine Learning", "what all did I cover so far?")
    logger.info(second_reply)
    # Show what the multi-agent system remembers
    logger.info("\nLearning History:")
    logger.info(await system.get_learning_history())
if __name__ == "__main__":
    """Run the example"""
    logger.info("Multi-agent Learning System powered by LlamaIndex and Mem0")
    # Hand the coroutine straight to asyncio.run; no wrapper needed.
    asyncio.run(run_learning_agent())
| {
"repo_id": "mem0ai/mem0",
"file_path": "examples/multiagents/llamaindex_learning_system.py",
"license": "Apache License 2.0",
"lines": 165,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:examples/misc/multillm_memory.py | """
Multi-LLM Research Team with Shared Knowledge Base
Use Case: AI Research Team where each model has different strengths:
- GPT-4: Technical analysis and code review
- Claude: Writing and documentation
All models share a common knowledge base, building on each other's work.
Example: GPT-4 analyzes a tech stack → Claude writes documentation →
Data analyst analyzes user data → All models can reference previous research.
"""
import logging
from dotenv import load_dotenv
from litellm import completion
from mem0 import MemoryClient
load_dotenv()
# Configure logging: mirror everything to stdout and a local log file.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    handlers=[logging.StreamHandler(), logging.FileHandler("research_team.log")],
)
logger = logging.getLogger(__name__)
# Initialize memory client (platform version)
memory = MemoryClient()
# Research team models with specialized roles.
# Keys are specialist ids accepted by research_with_specialist();
# "model" is the litellm model name and "role" seeds the system prompt.
RESEARCH_TEAM = {
    "tech_analyst": {
        "model": "gpt-4.1-nano-2025-04-14",
        "role": "Technical Analyst - Code review, architecture, and technical decisions",
    },
    "writer": {
        "model": "claude-3-5-sonnet-20241022",
        "role": "Documentation Writer - Clear explanations and user guides",
    },
    "data_analyst": {
        "model": "gpt-4.1-nano-2025-04-14",
        "role": "Data Analyst - Insights, trends, and data-driven recommendations",
    },
}
def get_team_knowledge(topic: str, project_id: str) -> str:
    """Get relevant research from the team's shared knowledge base"""
    memories = memory.search(query=topic, user_id=project_id, limit=5)
    if not memories:
        return "Team Knowledge Base: Empty - starting fresh research"
    # Render one bullet per finding, tagged with the contributing specialist.
    lines = ["Team Knowledge Base:"]
    for mem in memories:
        if "memory" in mem:
            metadata = mem.get("metadata", {})
            contributor = metadata.get("contributor", "Unknown")
            lines.append(f"• [{contributor}] {mem['memory']}")
    return "\n".join(lines) + "\n"
def research_with_specialist(task: str, specialist: str, project_id: str) -> str:
    """Assign research task to specialist with access to team knowledge.

    Args:
        task: Research task description for the specialist.
        specialist: Key into RESEARCH_TEAM selecting model and role.
        project_id: Project-level Mem0 user id for the shared knowledge base.

    Returns:
        The specialist's answer text, or an error string for an unknown
        specialist key.
    """
    if specialist not in RESEARCH_TEAM:
        return f"Unknown specialist. Available: {list(RESEARCH_TEAM.keys())}"
    # Get team's accumulated knowledge
    team_knowledge = get_team_knowledge(task, project_id)
    # Specialist role and model
    spec_info = RESEARCH_TEAM[specialist]
    system_prompt = f"""You are the {spec_info['role']}.
{team_knowledge}
Build upon the team's existing research. Reference previous findings when relevant.
Provide actionable insights in your area of expertise."""
    # Call the specialist's model
    response = completion(
        model=spec_info["model"],
        messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": task}],
    )
    result = response.choices[0].message.content
    # Store research in shared knowledge base using both user_id and agent_id
    research_entry = [{"role": "user", "content": f"Task: {task}"}, {"role": "assistant", "content": result}]
    memory.add(
        research_entry,
        user_id=project_id,  # Project-level memory
        agent_id=specialist,  # Agent-specific memory
        metadata={"contributor": specialist, "task_type": "research", "model_used": spec_info["model"]},
    )
    return result
def show_team_knowledge(project_id: str):
    """Display the team's accumulated research, grouped by contributor.

    Args:
        project_id: Project-level Mem0 user id whose memories to summarize.
    """
    memories = memory.get_all(user_id=project_id)
    if not memories:
        logger.info("No research found for this project")
        return
    logger.info(f"Team Research Summary (Project: {project_id}):")
    # Group by contributor; setdefault replaces the manual
    # "create the list on first sight" branching of the original.
    by_contributor = {}
    for mem in memories:
        if "metadata" in mem and mem["metadata"]:
            contributor = mem["metadata"].get("contributor", "Unknown")
            by_contributor.setdefault(contributor, []).append(mem.get("memory", ""))
    for contributor, research_items in by_contributor.items():
        logger.info(f"{contributor.upper()}:")
        for i, item in enumerate(research_items[:3], 1):  # Show latest 3
            logger.info(f" {i}. {item[:100]}...")
def demo_research_team() -> None:
    """Demo: Building a SaaS product with the research team.

    Runs a four-stage pipeline where each specialist can see (and build
    on) everything previously stored in the shared knowledge base.
    """
    project = "saas_product_research"
    # Define research pipeline; stages run in order so later specialists
    # can reference earlier findings.
    research_pipeline = [
        {
            "stage": "Technical Architecture",
            "specialist": "tech_analyst",
            "task": "Analyze the best tech stack for a multi-tenant SaaS platform handling 10k+ users. Consider scalability, cost, and development speed.",
        },
        {
            "stage": "Product Documentation",
            "specialist": "writer",
            "task": "Based on the technical analysis, write a clear product overview and user onboarding guide for our SaaS platform.",
        },
        {
            "stage": "Market Analysis",
            "specialist": "data_analyst",
            "task": "Analyze market trends and pricing strategies for our SaaS platform. What metrics should we track?",
        },
        {
            "stage": "Strategic Decision",
            "specialist": "tech_analyst",
            "task": "Given our technical architecture, documentation, and market analysis - what should be our MVP feature priority?",
        },
    ]
    logger.info("AI Research Team: Building a SaaS Product")
    # Execute research pipeline
    for i, step in enumerate(research_pipeline, 1):
        logger.info(f"\nStage {i}: {step['stage']}")
        logger.info(f"Specialist: {step['specialist']}")
        result = research_with_specialist(step["task"], step["specialist"], project)
        logger.info(f"Task: {step['task']}")
        logger.info(f"Result: {result[:200]}...\n")
    show_team_knowledge(project)
# Entry point: run the research-team demo when executed as a script.
if __name__ == "__main__":
    logger.info("Multi-LLM Research Team")
    demo_research_team()
| {
"repo_id": "mem0ai/mem0",
"file_path": "examples/misc/multillm_memory.py",
"license": "Apache License 2.0",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:mem0/client/project.py | import logging
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
import httpx
from pydantic import BaseModel, ConfigDict, Field
from mem0.client.utils import api_error_handler
from mem0.memory.telemetry import capture_client_event
# Exception classes are referenced in docstrings only
logger = logging.getLogger(__name__)
class ProjectConfig(BaseModel):
    """
    Configuration for project management operations.

    Carries the identifiers that scope project API calls.
    validate_assignment re-runs validation when attributes are mutated,
    and extra="forbid" rejects unknown fields at construction time.
    """

    # Organization that owns the project; required together with
    # project_id for project-scoped calls (see _validate_org_project).
    org_id: Optional[str] = Field(default=None, description="Organization ID")
    # Project being operated on.
    project_id: Optional[str] = Field(default=None, description="Project ID")
    # Optional user email; how it is consumed is not visible in this chunk.
    user_email: Optional[str] = Field(default=None, description="User email")

    model_config = ConfigDict(validate_assignment=True, extra="forbid")
class BaseProject(ABC):
"""
Abstract base class for project management operations.
"""
def __init__(
    self,
    client: Any,
    config: Optional[ProjectConfig] = None,
    org_id: Optional[str] = None,
    project_id: Optional[str] = None,
    user_email: Optional[str] = None,
):
    """
    Initialize the project manager.

    Args:
        client: HTTP client instance used for API calls.
        config: Pre-built configuration; takes precedence over the
            individual keyword arguments below.
        org_id: Organization ID (used only when config is None).
        project_id: Project ID (used only when config is None).
        user_email: User email (used only when config is None).
    """
    self._client = client
    # An explicit config object wins; otherwise assemble one from the
    # loose keyword arguments.
    if config is None:
        config = ProjectConfig(org_id=org_id, project_id=project_id, user_email=user_email)
    self.config = config
@property
def org_id(self) -> Optional[str]:
    """Get the organization ID."""
    # Read-only view over the underlying config.
    return self.config.org_id

@property
def project_id(self) -> Optional[str]:
    """Get the project ID."""
    # Read-only view over the underlying config.
    return self.config.project_id

@property
def user_email(self) -> Optional[str]:
    """Get the user email."""
    # Read-only view over the underlying config.
    return self.config.user_email
def _validate_org_project(self) -> None:
    """
    Ensure both org_id and project_id are configured.

    Raises:
        ValueError: If either org_id or project_id is missing.
    """
    has_both = bool(self.config.org_id) and bool(self.config.project_id)
    if not has_both:
        raise ValueError("org_id and project_id must be set to access project operations")
def _prepare_params(self, kwargs: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """
    Build query parameters for project-scoped API requests.

    Injects org_id/project_id from the config (mutating the caller's
    dict in place, as before) and strips None-valued entries.

    Args:
        kwargs: Additional keyword arguments.

    Returns:
        Dictionary containing prepared parameters.

    Raises:
        ValueError: If only one of org_id/project_id is configured.
    """
    params = {} if kwargs is None else kwargs
    org = self.config.org_id
    proj = self.config.project_id
    if org and proj:
        params["org_id"] = org
        params["project_id"] = proj
    elif org or proj:
        raise ValueError("Please provide both org_id and project_id")
    return {key: value for key, value in params.items() if value is not None}
def _prepare_org_params(self, kwargs: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
"""
Prepare query parameters for organization-level API requests.
Args:
kwargs: Additional keyword arguments.
Returns:
Dictionary containing prepared parameters.
Raises:
ValueError: If org_id is not provided.
"""
if kwargs is None:
kwargs = {}
# Add org_id if available
if self.config.org_id:
kwargs["org_id"] = self.config.org_id
else:
raise ValueError("org_id must be set for organization-level operations")
return {k: v for k, v in kwargs.items() if v is not None}
@abstractmethod
def get(self, fields: Optional[List[str]] = None) -> Dict[str, Any]:
"""
Get project details.
Args:
fields: List of fields to retrieve
Returns:
Dictionary containing the requested project fields.
Raises:
ValidationError: If the input data is invalid.
AuthenticationError: If authentication fails.
RateLimitError: If rate limits are exceeded.
NetworkError: If network connectivity issues occur.
ValueError: If org_id or project_id are not set.
"""
pass
@abstractmethod
def create(self, name: str, description: Optional[str] = None) -> Dict[str, Any]:
"""
Create a new project within the organization.
Args:
name: Name of the project to be created
description: Optional description for the project
Returns:
Dictionary containing the created project details.
Raises:
ValidationError: If the input data is invalid.
AuthenticationError: If authentication fails.
RateLimitError: If rate limits are exceeded.
NetworkError: If network connectivity issues occur.
ValueError: If org_id is not set.
"""
pass
@abstractmethod
def update(
self,
custom_instructions: Optional[str] = None,
custom_categories: Optional[List[str]] = None,
retrieval_criteria: Optional[List[Dict[str, Any]]] = None,
enable_graph: Optional[bool] = None,
) -> Dict[str, Any]:
"""
Update project settings.
Args:
custom_instructions: New instructions for the project
custom_categories: New categories for the project
retrieval_criteria: New retrieval criteria for the project
enable_graph: Enable or disable the graph for the project
Returns:
Dictionary containing the API response.
Raises:
ValidationError: If the input data is invalid.
AuthenticationError: If authentication fails.
RateLimitError: If rate limits are exceeded.
NetworkError: If network connectivity issues occur.
ValueError: If org_id or project_id are not set.
"""
pass
@abstractmethod
def delete(self) -> Dict[str, Any]:
"""
Delete the current project and its related data.
Returns:
Dictionary containing the API response.
Raises:
ValidationError: If the input data is invalid.
AuthenticationError: If authentication fails.
RateLimitError: If rate limits are exceeded.
NetworkError: If network connectivity issues occur.
ValueError: If org_id or project_id are not set.
"""
pass
@abstractmethod
def get_members(self) -> Dict[str, Any]:
"""
Get all members of the current project.
Returns:
Dictionary containing the list of project members.
Raises:
ValidationError: If the input data is invalid.
AuthenticationError: If authentication fails.
RateLimitError: If rate limits are exceeded.
NetworkError: If network connectivity issues occur.
ValueError: If org_id or project_id are not set.
"""
pass
@abstractmethod
def add_member(self, email: str, role: str = "READER") -> Dict[str, Any]:
"""
Add a new member to the current project.
Args:
email: Email address of the user to add
role: Role to assign ("READER" or "OWNER")
Returns:
Dictionary containing the API response.
Raises:
ValidationError: If the input data is invalid.
AuthenticationError: If authentication fails.
RateLimitError: If rate limits are exceeded.
NetworkError: If network connectivity issues occur.
ValueError: If org_id or project_id are not set.
"""
pass
@abstractmethod
def update_member(self, email: str, role: str) -> Dict[str, Any]:
"""
Update a member's role in the current project.
Args:
email: Email address of the user to update
role: New role to assign ("READER" or "OWNER")
Returns:
Dictionary containing the API response.
Raises:
ValidationError: If the input data is invalid.
AuthenticationError: If authentication fails.
RateLimitError: If rate limits are exceeded.
NetworkError: If network connectivity issues occur.
ValueError: If org_id or project_id are not set.
"""
pass
@abstractmethod
def remove_member(self, email: str) -> Dict[str, Any]:
"""
Remove a member from the current project.
Args:
email: Email address of the user to remove
Returns:
Dictionary containing the API response.
Raises:
ValidationError: If the input data is invalid.
AuthenticationError: If authentication fails.
RateLimitError: If rate limits are exceeded.
NetworkError: If network connectivity issues occur.
ValueError: If org_id or project_id are not set.
"""
pass
class Project(BaseProject):
    """
    Synchronous project management operations.
    """

    def __init__(
        self,
        client: httpx.Client,
        config: Optional[ProjectConfig] = None,
        org_id: Optional[str] = None,
        project_id: Optional[str] = None,
        user_email: Optional[str] = None,
    ):
        """
        Set up the synchronous project manager.

        Args:
            client: HTTP client instance
            config: Project manager configuration
            org_id: Organization ID
            project_id: Project ID
            user_email: User email

        Raises:
            ValueError: If org_id and project_id are not both available.
        """
        super().__init__(client, config, org_id, project_id, user_email)
        self._validate_org_project()

    @api_error_handler
    def get(self, fields: Optional[List[str]] = None) -> Dict[str, Any]:
        """
        Fetch the current project's details.

        Args:
            fields: Optional list restricting which fields are returned.

        Returns:
            Dictionary with the requested project fields.

        Raises:
            ValidationError / AuthenticationError / RateLimitError /
            NetworkError: On API or connectivity failures.
        """
        query = self._prepare_params({"fields": fields})
        url = f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/"
        response = self._client.get(url, params=query)
        response.raise_for_status()
        capture_client_event(
            "client.project.get",
            self,
            {"fields": fields, "sync_type": "sync"},
        )
        return response.json()

    @api_error_handler
    def create(self, name: str, description: Optional[str] = None) -> Dict[str, Any]:
        """
        Create a new project inside the organization.

        Args:
            name: Name of the project to be created.
            description: Optional description for the project.

        Returns:
            Dictionary describing the created project.

        Raises:
            ValueError: If org_id is not set.
        """
        if not self.config.org_id:
            raise ValueError("org_id must be set to create a project")
        body: Dict[str, Any] = {"name": name}
        if description is not None:
            body["description"] = description
        response = self._client.post(
            f"/api/v1/orgs/organizations/{self.config.org_id}/projects/",
            json=body,
        )
        response.raise_for_status()
        capture_client_event(
            "client.project.create",
            self,
            {"name": name, "description": description, "sync_type": "sync"},
        )
        return response.json()

    @api_error_handler
    def update(
        self,
        custom_instructions: Optional[str] = None,
        custom_categories: Optional[List[str]] = None,
        retrieval_criteria: Optional[List[Dict[str, Any]]] = None,
        enable_graph: Optional[bool] = None,
    ) -> Dict[str, Any]:
        """
        Update settings of the current project.

        Args:
            custom_instructions: New instructions for the project.
            custom_categories: New categories for the project.
            retrieval_criteria: New retrieval criteria for the project.
            enable_graph: Enable or disable the graph for the project.

        Returns:
            Dictionary containing the API response.

        Raises:
            ValueError: If no update parameter is supplied.
        """
        candidates = {
            "custom_instructions": custom_instructions,
            "custom_categories": custom_categories,
            "retrieval_criteria": retrieval_criteria,
            "enable_graph": enable_graph,
        }
        if all(value is None for value in candidates.values()):
            raise ValueError(
                "At least one parameter must be provided for update: "
                "custom_instructions, custom_categories, retrieval_criteria, "
                "enable_graph"
            )
        body = self._prepare_params(candidates)
        url = f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/"
        response = self._client.patch(url, json=body)
        response.raise_for_status()
        capture_client_event(
            "client.project.update",
            self,
            {
                "custom_instructions": custom_instructions,
                "custom_categories": custom_categories,
                "retrieval_criteria": retrieval_criteria,
                "enable_graph": enable_graph,
                "sync_type": "sync",
            },
        )
        return response.json()

    @api_error_handler
    def delete(self) -> Dict[str, Any]:
        """
        Delete the current project and all data attached to it.

        Returns:
            Dictionary containing the API response.
        """
        url = f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/"
        response = self._client.delete(url)
        response.raise_for_status()
        capture_client_event(
            "client.project.delete",
            self,
            {"sync_type": "sync"},
        )
        return response.json()

    @api_error_handler
    def get_members(self) -> Dict[str, Any]:
        """
        List every member of the current project.

        Returns:
            Dictionary containing the list of project members.
        """
        url = f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/"
        response = self._client.get(url)
        response.raise_for_status()
        capture_client_event(
            "client.project.get_members",
            self,
            {"sync_type": "sync"},
        )
        return response.json()

    @api_error_handler
    def add_member(self, email: str, role: str = "READER") -> Dict[str, Any]:
        """
        Add a member to the current project.

        Args:
            email: Email address of the user to add.
            role: Role to assign ("READER" or "OWNER").

        Returns:
            Dictionary containing the API response.

        Raises:
            ValueError: If the role is not "READER" or "OWNER".
        """
        if role not in ("READER", "OWNER"):
            raise ValueError("Role must be either 'READER' or 'OWNER'")
        url = f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/"
        response = self._client.post(url, json={"email": email, "role": role})
        response.raise_for_status()
        capture_client_event(
            "client.project.add_member",
            self,
            {"email": email, "role": role, "sync_type": "sync"},
        )
        return response.json()

    @api_error_handler
    def update_member(self, email: str, role: str) -> Dict[str, Any]:
        """
        Change a member's role in the current project.

        Args:
            email: Email address of the user to update.
            role: New role to assign ("READER" or "OWNER").

        Returns:
            Dictionary containing the API response.

        Raises:
            ValueError: If the role is not "READER" or "OWNER".
        """
        if role not in ("READER", "OWNER"):
            raise ValueError("Role must be either 'READER' or 'OWNER'")
        url = f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/"
        response = self._client.put(url, json={"email": email, "role": role})
        response.raise_for_status()
        capture_client_event(
            "client.project.update_member",
            self,
            {"email": email, "role": role, "sync_type": "sync"},
        )
        return response.json()

    @api_error_handler
    def remove_member(self, email: str) -> Dict[str, Any]:
        """
        Remove a member from the current project.

        Args:
            email: Email address of the user to remove.

        Returns:
            Dictionary containing the API response.
        """
        url = f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/"
        response = self._client.delete(url, params={"email": email})
        response.raise_for_status()
        capture_client_event(
            "client.project.remove_member",
            self,
            {"email": email, "sync_type": "sync"},
        )
        return response.json()
class AsyncProject(BaseProject):
    """
    Asynchronous project management operations.
    """

    def __init__(
        self,
        client: httpx.AsyncClient,
        config: Optional[ProjectConfig] = None,
        org_id: Optional[str] = None,
        project_id: Optional[str] = None,
        user_email: Optional[str] = None,
    ):
        """
        Set up the asynchronous project manager.

        Args:
            client: HTTP client instance
            config: Project manager configuration
            org_id: Organization ID
            project_id: Project ID
            user_email: User email

        Raises:
            ValueError: If org_id and project_id are not both available.
        """
        super().__init__(client, config, org_id, project_id, user_email)
        self._validate_org_project()

    @api_error_handler
    async def get(self, fields: Optional[List[str]] = None) -> Dict[str, Any]:
        """
        Fetch the current project's details.

        Args:
            fields: Optional list restricting which fields are returned.

        Returns:
            Dictionary with the requested project fields.
        """
        query = self._prepare_params({"fields": fields})
        url = f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/"
        response = await self._client.get(url, params=query)
        response.raise_for_status()
        capture_client_event(
            "client.project.get",
            self,
            {"fields": fields, "sync_type": "async"},
        )
        return response.json()

    @api_error_handler
    async def create(self, name: str, description: Optional[str] = None) -> Dict[str, Any]:
        """
        Create a new project inside the organization.

        Args:
            name: Name of the project to be created.
            description: Optional description for the project.

        Returns:
            Dictionary describing the created project.

        Raises:
            ValueError: If org_id is not set.
        """
        if not self.config.org_id:
            raise ValueError("org_id must be set to create a project")
        body: Dict[str, Any] = {"name": name}
        if description is not None:
            body["description"] = description
        response = await self._client.post(
            f"/api/v1/orgs/organizations/{self.config.org_id}/projects/",
            json=body,
        )
        response.raise_for_status()
        capture_client_event(
            "client.project.create",
            self,
            {"name": name, "description": description, "sync_type": "async"},
        )
        return response.json()

    @api_error_handler
    async def update(
        self,
        custom_instructions: Optional[str] = None,
        custom_categories: Optional[List[str]] = None,
        retrieval_criteria: Optional[List[Dict[str, Any]]] = None,
        enable_graph: Optional[bool] = None,
    ) -> Dict[str, Any]:
        """
        Update settings of the current project.

        Args:
            custom_instructions: New instructions for the project.
            custom_categories: New categories for the project.
            retrieval_criteria: New retrieval criteria for the project.
            enable_graph: Enable or disable the graph for the project.

        Returns:
            Dictionary containing the API response.

        Raises:
            ValueError: If no update parameter is supplied.
        """
        candidates = {
            "custom_instructions": custom_instructions,
            "custom_categories": custom_categories,
            "retrieval_criteria": retrieval_criteria,
            "enable_graph": enable_graph,
        }
        if all(value is None for value in candidates.values()):
            raise ValueError(
                "At least one parameter must be provided for update: "
                "custom_instructions, custom_categories, retrieval_criteria, "
                "enable_graph"
            )
        body = self._prepare_params(candidates)
        url = f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/"
        response = await self._client.patch(url, json=body)
        response.raise_for_status()
        capture_client_event(
            "client.project.update",
            self,
            {
                "custom_instructions": custom_instructions,
                "custom_categories": custom_categories,
                "retrieval_criteria": retrieval_criteria,
                "enable_graph": enable_graph,
                "sync_type": "async",
            },
        )
        return response.json()

    @api_error_handler
    async def delete(self) -> Dict[str, Any]:
        """
        Delete the current project and all data attached to it.

        Returns:
            Dictionary containing the API response.
        """
        url = f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/"
        response = await self._client.delete(url)
        response.raise_for_status()
        capture_client_event(
            "client.project.delete",
            self,
            {"sync_type": "async"},
        )
        return response.json()

    @api_error_handler
    async def get_members(self) -> Dict[str, Any]:
        """
        List every member of the current project.

        Returns:
            Dictionary containing the list of project members.
        """
        url = f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/"
        response = await self._client.get(url)
        response.raise_for_status()
        capture_client_event(
            "client.project.get_members",
            self,
            {"sync_type": "async"},
        )
        return response.json()

    @api_error_handler
    async def add_member(self, email: str, role: str = "READER") -> Dict[str, Any]:
        """
        Add a member to the current project.

        Args:
            email: Email address of the user to add.
            role: Role to assign ("READER" or "OWNER").

        Returns:
            Dictionary containing the API response.

        Raises:
            ValueError: If the role is not "READER" or "OWNER".
        """
        if role not in ("READER", "OWNER"):
            raise ValueError("Role must be either 'READER' or 'OWNER'")
        url = f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/"
        response = await self._client.post(url, json={"email": email, "role": role})
        response.raise_for_status()
        capture_client_event(
            "client.project.add_member",
            self,
            {"email": email, "role": role, "sync_type": "async"},
        )
        return response.json()

    @api_error_handler
    async def update_member(self, email: str, role: str) -> Dict[str, Any]:
        """
        Change a member's role in the current project.

        Args:
            email: Email address of the user to update.
            role: New role to assign ("READER" or "OWNER").

        Returns:
            Dictionary containing the API response.

        Raises:
            ValueError: If the role is not "READER" or "OWNER".
        """
        if role not in ("READER", "OWNER"):
            raise ValueError("Role must be either 'READER' or 'OWNER'")
        url = f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/"
        response = await self._client.put(url, json={"email": email, "role": role})
        response.raise_for_status()
        capture_client_event(
            "client.project.update_member",
            self,
            {"email": email, "role": role, "sync_type": "async"},
        )
        return response.json()

    @api_error_handler
    async def remove_member(self, email: str) -> Dict[str, Any]:
        """
        Remove a member from the current project.

        Args:
            email: Email address of the user to remove.

        Returns:
            Dictionary containing the API response.
        """
        url = f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/"
        response = await self._client.delete(url, params={"email": email})
        response.raise_for_status()
        capture_client_event(
            "client.project.remove_member",
            self,
            {"email": email, "sync_type": "async"},
        )
        return response.json()
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/client/project.py",
"license": "Apache License 2.0",
"lines": 788,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mem0ai/mem0:mem0/client/utils.py | import json
import logging
import httpx
from mem0.exceptions import (
NetworkError,
create_exception_from_response,
)
logger = logging.getLogger(__name__)
class APIError(Exception):
    """Legacy exception raised for errors in the API.

    Deprecated: prefer the structured exception classes exposed by
    ``mem0.exceptions``. This class is kept only so existing code that
    catches it keeps working.
    """
def api_error_handler(func):
    """Decorator to handle API errors consistently.

    Catches httpx HTTP-status and request errors and converts them to the
    appropriate structured exception classes (see ``mem0.exceptions``) with
    detailed error messages, suggestions, and debug information derived from
    the response status code, headers, and body.

    Works for both synchronous and asynchronous callables. Previously only
    sync functions were handled: decorating a coroutine function returned the
    coroutine un-awaited, so errors raised during ``await`` escaped as raw
    httpx exceptions instead of being translated. Coroutine functions now get
    an async wrapper that awaits the call inside the try/except.
    """
    import inspect
    from functools import wraps

    def _translate_http_error(e):
        """Convert an httpx.HTTPStatusError into a structured exception (always raises)."""
        logger.error(f"HTTP error occurred: {e}")
        response_text = ""
        error_details = {}
        debug_info = {
            "status_code": e.response.status_code,
            "url": str(e.request.url),
            "method": e.request.method,
        }
        try:
            response_text = e.response.text
            # Prefer the JSON "detail" field when the server returned JSON.
            if e.response.headers.get("content-type", "").startswith("application/json"):
                error_data = json.loads(response_text)
                if isinstance(error_data, dict):
                    error_details = error_data
                    response_text = error_data.get("detail", response_text)
        except (json.JSONDecodeError, AttributeError):
            # Fall back to the plain-text response body.
            pass
        # Surface rate-limit metadata for 429 responses.
        if e.response.status_code == 429:
            retry_after = e.response.headers.get("Retry-After")
            if retry_after:
                try:
                    debug_info["retry_after"] = int(retry_after)
                except ValueError:
                    pass
            for header in ["X-RateLimit-Limit", "X-RateLimit-Remaining", "X-RateLimit-Reset"]:
                value = e.response.headers.get(header)
                if value:
                    debug_info[header.lower().replace("-", "_")] = value
        raise create_exception_from_response(
            status_code=e.response.status_code,
            response_text=response_text,
            details=error_details,
            debug_info=debug_info,
        )

    def _translate_request_error(e):
        """Convert an httpx.RequestError into a NetworkError (always raises)."""
        logger.error(f"Request error occurred: {e}")
        if isinstance(e, httpx.TimeoutException):
            raise NetworkError(
                message=f"Request timed out: {str(e)}",
                error_code="NET_TIMEOUT",
                suggestion="Please check your internet connection and try again",
                debug_info={"error_type": "timeout", "original_error": str(e)},
            )
        if isinstance(e, httpx.ConnectError):
            raise NetworkError(
                message=f"Connection failed: {str(e)}",
                error_code="NET_CONNECT",
                suggestion="Please check your internet connection and try again",
                debug_info={"error_type": "connection", "original_error": str(e)},
            )
        # Generic network error for other request errors.
        raise NetworkError(
            message=f"Network request failed: {str(e)}",
            error_code="NET_GENERIC",
            suggestion="Please check your internet connection and try again",
            debug_info={"error_type": "request", "original_error": str(e)},
        )

    if inspect.iscoroutinefunction(func):
        # Async path: await inside the try so errors raised during the
        # request are actually caught and translated.
        @wraps(func)
        async def async_wrapper(*args, **kwargs):
            try:
                return await func(*args, **kwargs)
            except httpx.HTTPStatusError as e:
                _translate_http_error(e)
            except httpx.RequestError as e:
                _translate_request_error(e)

        return async_wrapper

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except httpx.HTTPStatusError as e:
            _translate_http_error(e)
        except httpx.RequestError as e:
            _translate_request_error(e)

    return wrapper
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/client/utils.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:tests/test_memory_integration.py | from unittest.mock import MagicMock, patch
from mem0.memory.main import Memory
def test_memory_configuration_without_env_vars():
    """Exercise the Memory workflow with a mock config instead of env vars."""
    # Configuration built in-code, so no environment variables are required.
    mock_config = {
        "llm": {
            "provider": "openai",
            "config": {"model": "gpt-4", "temperature": 0.1, "max_tokens": 1500},
        },
        "vector_store": {
            "provider": "chroma",
            "config": {"collection_name": "test_collection", "path": "./test_db"},
        },
        "embedder": {
            "provider": "openai",
            "config": {"model": "text-embedding-ada-002"},
        },
    }
    # Sample conversation mirroring the one used in main.py.
    test_messages = [
        {"role": "user", "content": "Hi, I'm Alex. I'm a vegetarian and I'm allergic to nuts."},
        {
            "role": "assistant",
            "content": "Hello Alex! I've noted that you're a vegetarian and have a nut allergy. I'll keep this in mind for any food-related recommendations or discussions.",
        },
    ]
    # Patch Memory's entry points so no real API calls are made.
    with patch.object(Memory, "__init__", return_value=None), patch.object(
        Memory, "from_config"
    ) as mock_from_config, patch.object(Memory, "add") as mock_add, patch.object(
        Memory, "get_all"
    ) as mock_get_all:
        mock_from_config.return_value = MagicMock()
        mock_add.return_value = {
            "results": [
                {"id": "1", "text": "Alex is a vegetarian"},
                {"id": "2", "text": "Alex is allergic to nuts"},
            ]
        }
        mock_get_all.return_value = [
            {"id": "1", "text": "Alex is a vegetarian", "metadata": {"category": "dietary_preferences"}},
            {"id": "2", "text": "Alex is allergic to nuts", "metadata": {"category": "allergies"}},
        ]
        # Construction via config should succeed.
        mem = Memory.from_config(config_dict=mock_config)
        assert mem is not None
        # Adding memories returns the mocked results.
        result = mock_add(test_messages, user_id="alice", metadata={"category": "book_recommendations"})
        assert "results" in result
        assert len(result["results"]) == 2
        # Retrieval returns both stored facts.
        memories = mock_get_all(user_id="alice")
        assert len(memories) == 2
        assert any("vegetarian" in memory["text"] for memory in memories)
        assert any("allergic to nuts" in memory["text"] for memory in memories)
def test_azure_config_structure():
    """Validate the shape of an Azure-flavored configuration dictionary."""
    # Placeholder credentials only — structure is what is under test.
    azure_config = {
        "llm": {
            "provider": "azure_openai",
            "config": {
                "model": "gpt-4",
                "temperature": 0.1,
                "max_tokens": 1500,
                "azure_kwargs": {
                    "azure_deployment": "test-deployment",
                    "api_version": "2023-12-01-preview",
                    "azure_endpoint": "https://test.openai.azure.com/",
                    "api_key": "test-key",
                },
            },
        },
        "vector_store": {
            "provider": "azure_ai_search",
            "config": {
                "service_name": "test-service",
                "api_key": "test-key",
                "collection_name": "test-collection",
                "embedding_model_dims": 1536,
            },
        },
        "embedder": {
            "provider": "azure_openai",
            "config": {
                "model": "text-embedding-ada-002",
                "api_key": "test-key",
                "azure_kwargs": {
                    "api_version": "2023-12-01-preview",
                    "azure_deployment": "test-embedding-deployment",
                    "azure_endpoint": "https://test.openai.azure.com/",
                    "api_key": "test-key",
                },
            },
        },
    }
    # Every top-level section must be present.
    for section in ("llm", "vector_store", "embedder"):
        assert section in azure_config
    # LLM section uses the Azure provider with deployment details.
    assert azure_config["llm"]["provider"] == "azure_openai"
    assert "azure_kwargs" in azure_config["llm"]["config"]
    assert "azure_deployment" in azure_config["llm"]["config"]["azure_kwargs"]
    # Vector store section targets Azure AI Search.
    assert azure_config["vector_store"]["provider"] == "azure_ai_search"
    assert "service_name" in azure_config["vector_store"]["config"]
    # Embedder section also uses the Azure provider.
    assert azure_config["embedder"]["provider"] == "azure_openai"
    assert "azure_kwargs" in azure_config["embedder"]["config"]
def test_memory_messages_format():
    """Check structure and content of the sample conversation messages."""
    messages = [
        {"role": "user", "content": "Hi, I'm Alex. I'm a vegetarian and I'm allergic to nuts."},
        {
            "role": "assistant",
            "content": "Hello Alex! I've noted that you're a vegetarian and have a nut allergy. I'll keep this in mind for any food-related recommendations or discussions.",
        },
    ]
    # Exactly one user turn and one assistant turn, each with role + content.
    assert len(messages) == 2
    for msg in messages:
        assert "role" in msg
        assert "content" in msg
    user_msg, assistant_msg = messages
    assert user_msg["role"] == "user"
    assert assistant_msg["role"] == "assistant"
    # Both turns must mention the dietary facts.
    user_text = user_msg["content"].lower()
    assistant_text = assistant_msg["content"].lower()
    assert "vegetarian" in user_text
    assert "allergic to nuts" in user_text
    assert "vegetarian" in assistant_text
    assert "nut allergy" in assistant_text
def test_safe_update_prompt_constant():
    """Sanity-check the inline SAFE_UPDATE_PROMPT template from main.py."""
    SAFE_UPDATE_PROMPT = """
    Based on the user's latest messages, what new preference can be inferred?
    Reply only in this json_object format:
    """
    # Must be a non-empty string containing the key instruction phrases.
    prompt = SAFE_UPDATE_PROMPT
    assert isinstance(prompt, str)
    assert prompt.strip() != ""
    assert "user's latest messages" in prompt
    assert "json_object format" in prompt
| {
"repo_id": "mem0ai/mem0",
"file_path": "tests/test_memory_integration.py",
"license": "Apache License 2.0",
"lines": 152,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mem0ai/mem0:mem0/graphs/neptune/base.py | import logging
from abc import ABC, abstractmethod
from mem0.memory.utils import format_entities
try:
from rank_bm25 import BM25Okapi
except ImportError:
raise ImportError("rank_bm25 is not installed. Please install it using pip install rank-bm25")
from mem0.graphs.tools import (
DELETE_MEMORY_STRUCT_TOOL_GRAPH,
DELETE_MEMORY_TOOL_GRAPH,
EXTRACT_ENTITIES_STRUCT_TOOL,
EXTRACT_ENTITIES_TOOL,
RELATIONS_STRUCT_TOOL,
RELATIONS_TOOL,
)
from mem0.graphs.utils import EXTRACT_RELATIONS_PROMPT, get_delete_messages
from mem0.utils.factory import EmbedderFactory, LlmFactory, VectorStoreFactory
logger = logging.getLogger(__name__)
class NeptuneBase(ABC):
    """
    Abstract base class for neptune (neptune analytics and neptune db) calls using OpenCypher
    to store/retrieve data.

    Concrete subclasses implement the ``*_cypher`` abstract methods (which return an
    OpenCypher string plus a params dict) and are expected to set, in their own
    ``__init__`` (not visible in this base class), the attributes used below:
    ``self.graph`` (query client), ``self.llm`` and ``self.llm_provider``,
    ``self.embedding_model``, ``self.config`` and ``self.threshold``.
    """
    @staticmethod
    def _create_embedding_model(config):
        """
        :param config: mem0 configuration exposing ``embedder.provider`` and ``embedder.config``
        :return: the Embedder model used for memory store
        """
        return EmbedderFactory.create(
            config.embedder.provider,
            config.embedder.config,
            {"enable_embeddings": True},
        )
    @staticmethod
    def _create_llm(config, llm_provider):
        """
        :param config: mem0 configuration exposing ``llm.config``
        :param llm_provider: name of the LLM provider to instantiate
        :return: the llm model used for memory store
        """
        return LlmFactory.create(llm_provider, config.llm.config)
    @staticmethod
    def _create_vector_store(vector_store_provider, config):
        """
        :param vector_store_provider: name of vector store
        :param config: the vector_store configuration
        :return: the vector store instance used for memory store
        """
        return VectorStoreFactory.create(vector_store_provider, config.vector_store.config)
    def add(self, data, filters):
        """
        Adds data to the graph.
        Args:
            data (str): The data to add to the graph.
            filters (dict): A dictionary containing filters to be applied during the addition.
                Must contain a "user_id" key.
        Returns:
            dict: {"deleted_entities": ..., "added_entities": ...} — the raw query
            results for the relationships removed and created by this call.
        """
        # 1) Extract entities from the text, 2) extract relations between them,
        # 3) search the graph for existing related triplets, 4) let the LLM decide
        # which existing triplets contradict the new data and delete them,
        # 5) add the new triplets.
        entity_type_map = self._retrieve_nodes_from_data(data, filters)
        to_be_added = self._establish_nodes_relations_from_data(data, filters, entity_type_map)
        search_output = self._search_graph_db(node_list=list(entity_type_map.keys()), filters=filters)
        to_be_deleted = self._get_delete_entities_from_search_output(search_output, data, filters)
        deleted_entities = self._delete_entities(to_be_deleted, filters["user_id"])
        added_entities = self._add_entities(to_be_added, filters["user_id"], entity_type_map)
        return {"deleted_entities": deleted_entities, "added_entities": added_entities}
    def _retrieve_nodes_from_data(self, data, filters):
        """
        Extract all entities mentioned in the query.

        Returns:
            dict: mapping of entity name -> entity type, both normalized to
            lowercase with spaces replaced by underscores.
        """
        _tools = [EXTRACT_ENTITIES_TOOL]
        # Structured-output providers use the structured variant of the tool schema.
        if self.llm_provider in ["azure_openai_structured", "openai_structured"]:
            _tools = [EXTRACT_ENTITIES_STRUCT_TOOL]
        search_results = self.llm.generate_response(
            messages=[
                {
                    "role": "system",
                    "content": f"You are a smart assistant who understands entities and their types in a given text. If user message contains self reference such as 'I', 'me', 'my' etc. then use {filters['user_id']} as the source entity. Extract all the entities from the text. ***DO NOT*** answer the question itself if the given text is a question.",
                },
                {"role": "user", "content": data},
            ],
            tools=_tools,
        )
        entity_type_map = {}
        try:
            for tool_call in search_results["tool_calls"]:
                if tool_call["name"] != "extract_entities":
                    continue
                for item in tool_call["arguments"]["entities"]:
                    entity_type_map[item["entity"]] = item["entity_type"]
        except Exception as e:
            # Malformed/missing tool calls are tolerated; we proceed with whatever
            # entities (possibly none) were collected before the failure.
            logger.exception(
                f"Error in search tool: {e}, llm_provider={self.llm_provider}, search_results={search_results}"
            )
        # Normalize names so graph lookups are case/whitespace insensitive.
        entity_type_map = {k.lower().replace(" ", "_"): v.lower().replace(" ", "_") for k, v in entity_type_map.items()}
        return entity_type_map
    def _establish_nodes_relations_from_data(self, data, filters, entity_type_map):
        """
        Establish relations among the extracted nodes.

        Returns:
            list: relation dicts with "source", "relationship" and "destination"
            keys, normalized via _remove_spaces_from_entities.
        """
        # An optional custom prompt (from graph_store config) is injected as an
        # extra numbered instruction into the relation-extraction prompt.
        if self.config.graph_store.custom_prompt:
            messages = [
                {
                    "role": "system",
                    "content": EXTRACT_RELATIONS_PROMPT.replace("USER_ID", filters["user_id"]).replace(
                        "CUSTOM_PROMPT", f"4. {self.config.graph_store.custom_prompt}"
                    ),
                },
                {"role": "user", "content": data},
            ]
        else:
            messages = [
                {
                    "role": "system",
                    "content": EXTRACT_RELATIONS_PROMPT.replace("USER_ID", filters["user_id"]),
                },
                {
                    "role": "user",
                    "content": f"List of entities: {list(entity_type_map.keys())}. \n\nText: {data}",
                },
            ]
        _tools = [RELATIONS_TOOL]
        if self.llm_provider in ["azure_openai_structured", "openai_structured"]:
            _tools = [RELATIONS_STRUCT_TOOL]
        extracted_entities = self.llm.generate_response(
            messages=messages,
            tools=_tools,
        )
        entities = []
        # Only the first tool call is consulted; absent tool calls yield no relations.
        if extracted_entities["tool_calls"]:
            entities = extracted_entities["tool_calls"][0]["arguments"]["entities"]
        entities = self._remove_spaces_from_entities(entities)
        logger.debug(f"Extracted entities: {entities}")
        return entities
    def _remove_spaces_from_entities(self, entity_list):
        # Normalize triplet fields in place (lowercase, spaces -> underscores) so
        # they match the normalization applied to extracted entity names.
        for item in entity_list:
            item["source"] = item["source"].lower().replace(" ", "_")
            item["relationship"] = item["relationship"].lower().replace(" ", "_")
            item["destination"] = item["destination"].lower().replace(" ", "_")
        return entity_list
    def _get_delete_entities_from_search_output(self, search_output, data, filters):
        """
        Get the entities to be deleted from the search output.

        The LLM compares the existing triplets against the new data and proposes
        (via the delete_graph_memory tool) which relationships are now obsolete.
        """
        search_output_string = format_entities(search_output)
        system_prompt, user_prompt = get_delete_messages(search_output_string, data, filters["user_id"])
        _tools = [DELETE_MEMORY_TOOL_GRAPH]
        if self.llm_provider in ["azure_openai_structured", "openai_structured"]:
            _tools = [
                DELETE_MEMORY_STRUCT_TOOL_GRAPH,
            ]
        memory_updates = self.llm.generate_response(
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            tools=_tools,
        )
        to_be_deleted = []
        for item in memory_updates["tool_calls"]:
            if item["name"] == "delete_graph_memory":
                to_be_deleted.append(item["arguments"])
        # in case if the LLM output is not in the normalized format
        to_be_deleted = self._remove_spaces_from_entities(to_be_deleted)
        logger.debug(f"Deleted relationships: {to_be_deleted}")
        return to_be_deleted
    def _delete_entities(self, to_be_deleted, user_id):
        """
        Delete the entities from the graph.

        Args:
            to_be_deleted (list): triplet dicts with "source", "destination", "relationship".
            user_id (str): scope of the deletion.
        Returns:
            list: raw query results, one entry per deleted triplet.
        """
        results = []
        for item in to_be_deleted:
            source = item["source"]
            destination = item["destination"]
            relationship = item["relationship"]
            # Delete the specific relationship between nodes
            cypher, params = self._delete_entities_cypher(source, destination, relationship, user_id)
            result = self.graph.query(cypher, params=params)
            results.append(result)
        return results
    @abstractmethod
    def _delete_entities_cypher(self, source, destination, relationship, user_id):
        """
        Returns the OpenCypher query and parameters for deleting entities in the graph DB
        """
        pass
    def _add_entities(self, to_be_added, user_id, entity_type_map):
        """
        Add the new entities to the graph. Merge the nodes if they already exist.

        For each triplet, existing nodes are located by embedding similarity
        (within self.threshold) so that near-duplicate entities are merged
        instead of duplicated.
        """
        results = []
        for item in to_be_added:
            # entities
            source = item["source"]
            destination = item["destination"]
            relationship = item["relationship"]
            # types (default label when the extractor gave no type for the entity)
            source_type = entity_type_map.get(source, "__User__")
            destination_type = entity_type_map.get(destination, "__User__")
            # embeddings
            source_embedding = self.embedding_model.embed(source)
            dest_embedding = self.embedding_model.embed(destination)
            # search for the nodes with the closest embeddings
            source_node_search_result = self._search_source_node(source_embedding, user_id, threshold=self.threshold)
            destination_node_search_result = self._search_destination_node(dest_embedding, user_id, threshold=self.threshold)
            cypher, params = self._add_entities_cypher(
                source_node_search_result,
                source,
                source_embedding,
                source_type,
                destination_node_search_result,
                destination,
                dest_embedding,
                destination_type,
                relationship,
                user_id,
            )
            result = self.graph.query(cypher, params=params)
            results.append(result)
        return results
    def _add_entities_cypher(
        self,
        source_node_list,
        source,
        source_embedding,
        source_type,
        destination_node_list,
        destination,
        dest_embedding,
        destination_type,
        relationship,
        user_id,
    ):
        """
        Returns the OpenCypher query and parameters for adding entities in the graph DB.

        Dispatches to one of four subclass-provided builders depending on which
        endpoints of the triplet already exist in the graph:
        - only source exists  -> create destination, link from existing source
        - only destination exists -> create source, link to existing destination
        - both exist -> just create the relationship
        - neither exists -> create both nodes and the relationship
        """
        if not destination_node_list and source_node_list:
            return self._add_entities_by_source_cypher(
                source_node_list,
                destination,
                dest_embedding,
                destination_type,
                relationship,
                user_id)
        elif destination_node_list and not source_node_list:
            return self._add_entities_by_destination_cypher(
                source,
                source_embedding,
                source_type,
                destination_node_list,
                relationship,
                user_id)
        elif source_node_list and destination_node_list:
            return self._add_relationship_entities_cypher(
                source_node_list,
                destination_node_list,
                relationship,
                user_id)
        # else source_node_list and destination_node_list are empty
        return self._add_new_entities_cypher(
            source,
            source_embedding,
            source_type,
            destination,
            dest_embedding,
            destination_type,
            relationship,
            user_id)
    @abstractmethod
    def _add_entities_by_source_cypher(
        self,
        source_node_list,
        destination,
        dest_embedding,
        destination_type,
        relationship,
        user_id,
    ):
        pass
    @abstractmethod
    def _add_entities_by_destination_cypher(
        self,
        source,
        source_embedding,
        source_type,
        destination_node_list,
        relationship,
        user_id,
    ):
        pass
    @abstractmethod
    def _add_relationship_entities_cypher(
        self,
        source_node_list,
        destination_node_list,
        relationship,
        user_id,
    ):
        pass
    @abstractmethod
    def _add_new_entities_cypher(
        self,
        source,
        source_embedding,
        source_type,
        destination,
        dest_embedding,
        destination_type,
        relationship,
        user_id,
    ):
        pass
    def search(self, query, filters, limit=100):
        """
        Search for memories and related graph data.
        Args:
            query (str): Query to search for.
            filters (dict): A dictionary containing filters to be applied during the search.
            limit (int): The maximum number of nodes and relationships to retrieve. Defaults to 100.
                NOTE(review): ``limit`` is currently not forwarded to ``_search_graph_db``
                (which applies its own default of 100), and the BM25 rerank below is
                hard-capped at 5 results — confirm whether this parameter should be honored.
        Returns:
            list: Up to 5 BM25-reranked triplets, each a dict with keys
            "source", "relationship" and "destination"; an empty list when the
            graph search yields nothing.
        """
        entity_type_map = self._retrieve_nodes_from_data(query, filters)
        search_output = self._search_graph_db(node_list=list(entity_type_map.keys()), filters=filters)
        if not search_output:
            return []
        # Rerank graph triplets against the raw query tokens with BM25.
        search_outputs_sequence = [
            [item["source"], item["relationship"], item["destination"]] for item in search_output
        ]
        bm25 = BM25Okapi(search_outputs_sequence)
        tokenized_query = query.split(" ")
        reranked_results = bm25.get_top_n(tokenized_query, search_outputs_sequence, n=5)
        search_results = []
        for item in reranked_results:
            search_results.append({"source": item[0], "relationship": item[1], "destination": item[2]})
        return search_results
    def _search_source_node(self, source_embedding, user_id, threshold=0.9):
        # Find existing candidate source nodes whose embedding similarity exceeds threshold.
        cypher, params = self._search_source_node_cypher(source_embedding, user_id, threshold)
        result = self.graph.query(cypher, params=params)
        return result
    @abstractmethod
    def _search_source_node_cypher(self, source_embedding, user_id, threshold):
        """
        Returns the OpenCypher query and parameters to search for source nodes
        """
        pass
    def _search_destination_node(self, destination_embedding, user_id, threshold=0.9):
        # Find existing candidate destination nodes whose embedding similarity exceeds threshold.
        cypher, params = self._search_destination_node_cypher(destination_embedding, user_id, threshold)
        result = self.graph.query(cypher, params=params)
        return result
    @abstractmethod
    def _search_destination_node_cypher(self, destination_embedding, user_id, threshold):
        """
        Returns the OpenCypher query and parameters to search for destination nodes
        """
        pass
    def delete_all(self, filters):
        # Remove every node/relationship matching the filters (subclass builds the query).
        cypher, params = self._delete_all_cypher(filters)
        self.graph.query(cypher, params=params)
    @abstractmethod
    def _delete_all_cypher(self, filters):
        """
        Returns the OpenCypher query and parameters to delete all edges/nodes in the memory store
        """
        pass
    def get_all(self, filters, limit=100):
        """
        Retrieves all nodes and relationships from the graph database based on filtering criteria.
        Args:
            filters (dict): A dictionary containing filters to be applied during the retrieval.
            limit (int): The maximum number of nodes and relationships to retrieve. Defaults to 100.
        Returns:
            list: A list of dictionaries, one per relationship, each containing
            the keys "source", "relationship" and "target".
        """
        # return all nodes and relationships
        query, params = self._get_all_cypher(filters, limit)
        results = self.graph.query(query, params=params)
        final_results = []
        for result in results:
            final_results.append(
                {
                    "source": result["source"],
                    "relationship": result["relationship"],
                    "target": result["target"],
                }
            )
        logger.debug(f"Retrieved {len(final_results)} relationships")
        return final_results
    @abstractmethod
    def _get_all_cypher(self, filters, limit):
        """
        Returns the OpenCypher query and parameters to get all edges/nodes in the memory store
        """
        pass
    def _search_graph_db(self, node_list, filters, limit=100):
        """
        Search similar nodes among and their respective incoming and outgoing relations.

        One embedding + one graph query per node in node_list; results are concatenated.
        """
        result_relations = []
        for node in node_list:
            n_embedding = self.embedding_model.embed(node)
            cypher_query, params = self._search_graph_db_cypher(n_embedding, filters, limit)
            ans = self.graph.query(cypher_query, params=params)
            result_relations.extend(ans)
        return result_relations
    @abstractmethod
    def _search_graph_db_cypher(self, n_embedding, filters, limit):
        """
        Returns the OpenCypher query and parameters to search for similar nodes in the memory store
        """
        pass
    # NOTE(review): unlike the methods above, reset() is implemented directly against
    # the Neptune Analytics boto3 client (reset_graph), not via a *_cypher hook —
    # presumably it is only meaningful for the Neptune Analytics subclass; confirm
    # before calling on a Neptune DB instance.
    def reset(self):
        """
        Reset the graph by clearing all nodes and relationships.
        link: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/neptune-graph/client/reset_graph.html
        """
        logger.warning("Clearing graph...")
        graph_id = self.graph.graph_identifier
        self.graph.client.reset_graph(
            graphIdentifier=graph_id,
            skipSnapshot=True,
        )
        # Block until the graph is available again (up to 60 attempts, 10s apart).
        waiter = self.graph.client.get_waiter("graph_available")
        waiter.wait(graphIdentifier=graph_id, WaiterConfig={"Delay": 10, "MaxAttempts": 60})
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/graphs/neptune/base.py",
"license": "Apache License 2.0",
"lines": 424,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:tests/memory/test_neptune_memory.py | import unittest
from unittest.mock import MagicMock, patch
import pytest
from mem0.graphs.neptune.neptunedb import MemoryGraph
from mem0.graphs.neptune.base import NeptuneBase
class TestNeptuneMemory(unittest.TestCase):
    """Test suite for the Neptune Memory implementation.

    All external collaborators (the NeptuneGraph client and the embedder / LLM /
    vector-store factory methods on NeptuneBase) are patched out in setUp, so
    every test runs fully offline against mocks.
    """
    def setUp(self):
        """Set up test fixtures before each test method."""
        # Create a mock config
        self.config = MagicMock()
        self.config.graph_store.config.endpoint = "neptune-db://test-graph"
        self.config.graph_store.config.base_label = True
        self.config.graph_store.threshold = 0.7
        self.config.llm.provider = "openai_structured"
        self.config.graph_store.llm = None
        self.config.graph_store.custom_prompt = None
        self.config.vector_store.provider = "qdrant"
        self.config.vector_store.config = MagicMock()
        # Create mock for NeptuneGraph
        self.mock_graph = MagicMock()
        # Create mocks for static methods
        self.mock_embedding_model = MagicMock()
        self.mock_llm = MagicMock()
        self.mock_vector_store = MagicMock()
        # Patch the necessary components
        self.neptune_graph_patcher = patch("mem0.graphs.neptune.neptunedb.NeptuneGraph")
        self.mock_neptune_graph = self.neptune_graph_patcher.start()
        self.mock_neptune_graph.return_value = self.mock_graph
        # Patch the static methods on the base class so MemoryGraph.__init__
        # receives our mocks instead of real providers.
        self.create_embedding_model_patcher = patch.object(NeptuneBase, "_create_embedding_model")
        self.mock_create_embedding_model = self.create_embedding_model_patcher.start()
        self.mock_create_embedding_model.return_value = self.mock_embedding_model
        self.create_llm_patcher = patch.object(NeptuneBase, "_create_llm")
        self.mock_create_llm = self.create_llm_patcher.start()
        self.mock_create_llm.return_value = self.mock_llm
        self.create_vector_store_patcher = patch.object(NeptuneBase, "_create_vector_store")
        self.mock_create_vector_store = self.create_vector_store_patcher.start()
        self.mock_create_vector_store.return_value = self.mock_vector_store
        # Create the MemoryGraph instance
        self.memory_graph = MemoryGraph(self.config)
        # Set up common test data
        self.user_id = "test_user"
        self.test_filters = {"user_id": self.user_id}
    def tearDown(self):
        """Tear down test fixtures after each test method."""
        self.neptune_graph_patcher.stop()
        self.create_embedding_model_patcher.stop()
        self.create_llm_patcher.stop()
        self.create_vector_store_patcher.stop()
    def test_initialization(self):
        """Test that the MemoryGraph is initialized correctly."""
        self.assertEqual(self.memory_graph.graph, self.mock_graph)
        self.assertEqual(self.memory_graph.embedding_model, self.mock_embedding_model)
        self.assertEqual(self.memory_graph.llm, self.mock_llm)
        self.assertEqual(self.memory_graph.vector_store, self.mock_vector_store)
        self.assertEqual(self.memory_graph.llm_provider, "openai_structured")
        self.assertEqual(self.memory_graph.node_label, ":`__Entity__`")
        self.assertEqual(self.memory_graph.threshold, 0.7)
        self.assertEqual(self.memory_graph.vector_store_limit, 5)
    def test_collection_name_variants(self):
        """Test all collection_name configuration variants."""
        # Test 1: graph_store.config.collection_name is set
        config1 = MagicMock()
        config1.graph_store.config.endpoint = "neptune-db://test-graph"
        config1.graph_store.config.base_label = True
        config1.graph_store.config.collection_name = "custom_collection"
        config1.llm.provider = "openai"
        config1.graph_store.llm = None
        config1.vector_store.provider = "qdrant"
        config1.vector_store.config = MagicMock()
        MemoryGraph(config1)
        self.assertEqual(config1.vector_store.config.collection_name, "custom_collection")
        # Test 2: vector_store.config.collection_name exists, graph_store.config.collection_name is None
        # (the existing name gets a "_neptune_vector_store" suffix appended)
        config2 = MagicMock()
        config2.graph_store.config.endpoint = "neptune-db://test-graph"
        config2.graph_store.config.base_label = True
        config2.graph_store.config.collection_name = None
        config2.llm.provider = "openai"
        config2.graph_store.llm = None
        config2.vector_store.provider = "qdrant"
        config2.vector_store.config = MagicMock()
        config2.vector_store.config.collection_name = "existing_collection"
        MemoryGraph(config2)
        self.assertEqual(config2.vector_store.config.collection_name, "existing_collection_neptune_vector_store")
        # Test 3: Neither collection_name is set (default case)
        config3 = MagicMock()
        config3.graph_store.config.endpoint = "neptune-db://test-graph"
        config3.graph_store.config.base_label = True
        config3.graph_store.config.collection_name = None
        config3.llm.provider = "openai"
        config3.graph_store.llm = None
        config3.vector_store.provider = "qdrant"
        config3.vector_store.config = MagicMock()
        config3.vector_store.config.collection_name = None
        MemoryGraph(config3)
        self.assertEqual(config3.vector_store.config.collection_name, "mem0_neptune_vector_store")
    def test_init(self):
        """Test the class init functions"""
        # Create a mock config with bad endpoint
        config_no_endpoint = MagicMock()
        config_no_endpoint.graph_store.config.endpoint = None
        # Create the MemoryGraph instance: a missing endpoint must be rejected
        with pytest.raises(ValueError):
            MemoryGraph(config_no_endpoint)
        # Create a mock config with wrong endpoint type
        # (neptune-graph:// is the Analytics scheme; this class requires neptune-db://)
        config_wrong_endpoint = MagicMock()
        config_wrong_endpoint.graph_store.config.endpoint = "neptune-graph://test-graph"
        with pytest.raises(ValueError):
            MemoryGraph(config_wrong_endpoint)
    def test_add_method(self):
        """Test the add method with mocked components."""
        # Mock the necessary methods that add() calls
        self.memory_graph._retrieve_nodes_from_data = MagicMock(return_value={"alice": "person", "bob": "person"})
        self.memory_graph._establish_nodes_relations_from_data = MagicMock(
            return_value=[{"source": "alice", "relationship": "knows", "destination": "bob"}]
        )
        self.memory_graph._search_graph_db = MagicMock(return_value=[])
        self.memory_graph._get_delete_entities_from_search_output = MagicMock(return_value=[])
        self.memory_graph._delete_entities = MagicMock(return_value=[])
        self.memory_graph._add_entities = MagicMock(
            return_value=[{"source": "alice", "relationship": "knows", "target": "bob"}]
        )
        # Call the add method
        result = self.memory_graph.add("Alice knows Bob", self.test_filters)
        # Verify the method calls
        self.memory_graph._retrieve_nodes_from_data.assert_called_once_with("Alice knows Bob", self.test_filters)
        self.memory_graph._establish_nodes_relations_from_data.assert_called_once()
        self.memory_graph._search_graph_db.assert_called_once()
        self.memory_graph._get_delete_entities_from_search_output.assert_called_once()
        self.memory_graph._delete_entities.assert_called_once_with([], self.user_id)
        self.memory_graph._add_entities.assert_called_once()
        # Check the result structure
        self.assertIn("deleted_entities", result)
        self.assertIn("added_entities", result)
    def test_search_method(self):
        """Test the search method with mocked components."""
        # Mock the necessary methods that search() calls
        self.memory_graph._retrieve_nodes_from_data = MagicMock(return_value={"alice": "person"})
        # Mock search results
        mock_search_results = [
            {"source": "alice", "relationship": "knows", "destination": "bob"},
            {"source": "alice", "relationship": "works_with", "destination": "charlie"},
        ]
        self.memory_graph._search_graph_db = MagicMock(return_value=mock_search_results)
        # Mock BM25Okapi so no real reranking happens
        with patch("mem0.graphs.neptune.base.BM25Okapi") as mock_bm25:
            mock_bm25_instance = MagicMock()
            mock_bm25.return_value = mock_bm25_instance
            # Mock get_top_n to return reranked results
            reranked_results = [["alice", "knows", "bob"], ["alice", "works_with", "charlie"]]
            mock_bm25_instance.get_top_n.return_value = reranked_results
            # Call the search method
            result = self.memory_graph.search("Find Alice", self.test_filters, limit=5)
            # Verify the method calls (note: limit is not forwarded to _search_graph_db)
            self.memory_graph._retrieve_nodes_from_data.assert_called_once_with("Find Alice", self.test_filters)
            self.memory_graph._search_graph_db.assert_called_once_with(node_list=["alice"], filters=self.test_filters)
            # Check the result structure
            self.assertEqual(len(result), 2)
            self.assertEqual(result[0]["source"], "alice")
            self.assertEqual(result[0]["relationship"], "knows")
            self.assertEqual(result[0]["destination"], "bob")
    def test_get_all_method(self):
        """Test the get_all method."""
        # Mock the _get_all_cypher method
        mock_cypher = "MATCH (n) RETURN n"
        mock_params = {"user_id": self.user_id, "limit": 10}
        self.memory_graph._get_all_cypher = MagicMock(return_value=(mock_cypher, mock_params))
        # Mock the graph.query result
        mock_query_result = [
            {"source": "alice", "relationship": "knows", "target": "bob"},
            {"source": "bob", "relationship": "works_with", "target": "charlie"},
        ]
        self.mock_graph.query.return_value = mock_query_result
        # Call the get_all method
        result = self.memory_graph.get_all(self.test_filters, limit=10)
        # Verify the method calls
        self.memory_graph._get_all_cypher.assert_called_once_with(self.test_filters, 10)
        self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)
        # Check the result structure
        self.assertEqual(len(result), 2)
        self.assertEqual(result[0]["source"], "alice")
        self.assertEqual(result[0]["relationship"], "knows")
        self.assertEqual(result[0]["target"], "bob")
    def test_delete_all_method(self):
        """Test the delete_all method."""
        # Mock the _delete_all_cypher method
        mock_cypher = "MATCH (n) DETACH DELETE n"
        mock_params = {"user_id": self.user_id}
        self.memory_graph._delete_all_cypher = MagicMock(return_value=(mock_cypher, mock_params))
        # Call the delete_all method
        self.memory_graph.delete_all(self.test_filters)
        # Verify the method calls
        self.memory_graph._delete_all_cypher.assert_called_once_with(self.test_filters)
        self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)
    def test_search_source_node(self):
        """Test the _search_source_node method."""
        # Mock embedding
        mock_embedding = [0.1, 0.2, 0.3]
        # Mock the _search_source_node_cypher method
        mock_cypher = "MATCH (n) RETURN n"
        mock_params = {"source_embedding": mock_embedding, "user_id": self.user_id, "threshold": 0.9}
        self.memory_graph._search_source_node_cypher = MagicMock(return_value=(mock_cypher, mock_params))
        # Mock the graph.query result
        mock_query_result = [{"id(source_candidate)": 123, "cosine_similarity": 0.95}]
        self.mock_graph.query.return_value = mock_query_result
        # Call the _search_source_node method
        result = self.memory_graph._search_source_node(mock_embedding, self.user_id, threshold=0.9)
        # Verify the method calls
        self.memory_graph._search_source_node_cypher.assert_called_once_with(mock_embedding, self.user_id, 0.9)
        self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)
        # Check the result
        self.assertEqual(result, mock_query_result)
    def test_search_destination_node(self):
        """Test the _search_destination_node method."""
        # Mock embedding
        mock_embedding = [0.1, 0.2, 0.3]
        # Mock the _search_destination_node_cypher method
        mock_cypher = "MATCH (n) RETURN n"
        mock_params = {"destination_embedding": mock_embedding, "user_id": self.user_id, "threshold": 0.9}
        self.memory_graph._search_destination_node_cypher = MagicMock(return_value=(mock_cypher, mock_params))
        # Mock the graph.query result
        mock_query_result = [{"id(destination_candidate)": 456, "cosine_similarity": 0.92}]
        self.mock_graph.query.return_value = mock_query_result
        # Call the _search_destination_node method
        result = self.memory_graph._search_destination_node(mock_embedding, self.user_id, threshold=0.9)
        # Verify the method calls
        self.memory_graph._search_destination_node_cypher.assert_called_once_with(mock_embedding, self.user_id, 0.9)
        self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)
        # Check the result
        self.assertEqual(result, mock_query_result)
    def test_search_graph_db(self):
        """Test the _search_graph_db method."""
        # Mock node list
        node_list = ["alice", "bob"]
        # Mock embedding
        mock_embedding = [0.1, 0.2, 0.3]
        self.mock_embedding_model.embed.return_value = mock_embedding
        # Mock the _search_graph_db_cypher method
        mock_cypher = "MATCH (n) RETURN n"
        mock_params = {"n_embedding": mock_embedding, "user_id": self.user_id, "threshold": 0.7, "limit": 10}
        self.memory_graph._search_graph_db_cypher = MagicMock(return_value=(mock_cypher, mock_params))
        # Mock the graph.query results: one result list per node in node_list
        mock_query_result1 = [{"source": "alice", "relationship": "knows", "destination": "bob"}]
        mock_query_result2 = [{"source": "bob", "relationship": "works_with", "destination": "charlie"}]
        self.mock_graph.query.side_effect = [mock_query_result1, mock_query_result2]
        # Call the _search_graph_db method
        result = self.memory_graph._search_graph_db(node_list, self.test_filters, limit=10)
        # Verify the method calls (one embed + one query per node)
        self.assertEqual(self.mock_embedding_model.embed.call_count, 2)
        self.assertEqual(self.memory_graph._search_graph_db_cypher.call_count, 2)
        self.assertEqual(self.mock_graph.query.call_count, 2)
        # Check the result: per-node results are concatenated
        expected_result = mock_query_result1 + mock_query_result2
        self.assertEqual(result, expected_result)
    def test_add_entities(self):
        """Test the _add_entities method."""
        # Mock data
        to_be_added = [{"source": "alice", "relationship": "knows", "destination": "bob"}]
        entity_type_map = {"alice": "person", "bob": "person"}
        # Mock embeddings
        mock_embedding = [0.1, 0.2, 0.3]
        self.mock_embedding_model.embed.return_value = mock_embedding
        # Mock search results
        mock_source_search = [{"id(source_candidate)": 123, "cosine_similarity": 0.95}]
        mock_dest_search = [{"id(destination_candidate)": 456, "cosine_similarity": 0.92}]
        # Mock the search methods
        self.memory_graph._search_source_node = MagicMock(return_value=mock_source_search)
        self.memory_graph._search_destination_node = MagicMock(return_value=mock_dest_search)
        # Mock the _add_entities_cypher method
        mock_cypher = "MATCH (n) RETURN n"
        mock_params = {"source_id": 123, "destination_id": 456}
        self.memory_graph._add_entities_cypher = MagicMock(return_value=(mock_cypher, mock_params))
        # Mock the graph.query result
        mock_query_result = [{"source": "alice", "relationship": "knows", "target": "bob"}]
        self.mock_graph.query.return_value = mock_query_result
        # Call the _add_entities method
        result = self.memory_graph._add_entities(to_be_added, self.user_id, entity_type_map)
        # Verify the method calls (threshold 0.7 comes from the config in setUp)
        self.assertEqual(self.mock_embedding_model.embed.call_count, 2)
        self.memory_graph._search_source_node.assert_called_once_with(mock_embedding, self.user_id, threshold=0.7)
        self.memory_graph._search_destination_node.assert_called_once_with(mock_embedding, self.user_id, threshold=0.7)
        self.memory_graph._add_entities_cypher.assert_called_once()
        self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)
        # Check the result
        self.assertEqual(result, [mock_query_result])
    def test_delete_entities(self):
        """Test the _delete_entities method."""
        # Mock data
        to_be_deleted = [{"source": "alice", "relationship": "knows", "destination": "bob"}]
        # Mock the _delete_entities_cypher method
        mock_cypher = "MATCH (n) RETURN n"
        mock_params = {"source_name": "alice", "dest_name": "bob", "user_id": self.user_id}
        self.memory_graph._delete_entities_cypher = MagicMock(return_value=(mock_cypher, mock_params))
        # Mock the graph.query result
        mock_query_result = [{"source": "alice", "relationship": "knows", "target": "bob"}]
        self.mock_graph.query.return_value = mock_query_result
        # Call the _delete_entities method
        result = self.memory_graph._delete_entities(to_be_deleted, self.user_id)
        # Verify the method calls
        self.memory_graph._delete_entities_cypher.assert_called_once_with("alice", "bob", "knows", self.user_id)
        self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)
        # Check the result
        self.assertEqual(result, [mock_query_result])
# Allow running this test module directly (python tests/memory/test_neptune_memory.py).
if __name__ == "__main__":
    unittest.main()
| {
"repo_id": "mem0ai/mem0",
"file_path": "tests/memory/test_neptune_memory.py",
"license": "Apache License 2.0",
"lines": 306,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mem0ai/mem0:examples/misc/test.py | from agents import Agent, Runner, enable_verbose_stdout_logging, function_tool
from dotenv import load_dotenv
from mem0 import MemoryClient
# Echo the Agents SDK's verbose logs to stdout so handoffs are visible when running.
enable_verbose_stdout_logging()
# Load environment variables (API keys) from a local .env file, if present.
load_dotenv()
# Initialize Mem0 client
# NOTE(review): MemoryClient() is called with no arguments — presumably it picks up
# its API key from the environment loaded above; confirm against the mem0 client docs.
mem0 = MemoryClient()
# Define memory tools for the agent
@function_tool
def search_memory(query: str, user_id: str) -> str:
    """Look up memories from past conversations relevant to *query* for this user."""
    hits = mem0.search(query, user_id=user_id, limit=3)
    if not hits:
        return "No relevant memories found."
    # One bullet line per retrieved memory.
    bullet_lines = [f"- {hit['memory']}" for hit in hits]
    return "\n".join(bullet_lines)
@function_tool
def save_memory(content: str, user_id: str) -> str:
    """Persist a piece of information to the user's long-term memory."""
    message = {"role": "user", "content": content}
    mem0.add([message], user_id=user_id)
    return "Information saved to memory."
# Specialized agents
# NOTE: the original instruction prompts told the agents to use tools named
# "get_user_context" and "store_conversation", which are never defined; the
# tools actually registered are search_memory and save_memory. The prompts
# below reference the real tool names so the model can actually invoke them.
travel_agent = Agent(
    name="Travel Planner",
    instructions="""You are a travel planning specialist. Use search_memory to
    understand the user's travel preferences and history before making recommendations.
    After providing your response, use save_memory to save important details.""",
    tools=[search_memory, save_memory],
    model="gpt-4.1-nano-2025-04-14",
)
health_agent = Agent(
    name="Health Advisor",
    instructions="""You are a health and wellness advisor. Use search_memory to
    understand the user's health goals and dietary preferences.
    After providing advice, use save_memory to save relevant information.""",
    tools=[search_memory, save_memory],
    model="gpt-4.1-nano-2025-04-14",
)
# Triage agent with handoffs
triage_agent = Agent(
    name="Personal Assistant",
    instructions="""You are a helpful personal assistant that routes requests to specialists.
    For travel-related questions (trips, hotels, flights, destinations), hand off to Travel Planner.
    For health-related questions (fitness, diet, wellness, exercise), hand off to Health Advisor.
    For general questions, you can handle them directly using available tools.""",
    handoffs=[travel_agent, health_agent],
    # The prompt says general questions are handled "using available tools", so the
    # triage agent needs the memory tools too (it previously had none).
    tools=[search_memory, save_memory],
    model="gpt-4.1-nano-2025-04-14",
)
def chat_with_handoffs(user_input: str, user_id: str) -> str:
    """
    Route a user message through the triage agent and record the exchange in memory.

    Args:
        user_input: The user's message.
        user_id: Unique identifier for the user.

    Returns:
        The final text produced by whichever agent handled the request.
    """
    # The triage agent automatically hands off to a specialist when appropriate.
    outcome = Runner.run_sync(triage_agent, user_input)
    reply = outcome.final_output
    # Persist both sides of the exchange so future sessions can recall it.
    turns = [
        {"role": "user", "content": user_input},
        {"role": "assistant", "content": reply},
    ]
    mem0.add(turns, user_id=user_id)
    return reply
# Example usage
# response = chat_with_handoffs("Which places should I visit?", user_id="alex")
# print(response)
| {
"repo_id": "mem0ai/mem0",
"file_path": "examples/misc/test.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:examples/misc/vllm_example.py | """
Example of using vLLM with mem0 for high-performance memory operations.
SETUP INSTRUCTIONS:
1. Install vLLM:
pip install vllm
2. Start vLLM server (in a separate terminal), serving the same model that is
   configured below in config["llm"]["config"]["model"]:
   vllm serve Qwen/Qwen2.5-32B-Instruct --port 8000
   Wait for the message: "Uvicorn running on http://0.0.0.0:8000"
   (For a quick, low-resource smoke test you can instead serve a small model,
   e.g. microsoft/DialoGPT-small (~500MB download), and change the "model"
   entry in the config to match.)
3. Verify server is running:
curl http://localhost:8000/health
4. Run this example:
python examples/misc/vllm_example.py
Optional environment variables:
export VLLM_BASE_URL="http://localhost:8000/v1"
export VLLM_API_KEY="vllm-api-key"
"""
from mem0 import Memory
# Configuration for vLLM integration
config = {
    "llm": {
        "provider": "vllm",
        "config": {
            # Must match the model the local vLLM server is serving (see the
            # setup instructions in the module docstring).
            "model": "Qwen/Qwen2.5-32B-Instruct",
            # OpenAI-compatible endpoint exposed by `vllm serve`; overridable via VLLM_BASE_URL.
            "vllm_base_url": "http://localhost:8000/v1",
            # Placeholder key — local vLLM servers accept any token unless configured otherwise.
            "api_key": "vllm-api-key",
            "temperature": 0.7,
            "max_tokens": 100,
        },
    },
    # Embeddings go through OpenAI — presumably requires OPENAI_API_KEY in the
    # environment; confirm before running.
    "embedder": {"provider": "openai", "config": {"model": "text-embedding-3-small"}},
    # Local Qdrant instance used as the vector store.
    "vector_store": {
        "provider": "qdrant",
        "config": {"collection_name": "vllm_memories", "host": "localhost", "port": 6333},
    },
}
def main():
    """Walk through mem0's add / search / get_all flows backed by a vLLM server."""
    print("--> Initializing mem0 with vLLM...")

    # Build the Memory instance from the module-level vLLM configuration.
    mem = Memory.from_config(config)
    print("--> Memory initialized successfully!")

    # Demo conversations to persist as memories.
    sample_conversations = [
        {
            "messages": [
                {"role": "user", "content": "I love playing chess on weekends"},
                {
                    "role": "assistant",
                    "content": "That's great! Chess is an excellent strategic game that helps improve critical thinking.",
                },
            ],
            "user_id": "user_123",
        },
        {
            "messages": [
                {"role": "user", "content": "I'm learning Python programming"},
                {
                    "role": "assistant",
                    "content": "Python is a fantastic language for beginners! What specific areas are you focusing on?",
                },
            ],
            "user_id": "user_123",
        },
        {
            "messages": [
                {"role": "user", "content": "I prefer working late at night, I'm more productive then"},
                {
                    "role": "assistant",
                    "content": "Many people find they're more creative and focused during nighttime hours. It's important to maintain a consistent schedule that works for you.",
                },
            ],
            "user_id": "user_123",
        },
    ]

    print("\n--> Adding memories using vLLM...")
    # Store each conversation; the heavy lifting is done by the vLLM backend.
    for idx, convo in enumerate(sample_conversations, start=1):
        outcome = mem.add(messages=convo["messages"], user_id=convo["user_id"])
        print(f"Memory {idx} added: {outcome}")

    print("\n🔍 Searching memories...")
    queries = [
        "What does the user like to do on weekends?",
        "What is the user learning?",
        "When is the user most productive?",
    ]
    for question in queries:
        print(f"\nQuery: {question}")
        for hit in mem.search(query=question, user_id="user_123"):
            print(f" - {hit['memory']}")

    print("\n--> Getting all memories for user...")
    stored = mem.get_all(user_id="user_123")
    print(f"Total memories stored: {len(stored)}")
    for hit in stored:
        print(f" - {hit['memory']}")

    print("\n--> vLLM integration demo completed successfully!")
    print("\nBenefits of using vLLM:")
    print(" -> 2.7x higher throughput compared to standard implementations")
    print(" -> 5x faster time-per-output-token")
    print(" -> Efficient memory usage with PagedAttention")
    print(" -> Simple configuration, same as other providers")
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Broad catch is deliberate in this demo script: print the error plus
        # setup hints instead of an unhandled traceback.
        print(f"=> Error: {e}")
        print("\nTroubleshooting:")
        print("1. Make sure vLLM server is running: vllm serve microsoft/DialoGPT-small --port 8000")
        print("2. Check if the model is downloaded and accessible")
        print("3. Verify the base URL and port configuration")
        print("4. Ensure you have the required dependencies installed")
| {
"repo_id": "mem0ai/mem0",
"file_path": "examples/misc/vllm_example.py",
"license": "Apache License 2.0",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:tests/llms/test_vllm.py | from unittest.mock import MagicMock, Mock, patch
import pytest
from mem0 import AsyncMemory, Memory
from mem0.configs.llms.base import BaseLlmConfig
from mem0.llms.vllm import VllmLLM
@pytest.fixture
def mock_vllm_client():
    """Yield a Mock that stands in for the OpenAI client constructed by VllmLLM."""
    # Patch the OpenAI constructor so VllmLLM(config) receives this mock instead
    # of opening a real connection; the patch stays active for the test's duration.
    with patch("mem0.llms.vllm.OpenAI") as mock_openai:
        mock_client = Mock()
        mock_openai.return_value = mock_client
        yield mock_client
def test_generate_response_without_tools(mock_vllm_client):
    """generate_response returns the plain message content when no tools are passed."""
    config = BaseLlmConfig(model="Qwen/Qwen2.5-32B-Instruct", temperature=0.7, max_tokens=100, top_p=1.0)
    llm = VllmLLM(config)
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
    ]
    # Shape the mock like an OpenAI chat completion: choices[0].message.content.
    mock_response = Mock()
    mock_response.choices = [Mock(message=Mock(content="I'm doing well, thank you for asking!"))]
    mock_vllm_client.chat.completions.create.return_value = mock_response
    response = llm.generate_response(messages)
    # The client must be called exactly once with the configured sampling params.
    mock_vllm_client.chat.completions.create.assert_called_once_with(
        model="Qwen/Qwen2.5-32B-Instruct", messages=messages, temperature=0.7, max_tokens=100, top_p=1.0
    )
    assert response == "I'm doing well, thank you for asking!"
def test_generate_response_with_tools(mock_vllm_client):
    """With tools, generate_response returns a dict of content plus parsed tool calls."""
    config = BaseLlmConfig(model="Qwen/Qwen2.5-32B-Instruct", temperature=0.7, max_tokens=100, top_p=1.0)
    llm = VllmLLM(config)
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Add a new memory: Today is a sunny day."},
    ]
    # One OpenAI-style function tool definition.
    tools = [
        {
            "type": "function",
            "function": {
                "name": "add_memory",
                "description": "Add a memory",
                "parameters": {
                    "type": "object",
                    "properties": {"data": {"type": "string", "description": "Data to add to memory"}},
                    "required": ["data"],
                },
            },
        }
    ]
    # Build a completion whose message carries both content and one tool call;
    # the arguments are a JSON string, as the OpenAI API returns them.
    mock_response = Mock()
    mock_message = Mock()
    mock_message.content = "I've added the memory for you."
    mock_tool_call = Mock()
    mock_tool_call.function.name = "add_memory"
    mock_tool_call.function.arguments = '{"data": "Today is a sunny day."}'
    mock_message.tool_calls = [mock_tool_call]
    mock_response.choices = [Mock(message=mock_message)]
    mock_vllm_client.chat.completions.create.return_value = mock_response
    response = llm.generate_response(messages, tools=tools)
    # Tools must be forwarded along with tool_choice="auto".
    mock_vllm_client.chat.completions.create.assert_called_once_with(
        model="Qwen/Qwen2.5-32B-Instruct",
        messages=messages,
        temperature=0.7,
        max_tokens=100,
        top_p=1.0,
        tools=tools,
        tool_choice="auto",
    )
    assert response["content"] == "I've added the memory for you."
    assert len(response["tool_calls"]) == 1
    assert response["tool_calls"][0]["name"] == "add_memory"
    # The JSON argument string is expected to come back parsed into a dict.
    assert response["tool_calls"][0]["arguments"] == {"data": "Today is a sunny day."}
def create_mocked_memory():
    """Create a fully mocked Memory instance for testing.

    Returns a tuple (memory, mock_llm, mock_vector_store); the mocks remain
    attached to the Memory instance after the patches exit.
    """
    # Patch the factories and SQLite manager so constructing Memory() wires in
    # mocks instead of real LLM / embedder / vector-store / database backends.
    with patch('mem0.utils.factory.LlmFactory.create') as mock_llm_factory, \
        patch('mem0.utils.factory.EmbedderFactory.create') as mock_embedder_factory, \
        patch('mem0.utils.factory.VectorStoreFactory.create') as mock_vector_factory, \
        patch('mem0.memory.storage.SQLiteManager') as mock_sqlite:
        mock_llm = MagicMock()
        mock_llm_factory.return_value = mock_llm
        mock_embedder = MagicMock()
        # Fixed 3-dim embedding keeps vector-store interactions deterministic.
        mock_embedder.embed.return_value = [0.1, 0.2, 0.3]
        mock_embedder_factory.return_value = mock_embedder
        mock_vector_store = MagicMock()
        mock_vector_store.search.return_value = []
        mock_vector_store.add.return_value = None
        mock_vector_factory.return_value = mock_vector_store
        mock_sqlite.return_value = MagicMock()
        memory = Memory()
        memory.api_version = "v1.0"
        return memory, mock_llm, mock_vector_store
def create_mocked_async_memory():
    """Create a fully mocked AsyncMemory instance for testing.

    Async twin of create_mocked_memory(); returns the same
    (memory, mock_llm, mock_vector_store) tuple.
    """
    # Patch the factories and SQLite manager so constructing AsyncMemory() wires
    # in mocks instead of real LLM / embedder / vector-store / database backends.
    with patch('mem0.utils.factory.LlmFactory.create') as mock_llm_factory, \
        patch('mem0.utils.factory.EmbedderFactory.create') as mock_embedder_factory, \
        patch('mem0.utils.factory.VectorStoreFactory.create') as mock_vector_factory, \
        patch('mem0.memory.storage.SQLiteManager') as mock_sqlite:
        mock_llm = MagicMock()
        mock_llm_factory.return_value = mock_llm
        mock_embedder = MagicMock()
        # Fixed 3-dim embedding keeps vector-store interactions deterministic.
        mock_embedder.embed.return_value = [0.1, 0.2, 0.3]
        mock_embedder_factory.return_value = mock_embedder
        mock_vector_store = MagicMock()
        mock_vector_store.search.return_value = []
        mock_vector_store.add.return_value = None
        mock_vector_factory.return_value = mock_vector_store
        mock_sqlite.return_value = MagicMock()
        memory = AsyncMemory()
        memory.api_version = "v1.0"
        return memory, mock_llm, mock_vector_store
def test_thinking_tags_sync():
    """Test thinking tags handling in Memory._add_to_vector_store (sync)."""
    memory, mock_llm, mock_vector_store = create_mocked_memory()
    # Mock LLM responses for both phases
    # Each response wraps its JSON payload in <think>...</think> text; the test
    # expects the implementation to strip that before parsing the JSON.
    mock_llm.generate_response.side_effect = [
        ' <think>Sync fact extraction</think> \n{"facts": ["User loves sci-fi"]}',
        ' <think>Sync memory actions</think> \n{"memory": [{"text": "Loves sci-fi", "event": "ADD"}]}'
    ]
    mock_vector_store.search.return_value = []
    result = memory._add_to_vector_store(
        messages=[{"role": "user", "content": "I love sci-fi movies"}],
        metadata={},
        filters={},
        infer=True
    )
    # A single ADD event should survive the <think> wrapping.
    assert len(result) == 1
    assert result[0]["memory"] == "Loves sci-fi"
    assert result[0]["event"] == "ADD"
@pytest.mark.asyncio
async def test_async_thinking_tags_async():
    """Test thinking tags handling in AsyncMemory._add_to_vector_store."""
    memory, mock_llm, mock_vector_store = create_mocked_async_memory()
    # Directly mock llm.generate_response instead of via asyncio.to_thread
    # Both LLM phases wrap their JSON in <think>...</think> text; the test
    # expects the implementation to strip that before parsing.
    mock_llm.generate_response.side_effect = [
        ' <think>Async fact extraction</think> \n{"facts": ["User loves sci-fi"]}',
        ' <think>Async memory actions</think> \n{"memory": [{"text": "Loves sci-fi", "event": "ADD"}]}'
    ]
    # Mock asyncio.to_thread to call the function directly (bypass threading)
    async def mock_to_thread(func, *args, **kwargs):
        if func == mock_llm.generate_response:
            return func(*args, **kwargs)
        elif hasattr(func, '__name__') and 'embed' in func.__name__:
            # Stand-in embedding vector for any embed call routed through to_thread.
            return [0.1, 0.2, 0.3]
        elif hasattr(func, '__name__') and 'search' in func.__name__:
            return []
        else:
            return func(*args, **kwargs)
    with patch('mem0.memory.main.asyncio.to_thread', side_effect=mock_to_thread):
        result = await memory._add_to_vector_store(
            messages=[{"role": "user", "content": "I love sci-fi movies"}],
            metadata={},
            effective_filters={},
            infer=True
        )
    assert len(result) == 1
    assert result[0]["memory"] == "Loves sci-fi"
assert result[0]["event"] == "ADD" | {
"repo_id": "mem0ai/mem0",
"file_path": "tests/llms/test_vllm.py",
"license": "Apache License 2.0",
"lines": 155,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mem0ai/mem0:mem0/configs/vector_stores/baidu.py | from typing import Any, Dict, Optional
from pydantic import BaseModel, ConfigDict, Field, model_validator
class BaiduDBConfig(BaseModel):
    """Configuration for the Baidu VectorDB (Mochow) vector store."""

    endpoint: str = Field("http://localhost:8287", description="Endpoint URL for Baidu VectorDB")
    account: str = Field("root", description="Account for Baidu VectorDB")
    # BUG FIX: the default is None, so the annotation must be Optional[str];
    # a bare `str` annotation contradicts the None default and mistypes the field.
    api_key: Optional[str] = Field(None, description="API Key for Baidu VectorDB")
    database_name: str = Field("mem0", description="Name of the database")
    table_name: str = Field("mem0", description="Name of the table")
    embedding_model_dims: int = Field(1536, description="Dimensions of the embedding model")
    metric_type: str = Field("L2", description="Metric type for similarity search")

    @model_validator(mode="before")
    @classmethod
    def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Reject any input keys that are not declared fields of this model."""
        allowed_fields = set(cls.model_fields.keys())
        input_fields = set(values.keys())
        extra_fields = input_fields - allowed_fields
        if extra_fields:
            raise ValueError(
                f"Extra fields not allowed: {', '.join(extra_fields)}. Please input only the following fields: {', '.join(allowed_fields)}"
            )
        return values

    model_config = ConfigDict(arbitrary_types_allowed=True)
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/vector_stores/baidu.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:tests/vector_stores/test_baidu.py | from unittest.mock import Mock, PropertyMock, patch
import pytest
from pymochow.exception import ServerError
from pymochow.model.enum import ServerErrCode, TableState
from pymochow.model.table import (
FloatVector,
Table,
VectorSearchConfig,
VectorTopkSearchRequest,
)
from mem0.vector_stores.baidu import BaiduDB
@pytest.fixture
def mock_mochow_client():
with patch("pymochow.MochowClient") as mock_client:
yield mock_client
@pytest.fixture
def mock_configuration():
with patch("pymochow.configuration.Configuration") as mock_config:
yield mock_config
@pytest.fixture
def mock_bce_credentials():
with patch("pymochow.auth.bce_credentials.BceCredentials") as mock_creds:
yield mock_creds
@pytest.fixture
def mock_table():
    """Return a Mock constrained to pymochow's Table interface."""
    mock_table = Mock(spec=Table)
    # Configure the Table attributes via PropertyMock on the mock's type
    # (PropertyMock must be attached to the class, not the instance).
    type(mock_table).database_name = PropertyMock(return_value="test_db")
    type(mock_table).table_name = PropertyMock(return_value="test_table")
    type(mock_table).schema = PropertyMock(return_value=Mock())
    type(mock_table).replication = PropertyMock(return_value=1)
    type(mock_table).partition = PropertyMock(return_value=Mock())
    type(mock_table).enable_dynamic_field = PropertyMock(return_value=False)
    type(mock_table).description = PropertyMock(return_value="")
    type(mock_table).create_time = PropertyMock(return_value="")
    # NORMAL state so code polling table readiness sees it as available.
    type(mock_table).state = PropertyMock(return_value=TableState.NORMAL)
    type(mock_table).aliases = PropertyMock(return_value=[])
    return mock_table
@pytest.fixture
def mochow_instance(mock_mochow_client, mock_configuration, mock_bce_credentials, mock_table):
    """Build a BaiduDB wired to fully mocked pymochow client/database/table objects."""
    mock_database = Mock()
    mock_client_instance = Mock()
    # Mock the client creation
    mock_mochow_client.return_value = mock_client_instance
    # Mock database operations
    # Empty list_databases means BaiduDB sees no existing databases.
    mock_client_instance.list_databases.return_value = []
    mock_client_instance.create_database.return_value = mock_database
    mock_client_instance.database.return_value = mock_database
    # Mock table operations
    # Empty list_table likewise reports no existing tables; describe_table
    # returns NORMAL so any readiness check completes immediately.
    mock_database.list_table.return_value = []
    mock_database.create_table.return_value = mock_table
    mock_database.describe_table.return_value = Mock(state=TableState.NORMAL)
    mock_database.table.return_value = mock_table
    return BaiduDB(
        endpoint="http://localhost:8287",
        account="test_account",
        api_key="test_api_key",
        database_name="test_db",
        table_name="test_table",
        embedding_model_dims=128,
        metric_type="COSINE",
    )
def test_insert(mochow_instance, mock_mochow_client):
vectors = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
payloads = [{"name": "vector1"}, {"name": "vector2"}]
ids = ["id1", "id2"]
mochow_instance.insert(vectors=vectors, payloads=payloads, ids=ids)
# Verify table.upsert was called with correct data
assert mochow_instance._table.upsert.call_count == 2
calls = mochow_instance._table.upsert.call_args_list
# Check first call
first_row = calls[0][1]["rows"][0]
assert first_row._data["id"] == "id1"
assert first_row._data["vector"] == [0.1, 0.2, 0.3]
assert first_row._data["metadata"] == {"name": "vector1"}
# Check second call
second_row = calls[1][1]["rows"][0]
assert second_row._data["id"] == "id2"
assert second_row._data["vector"] == [0.4, 0.5, 0.6]
assert second_row._data["metadata"] == {"name": "vector2"}
def test_search(mochow_instance, mock_mochow_client):
# Mock search results
mock_search_results = Mock()
mock_search_results.rows = [
{"row": {"id": "id1", "metadata": {"name": "vector1"}}, "score": 0.1},
{"row": {"id": "id2", "metadata": {"name": "vector2"}}, "score": 0.2},
]
mochow_instance._table.vector_search.return_value = mock_search_results
vectors = [0.1, 0.2, 0.3]
results = mochow_instance.search(query="test", vectors=vectors, limit=2)
# Verify search was called with correct parameters
mochow_instance._table.vector_search.assert_called_once()
call_args = mochow_instance._table.vector_search.call_args
request = call_args[0][0] if call_args[0] else call_args[1]["request"]
assert isinstance(request, VectorTopkSearchRequest)
assert request._vector_field == "vector"
assert isinstance(request._vector, FloatVector)
assert request._vector._floats == vectors
assert request._limit == 2
assert isinstance(request._config, VectorSearchConfig)
assert request._config._ef == 200
# Verify results
assert len(results) == 2
assert results[0].id == "id1"
assert results[0].score == 0.1
assert results[0].payload == {"name": "vector1"}
assert results[1].id == "id2"
assert results[1].score == 0.2
assert results[1].payload == {"name": "vector2"}
def test_search_with_filters(mochow_instance, mock_mochow_client):
mochow_instance._table.vector_search.return_value = Mock(rows=[])
vectors = [0.1, 0.2, 0.3]
filters = {"user_id": "user123", "agent_id": "agent456"}
mochow_instance.search(query="test", vectors=vectors, limit=2, filters=filters)
# Verify search was called with filter
call_args = mochow_instance._table.vector_search.call_args
request = call_args[0][0] if call_args[0] else call_args[1]["request"]
assert request._filter == 'metadata["user_id"] = "user123" AND metadata["agent_id"] = "agent456"'
def test_delete(mochow_instance, mock_mochow_client):
vector_id = "id1"
mochow_instance.delete(vector_id=vector_id)
mochow_instance._table.delete.assert_called_once_with(primary_key={"id": vector_id})
def test_update(mochow_instance, mock_mochow_client):
vector_id = "id1"
new_vector = [0.7, 0.8, 0.9]
new_payload = {"name": "updated_vector"}
mochow_instance.update(vector_id=vector_id, vector=new_vector, payload=new_payload)
mochow_instance._table.upsert.assert_called_once()
call_args = mochow_instance._table.upsert.call_args
row = call_args[0][0] if call_args[0] else call_args[1]["rows"][0]
assert row._data["id"] == vector_id
assert row._data["vector"] == new_vector
assert row._data["metadata"] == new_payload
def test_get(mochow_instance, mock_mochow_client):
# Mock query result
mock_result = Mock()
mock_result.row = {"id": "id1", "metadata": {"name": "vector1"}}
mochow_instance._table.query.return_value = mock_result
result = mochow_instance.get(vector_id="id1")
mochow_instance._table.query.assert_called_once_with(primary_key={"id": "id1"}, projections=["id", "metadata"])
assert result.id == "id1"
assert result.score is None
assert result.payload == {"name": "vector1"}
def test_list(mochow_instance, mock_mochow_client):
# Mock select result
mock_result = Mock()
mock_result.rows = [{"id": "id1", "metadata": {"name": "vector1"}}, {"id": "id2", "metadata": {"name": "vector2"}}]
mochow_instance._table.select.return_value = mock_result
results = mochow_instance.list(limit=2)
mochow_instance._table.select.assert_called_once_with(filter=None, projections=["id", "metadata"], limit=2)
assert len(results[0]) == 2
assert results[0][0].id == "id1"
assert results[0][1].id == "id2"
def test_list_cols(mochow_instance, mock_mochow_client):
# Mock table list
mock_tables = [
Mock(spec=Table, database_name="test_db", table_name="table1"),
Mock(spec=Table, database_name="test_db", table_name="table2"),
]
mochow_instance._database.list_table.return_value = mock_tables
result = mochow_instance.list_cols()
assert result == ["table1", "table2"]
def test_delete_col_not_exists(mochow_instance, mock_mochow_client):
    """delete_col tolerates the server reporting that the table does not exist."""
    # Use the correct ServerErrCode enum value
    mochow_instance._database.drop_table.side_effect = ServerError(
        "Table not exists", code=ServerErrCode.TABLE_NOT_EXIST
    )
    # Should not raise exception
    mochow_instance.delete_col()
def test_col_info(mochow_instance, mock_mochow_client):
mock_table_info = {"table_name": "test_table", "fields": []}
mochow_instance._table.stats.return_value = mock_table_info
result = mochow_instance.col_info()
assert result == mock_table_info
| {
"repo_id": "mem0ai/mem0",
"file_path": "tests/vector_stores/test_baidu.py",
"license": "Apache License 2.0",
"lines": 174,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mem0ai/mem0:examples/misc/diet_assistant_voice_cartesia.py | """Simple Voice Agent with Memory: Personal Food Assistant.
A food assistant that remembers your dietary preferences and speaks recommendations
Powered by Agno + Cartesia + Mem0
export MEM0_API_KEY=your_mem0_api_key
export OPENAI_API_KEY=your_openai_api_key
export CARTESIA_API_KEY=your_cartesia_api_key
"""
from textwrap import dedent
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.cartesia import CartesiaTools
from agno.utils.audio import write_audio_to_file
from mem0 import MemoryClient
memory_client = MemoryClient()
USER_ID = "food_user_01"
# Agent instructions
agent_instructions = dedent(
"""Follow these steps SEQUENTIALLY to provide personalized food recommendations with voice:
1. Analyze the user's food request and identify what type of recommendation they need.
2. Consider their dietary preferences, restrictions, and cooking habits from memory context.
3. Generate a personalized food recommendation based on their stored preferences.
4. Analyze the appropriate tone for the response (helpful, enthusiastic, cautious for allergies).
5. Call `list_voices` to retrieve available voices.
6. Select a voice that matches the helpful, friendly tone.
7. Call `text_to_speech` to generate the final audio recommendation.
"""
)
# Simple agent that remembers food preferences
food_agent = Agent(
name="Personal Food Assistant",
description="Provides personalized food recommendations with memory and generates voice responses using Cartesia TTS tools.",
instructions=agent_instructions,
model=OpenAIChat(id="gpt-4.1-nano-2025-04-14"),
tools=[CartesiaTools(voice_localize_enabled=True)],
show_tool_calls=True,
)
def get_food_recommendation(user_query: str, user_id):
    """Get a food recommendation grounded in the user's stored preferences.

    Args:
        user_query: The user's food-related question.
        user_id: Identifier whose memories should be searched.

    Returns:
        The agent's text response. If the agent produced audio, it is also
        written to an MP3 file in the working directory.
    """
    # Search memory for relevant food preferences
    memories_result = memory_client.search(query=user_query, user_id=user_id, limit=5)
    # Add memory context to the message
    memories = [f"- {result['memory']}" for result in memories_result]
    memory_context = "Memories about user that might be relevant:\n" + "\n".join(memories)
    # Combine memory context with user request
    full_request = f"""
    {memory_context}
    User: {user_query}
    Answer the user query based on provided context and create a voice note.
    """
    # Generate response with voice (same pattern as translator)
    food_agent.print_response(full_request)
    response = food_agent.run_response
    # Save audio file
    if response.audio:
        import time

        timestamp = int(time.time())
        filename = f"food_recommendation_{timestamp}.mp3"
        write_audio_to_file(
            response.audio[0].base64_audio,
            filename=filename,
        )
        # BUG FIX: previously printed the literal "(unknown)" instead of the
        # generated filename.
        print(f"Audio saved as {filename}")
    return response.content
def initialize_food_memory(user_id):
    """Initialize memory with food preferences"""
    # Seed conversation covering dietary restrictions, meal habits, and
    # likes/dislikes so the memory service can recall them in later queries.
    messages = [
        {
            "role": "user",
            "content": "Hi, I'm Sarah. I'm vegetarian and lactose intolerant. I love spicy food, especially Thai and Indian cuisine.",
        },
        {
            "role": "assistant",
            "content": "Hello Sarah! I've noted that you're vegetarian, lactose intolerant, and love spicy Thai and Indian food.",
        },
        {
            "role": "user",
            "content": "I prefer quick breakfasts since I'm always rushing, but I like cooking elaborate dinners. I also meal prep on Sundays.",
        },
        {
            "role": "assistant",
            "content": "Got it! Quick breakfasts, elaborate dinners, and Sunday meal prep. I'll remember this for future recommendations.",
        },
        {
            "role": "user",
            "content": "I'm trying to eat more protein. I like quinoa, lentils, chickpeas, and tofu. I hate mushrooms though.",
        },
        {
            "role": "assistant",
            "content": "Perfect! I'll focus on protein-rich options like quinoa, lentils, chickpeas, and tofu, and avoid mushrooms.",
        },
    ]
    memory_client.add(messages, user_id=user_id)
    print("Food preferences stored in memory")
# Initialize the memory for the user once in order for the agent to learn the user preference
initialize_food_memory(user_id=USER_ID)
print(
get_food_recommendation(
"Which type of restaurants should I go tonight for dinner and cuisines preferred?", user_id=USER_ID
)
)
# OUTPUT: 🎵 Audio saved as food_recommendation_1750162610.mp3
# For dinner tonight, considering your love for healthy spicy options, you could try a nice Thai, Indian, or Mexican restaurant.
# You might find dishes with quinoa, chickpeas, tofu, and fresh herbs delightful. Enjoy your dinner!
| {
"repo_id": "mem0ai/mem0",
"file_path": "examples/misc/diet_assistant_voice_cartesia.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:mem0/configs/vector_stores/mongodb.py | from typing import Any, Dict, Optional
from pydantic import BaseModel, Field, model_validator
class MongoDBConfig(BaseModel):
    """Configuration for MongoDB vector database."""

    db_name: str = Field("mem0_db", description="Name of the MongoDB database")
    collection_name: str = Field("mem0", description="Name of the MongoDB collection")
    embedding_model_dims: Optional[int] = Field(1536, description="Dimensions of the embedding vectors")
    mongo_uri: str = Field("mongodb://localhost:27017", description="MongoDB URI. Default is mongodb://localhost:27017")

    @model_validator(mode="before")
    @classmethod
    def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Reject any input keys that are not declared fields of this model."""
        allowed_fields = set(cls.model_fields.keys())
        input_fields = set(values.keys())
        extra_fields = input_fields - allowed_fields
        if extra_fields:
            raise ValueError(
                f"Extra fields not allowed: {', '.join(extra_fields)}. "
                f"Please provide only the following fields: {', '.join(allowed_fields)}."
            )
        return values
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/configs/vector_stores/mongodb.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:mem0/vector_stores/mongodb.py | import logging
from importlib.metadata import version
from typing import Any, Dict, List, Optional
from pydantic import BaseModel
try:
from pymongo import MongoClient
from pymongo.driver_info import DriverInfo
from pymongo.errors import PyMongoError
from pymongo.operations import SearchIndexModel
except ImportError:
raise ImportError("The 'pymongo' library is required. Please install it using 'pip install pymongo'.")
from mem0.vector_stores.base import VectorStoreBase
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
_DRIVER_METADATA = DriverInfo(name="Mem0", version=version("mem0ai"))
class OutputData(BaseModel):
id: Optional[str]
score: Optional[float]
payload: Optional[dict]
class MongoDB(VectorStoreBase):
VECTOR_TYPE = "knnVector"
SIMILARITY_METRIC = "cosine"
def __init__(self, db_name: str, collection_name: str, embedding_model_dims: int, mongo_uri: str):
"""
Initialize the MongoDB vector store with vector search capabilities.
Args:
db_name (str): Database name
collection_name (str): Collection name
embedding_model_dims (int): Dimension of the embedding vector
mongo_uri (str): MongoDB connection URI
"""
self.collection_name = collection_name
self.embedding_model_dims = embedding_model_dims
self.db_name = db_name
self.client = MongoClient(mongo_uri, driver=_DRIVER_METADATA)
self.db = self.client[db_name]
self.collection = self.create_col()
    def create_col(self):
        """Create new collection with vector search index.

        Returns:
            The collection handle, or None if a PyMongoError occurred.
        """
        try:
            database = self.client[self.db_name]
            collection_names = database.list_collection_names()
            if self.collection_name not in collection_names:
                logger.info(f"Collection '{self.collection_name}' does not exist. Creating it now.")
                collection = database[self.collection_name]
                # Insert and remove a placeholder document to create the collection
                collection.insert_one({"_id": 0, "placeholder": True})
                collection.delete_one({"_id": 0})
                logger.info(f"Collection '{self.collection_name}' created successfully.")
            else:
                collection = database[self.collection_name]
            # The index name is derived from the collection name and is also
            # read later by search().
            self.index_name = f"{self.collection_name}_vector_index"
            found_indexes = list(collection.list_search_indexes(name=self.index_name))
            if found_indexes:
                logger.info(f"Search index '{self.index_name}' already exists in collection '{self.collection_name}'.")
            else:
                search_index_model = SearchIndexModel(
                    name=self.index_name,
                    definition={
                        "mappings": {
                            "dynamic": False,
                            "fields": {
                                "embedding": {
                                    "type": self.VECTOR_TYPE,
                                    "dimensions": self.embedding_model_dims,
                                    "similarity": self.SIMILARITY_METRIC,
                                }
                            },
                        }
                    },
                )
                collection.create_search_index(search_index_model)
                logger.info(
                    f"Search index '{self.index_name}' created successfully for collection '{self.collection_name}'."
                )
            return collection
        except PyMongoError as e:
            logger.error(f"Error creating collection and search index: {e}")
            # NOTE(review): returning None here leaves self.collection = None in
            # __init__, so subsequent operations fail with AttributeError —
            # consider re-raising instead.
            return None
def insert(
self, vectors: List[List[float]], payloads: Optional[List[Dict]] = None, ids: Optional[List[str]] = None
) -> None:
"""
Insert vectors into the collection.
Args:
vectors (List[List[float]]): List of vectors to insert.
payloads (List[Dict], optional): List of payloads corresponding to vectors.
ids (List[str], optional): List of IDs corresponding to vectors.
"""
logger.info(f"Inserting {len(vectors)} vectors into collection '{self.collection_name}'.")
data = []
for vector, payload, _id in zip(vectors, payloads or [{}] * len(vectors), ids or [None] * len(vectors)):
document = {"_id": _id, "embedding": vector, "payload": payload}
data.append(document)
try:
self.collection.insert_many(data)
logger.info(f"Inserted {len(data)} documents into '{self.collection_name}'.")
except PyMongoError as e:
logger.error(f"Error inserting data: {e}")
def search(self, query: str, vectors: List[float], limit=5, filters: Optional[Dict] = None) -> List[OutputData]:
"""
Search for similar vectors using the vector search index.
Args:
query (str): Query string
vectors (List[float]): Query vector.
limit (int, optional): Number of results to return. Defaults to 5.
filters (Dict, optional): Filters to apply to the search.
Returns:
List[OutputData]: Search results.
"""
found_indexes = list(self.collection.list_search_indexes(name=self.index_name))
if not found_indexes:
logger.error(f"Index '{self.index_name}' does not exist.")
return []
results = []
try:
collection = self.client[self.db_name][self.collection_name]
pipeline = [
{
"$vectorSearch": {
"index": self.index_name,
"limit": limit,
"numCandidates": limit,
"queryVector": vectors,
"path": "embedding",
}
},
{"$set": {"score": {"$meta": "vectorSearchScore"}}},
{"$project": {"embedding": 0}},
]
# Add filter stage if filters are provided
if filters:
filter_conditions = []
for key, value in filters.items():
filter_conditions.append({"payload." + key: value})
if filter_conditions:
# Add a $match stage after vector search to apply filters
pipeline.insert(1, {"$match": {"$and": filter_conditions}})
results = list(collection.aggregate(pipeline))
logger.info(f"Vector search completed. Found {len(results)} documents.")
except Exception as e:
logger.error(f"Error during vector search for query {query}: {e}")
return []
output = [OutputData(id=str(doc["_id"]), score=doc.get("score"), payload=doc.get("payload")) for doc in results]
return output
def delete(self, vector_id: str) -> None:
"""
Delete a vector by ID.
Args:
vector_id (str): ID of the vector to delete.
"""
try:
result = self.collection.delete_one({"_id": vector_id})
if result.deleted_count > 0:
logger.info(f"Deleted document with ID '{vector_id}'.")
else:
logger.warning(f"No document found with ID '{vector_id}' to delete.")
except PyMongoError as e:
logger.error(f"Error deleting document: {e}")
def update(self, vector_id: str, vector: Optional[List[float]] = None, payload: Optional[Dict] = None) -> None:
"""
Update a vector and its payload.
Args:
vector_id (str): ID of the vector to update.
vector (List[float], optional): Updated vector.
payload (Dict, optional): Updated payload.
"""
update_fields = {}
if vector is not None:
update_fields["embedding"] = vector
if payload is not None:
update_fields["payload"] = payload
if update_fields:
try:
result = self.collection.update_one({"_id": vector_id}, {"$set": update_fields})
if result.matched_count > 0:
logger.info(f"Updated document with ID '{vector_id}'.")
else:
logger.warning(f"No document found with ID '{vector_id}' to update.")
except PyMongoError as e:
logger.error(f"Error updating document: {e}")
def get(self, vector_id: str) -> Optional[OutputData]:
"""
Retrieve a vector by ID.
Args:
vector_id (str): ID of the vector to retrieve.
Returns:
Optional[OutputData]: Retrieved vector or None if not found.
"""
try:
doc = self.collection.find_one({"_id": vector_id})
if doc:
logger.info(f"Retrieved document with ID '{vector_id}'.")
return OutputData(id=str(doc["_id"]), score=None, payload=doc.get("payload"))
else:
logger.warning(f"Document with ID '{vector_id}' not found.")
return None
except PyMongoError as e:
logger.error(f"Error retrieving document: {e}")
return None
def list_cols(self) -> List[str]:
"""
List all collections in the database.
Returns:
List[str]: List of collection names.
"""
try:
collections = self.db.list_collection_names()
logger.info(f"Listing collections in database '{self.db_name}': {collections}")
return collections
except PyMongoError as e:
logger.error(f"Error listing collections: {e}")
return []
def delete_col(self) -> None:
"""Delete the collection."""
try:
self.collection.drop()
logger.info(f"Deleted collection '{self.collection_name}'.")
except PyMongoError as e:
logger.error(f"Error deleting collection: {e}")
def col_info(self) -> Dict[str, Any]:
"""
Get information about the collection.
Returns:
Dict[str, Any]: Collection information.
"""
try:
stats = self.db.command("collstats", self.collection_name)
info = {"name": self.collection_name, "count": stats.get("count"), "size": stats.get("size")}
logger.info(f"Collection info: {info}")
return info
except PyMongoError as e:
logger.error(f"Error getting collection info: {e}")
return {}
def list(self, filters: Optional[Dict] = None, limit: int = 100) -> List[OutputData]:
"""
List vectors in the collection.
Args:
filters (Dict, optional): Filters to apply to the list.
limit (int, optional): Number of vectors to return.
Returns:
List[OutputData]: List of vectors.
"""
try:
query = {}
if filters:
# Apply filters to the payload field
filter_conditions = []
for key, value in filters.items():
filter_conditions.append({"payload." + key: value})
if filter_conditions:
query = {"$and": filter_conditions}
cursor = self.collection.find(query).limit(limit)
results = [OutputData(id=str(doc["_id"]), score=None, payload=doc.get("payload")) for doc in cursor]
logger.info(f"Retrieved {len(results)} documents from collection '{self.collection_name}'.")
return results
except PyMongoError as e:
logger.error(f"Error listing documents: {e}")
return []
    def reset(self):
        """Reset the index by deleting and recreating it."""
        logger.warning(f"Resetting index {self.collection_name}...")
        # Drop the current collection, then rebind self.collection to a fresh
        # one built by create_col (defined earlier in this class, not shown here).
        self.delete_col()
        self.collection = self.create_col(self.collection_name)
    def __del__(self) -> None:
        """Close the database connection when the object is deleted."""
        # Guard with hasattr: __init__ may have raised before self.client was set.
        if hasattr(self, "client"):
            self.client.close()
            logger.info("MongoClient connection closed.")
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/vector_stores/mongodb.py",
"license": "Apache License 2.0",
"lines": 269,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:tests/vector_stores/test_mongodb.py | from unittest.mock import MagicMock, patch
import pytest
from mem0.vector_stores.mongodb import MongoDB
@pytest.fixture
@patch("mem0.vector_stores.mongodb.MongoClient")
def mongo_vector_fixture(mock_mongo_client):
    """Build a MongoDB vector store wired to a fully mocked MongoClient.

    Returns a (store, mock_collection, mock_db) triple so tests can assert
    on the underlying pymongo calls.
    """
    mock_client = mock_mongo_client.return_value
    mock_db = mock_client["test_db"]
    mock_collection = mock_db["test_collection"]
    # Simulate an empty deployment: no search index, no documents, no collections.
    mock_collection.list_search_indexes.return_value = []
    mock_collection.aggregate.return_value = []
    mock_collection.find_one.return_value = None
    # Create a proper mock cursor: find().limit() must chain back to the cursor.
    mock_cursor = MagicMock()
    mock_cursor.limit.return_value = mock_cursor
    mock_collection.find.return_value = mock_cursor
    mock_db.list_collection_names.return_value = []
    mongo_vector = MongoDB(
        db_name="test_db",
        collection_name="test_collection",
        embedding_model_dims=1536,
        mongo_uri="mongodb://username:password@localhost:27017",
    )
    return mongo_vector, mock_collection, mock_db
# NOTE(review): "initalize" is a typo for "initialize"; kept as-is since the
# test name is discovered by pytest and may be referenced elsewhere (e.g. -k).
def test_initalize_create_col(mongo_vector_fixture):
    """Constructor wires the collection and provisions the vector search index."""
    mongo_vector, mock_collection, mock_db = mongo_vector_fixture
    assert mongo_vector.collection_name == "test_collection"
    assert mongo_vector.embedding_model_dims == 1536
    assert mongo_vector.db_name == "test_db"
    # Verify create_col being called
    mock_db.list_collection_names.assert_called_once()
    # Collection creation was forced via a placeholder insert that is deleted again.
    mock_collection.insert_one.assert_called_once_with({"_id": 0, "placeholder": True})
    mock_collection.delete_one.assert_called_once_with({"_id": 0})
    assert mongo_vector.index_name == "test_collection_vector_index"
    mock_collection.list_search_indexes.assert_called_once_with(name="test_collection_vector_index")
    mock_collection.create_search_index.assert_called_once()
    args, _ = mock_collection.create_search_index.call_args
    # The index definition document must describe a knnVector mapping on "embedding".
    search_index_model = args[0].document
    assert search_index_model == {
        "name": "test_collection_vector_index",
        "definition": {
            "mappings": {
                "dynamic": False,
                "fields": {
                    "embedding": {
                        "type": "knnVector",
                        "dimensions": 1536,
                        "similarity": "cosine",
                    }
                },
            }
        },
    }
    assert mongo_vector.collection == mock_collection
def test_insert(mongo_vector_fixture):
    """insert() forwards one record per vector to insert_many."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    ids = ["id1", "id2"]
    vectors = [[0.1] * 1536, [0.2] * 1536]
    payloads = [{"name": "vector1"}, {"name": "vector2"}]
    mongo_vector.insert(vectors, payloads, ids)
    expected_records = [
        {"_id": _id, "embedding": vec, "payload": pl}
        for _id, vec, pl in zip(ids, vectors, payloads)
    ]
    mock_collection.insert_many.assert_called_once_with(expected_records)
def test_search(mongo_vector_fixture):
    """search() builds a $vectorSearch pipeline and maps hits to results."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    query_vector = [0.1] * 1536
    mock_collection.aggregate.return_value = [
        {"_id": "id1", "score": 0.9, "payload": {"key": "value1"}},
        {"_id": "id2", "score": 0.8, "payload": {"key": "value2"}},
    ]
    # Pretend the search index already exists so search() proceeds.
    mock_collection.list_search_indexes.return_value = ["test_collection_vector_index"]
    results = mongo_vector.search("query_str", query_vector, limit=2)
    mock_collection.list_search_indexes.assert_called_with(name="test_collection_vector_index")
    # Exact pipeline: $vectorSearch, score via $meta, embedding excluded.
    mock_collection.aggregate.assert_called_once_with(
        [
            {
                "$vectorSearch": {
                    "index": "test_collection_vector_index",
                    "limit": 2,
                    "numCandidates": 2,
                    "queryVector": query_vector,
                    "path": "embedding",
                },
            },
            {"$set": {"score": {"$meta": "vectorSearchScore"}}},
            {"$project": {"embedding": 0}},
        ]
    )
    assert len(results) == 2
    assert results[0].id == "id1"
    assert results[0].score == 0.9
    assert results[0].payload == {"key": "value1"}
def test_search_with_filters(mongo_vector_fixture):
    """Test search with agent_id and run_id filters."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    query_vector = [0.1] * 1536
    mock_collection.aggregate.return_value = [
        {"_id": "id1", "score": 0.9, "payload": {"user_id": "alice", "agent_id": "agent1", "run_id": "run1"}},
    ]
    mock_collection.list_search_indexes.return_value = ["test_collection_vector_index"]
    filters = {"user_id": "alice", "agent_id": "agent1", "run_id": "run1"}
    results = mongo_vector.search("query_str", query_vector, limit=2, filters=filters)
    # Verify that the aggregation pipeline includes the filter stage
    mock_collection.aggregate.assert_called_once()
    pipeline = mock_collection.aggregate.call_args[0][0]
    # Check that the pipeline has the expected stages
    assert len(pipeline) == 4  # vectorSearch, match, set, project
    # Check that the match stage is present with the correct filters
    # (each filter key is namespaced under "payload." and AND-ed together).
    match_stage = pipeline[1]
    assert "$match" in match_stage
    assert match_stage["$match"]["$and"] == [
        {"payload.user_id": "alice"},
        {"payload.agent_id": "agent1"},
        {"payload.run_id": "run1"}
    ]
    assert len(results) == 1
    assert results[0].payload["user_id"] == "alice"
    assert results[0].payload["agent_id"] == "agent1"
    assert results[0].payload["run_id"] == "run1"
def test_search_with_single_filter(mongo_vector_fixture):
    """Test search with single filter."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    mock_collection.aggregate.return_value = [
        {"_id": "id1", "score": 0.9, "payload": {"user_id": "alice"}},
    ]
    mock_collection.list_search_indexes.return_value = ["test_collection_vector_index"]
    results = mongo_vector.search("query_str", [0.1] * 1536, limit=2, filters={"user_id": "alice"})
    # The second pipeline stage must be a $match wrapping the payload filter.
    mock_collection.aggregate.assert_called_once()
    stages = mock_collection.aggregate.call_args[0][0]
    assert "$match" in stages[1]
    assert stages[1]["$match"]["$and"] == [{"payload.user_id": "alice"}]
    assert len(results) == 1
    assert results[0].payload["user_id"] == "alice"
def test_search_with_no_filters(mongo_vector_fixture):
    """Test search with no filters."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    mock_collection.aggregate.return_value = [
        {"_id": "id1", "score": 0.9, "payload": {"key": "value1"}},
    ]
    mock_collection.list_search_indexes.return_value = ["test_collection_vector_index"]
    results = mongo_vector.search("query_str", [0.1] * 1536, limit=2, filters=None)
    mock_collection.aggregate.assert_called_once()
    stages = mock_collection.aggregate.call_args[0][0]
    # Without filters there is no $match stage: vectorSearch, set, project only.
    assert len(stages) == 3
    assert len(results) == 1
def test_delete(mongo_vector_fixture):
    """delete() issues a single delete_one keyed on _id."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    mock_collection.delete_one.return_value = MagicMock(deleted_count=1)
    # Clear the placeholder-delete call recorded during fixture setup.
    mock_collection.delete_one.reset_mock()
    mongo_vector.delete(vector_id="id1")
    mock_collection.delete_one.assert_called_once_with({"_id": "id1"})
def test_update(mongo_vector_fixture):
    """update() sends both embedding and payload in one $set document."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    new_vector = [0.3] * 1536
    new_payload = {"name": "updated_vector"}
    mock_collection.update_one.return_value = MagicMock(matched_count=1)
    mongo_vector.update(vector_id="id1", vector=new_vector, payload=new_payload)
    mock_collection.update_one.assert_called_once_with(
        {"_id": "id1"}, {"$set": {"embedding": new_vector, "payload": new_payload}}
    )
def test_get(mongo_vector_fixture):
    """get() wraps the found document into a result with id and payload."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    mock_collection.find_one.return_value = {"_id": "id1", "payload": {"key": "value"}}
    result = mongo_vector.get(vector_id="id1")
    mock_collection.find_one.assert_called_once_with({"_id": "id1"})
    assert result.id == "id1"
    assert result.payload == {"key": "value"}
def test_list_cols(mongo_vector_fixture):
    """list_cols() returns whatever collection names the database reports."""
    mongo_vector, _, mock_db = mongo_vector_fixture
    mock_db.list_collection_names.return_value = ["collection1", "collection2"]
    # Clear the call recorded during fixture setup.
    mock_db.list_collection_names.reset_mock()
    assert mongo_vector.list_cols() == ["collection1", "collection2"]
    mock_db.list_collection_names.assert_called_once()
def test_delete_col(mongo_vector_fixture):
    """delete_col() drops the backing collection exactly once."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    mongo_vector.delete_col()
    mock_collection.drop.assert_called_once()
def test_col_info(mongo_vector_fixture):
    """col_info() surfaces name/count/size from the collstats command."""
    mongo_vector, mock_collection, mock_db = mongo_vector_fixture
    mock_db.command.return_value = {"count": 10, "size": 1024}
    info = mongo_vector.col_info()
    mock_db.command.assert_called_once_with("collstats", "test_collection")
    assert (info["name"], info["count"], info["size"]) == ("test_collection", 10, 1024)
def test_list(mongo_vector_fixture):
    """list() queries with an empty filter document and honors the limit."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    # Feed the mocked cursor the documents list() should iterate.
    cursor = mock_collection.find.return_value
    cursor.__iter__.return_value = [
        {"_id": "id1", "payload": {"key": "value1"}},
        {"_id": "id2", "payload": {"key": "value2"}},
    ]
    results = mongo_vector.list(limit=2)
    mock_collection.find.assert_called_once_with({})
    cursor.limit.assert_called_once_with(2)
    assert len(results) == 2
    assert results[0].id == "id1"
    assert results[0].payload == {"key": "value1"}
def test_list_with_filters(mongo_vector_fixture):
    """Test list with agent_id and run_id filters."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    # Mock the cursor to return the expected data
    mock_cursor = mock_collection.find.return_value
    mock_cursor.__iter__.return_value = [
        {"_id": "id1", "payload": {"user_id": "alice", "agent_id": "agent1", "run_id": "run1"}},
    ]
    filters = {"user_id": "alice", "agent_id": "agent1", "run_id": "run1"}
    results = mongo_vector.list(filters=filters, limit=2)
    # Verify that the find method was called with the correct query:
    # every filter key is namespaced under "payload." and AND-ed together.
    expected_query = {
        "$and": [
            {"payload.user_id": "alice"},
            {"payload.agent_id": "agent1"},
            {"payload.run_id": "run1"}
        ]
    }
    mock_collection.find.assert_called_once_with(expected_query)
    mock_cursor.limit.assert_called_once_with(2)
    assert len(results) == 1
    assert results[0].payload["user_id"] == "alice"
    assert results[0].payload["agent_id"] == "agent1"
    assert results[0].payload["run_id"] == "run1"
def test_list_with_single_filter(mongo_vector_fixture):
    """Test list with single filter."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    cursor = mock_collection.find.return_value
    cursor.__iter__.return_value = [{"_id": "id1", "payload": {"user_id": "alice"}}]
    results = mongo_vector.list(filters={"user_id": "alice"}, limit=2)
    # Even a single filter arrives wrapped in an $and clause.
    mock_collection.find.assert_called_once_with({"$and": [{"payload.user_id": "alice"}]})
    cursor.limit.assert_called_once_with(2)
    assert len(results) == 1
    assert results[0].payload["user_id"] == "alice"
def test_list_with_no_filters(mongo_vector_fixture):
    """Test list with no filters."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    cursor = mock_collection.find.return_value
    cursor.__iter__.return_value = [{"_id": "id1", "payload": {"key": "value1"}}]
    results = mongo_vector.list(filters=None, limit=2)
    # filters=None must translate to a completely empty query document.
    mock_collection.find.assert_called_once_with({})
    cursor.limit.assert_called_once_with(2)
    assert len(results) == 1
| {
"repo_id": "mem0ai/mem0",
"file_path": "tests/vector_stores/test_mongodb.py",
"license": "Apache License 2.0",
"lines": 272,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mem0ai/mem0:openmemory/api/alembic/versions/afd00efbd06b_add_unique_user_id_constraints.py | """remove_global_unique_constraint_on_app_name_add_composite_unique
Revision ID: afd00efbd06b
Revises: add_config_table
Create Date: 2025-06-04 01:59:41.637440
"""
from typing import Sequence, Union
from alembic import op
# revision identifiers, used by Alembic.
revision: str = 'afd00efbd06b'
down_revision: Union[str, None] = 'add_config_table'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Upgrade schema."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Replace the global unique index on apps.name with a non-unique one, then
    # enforce per-owner uniqueness via a composite (owner_id, name) index.
    op.drop_index('ix_apps_name', table_name='apps')
    op.create_index(op.f('ix_apps_name'), 'apps', ['name'], unique=False)
    op.create_index('idx_app_owner_name', 'apps', ['owner_id', 'name'], unique=True)
    # ### end Alembic commands ###
def downgrade() -> None:
    """Downgrade schema."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade: drop the composite index and restore the global
    # unique constraint on apps.name.
    op.drop_index('idx_app_owner_name', table_name='apps')
    op.drop_index(op.f('ix_apps_name'), table_name='apps')
    op.create_index('ix_apps_name', 'apps', ['name'], unique=True)
# ### end Alembic commands ### | {
"repo_id": "mem0ai/mem0",
"file_path": "openmemory/api/alembic/versions/afd00efbd06b_add_unique_user_id_constraints.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:mem0/llms/sarvam.py | import os
from typing import Dict, List, Optional
import requests
from mem0.configs.llms.base import BaseLlmConfig
from mem0.llms.base import LLMBase
class SarvamLLM(LLMBase):
    """LLM backend for the Sarvam AI chat-completions HTTP API."""

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        """Initialize the Sarvam client.

        Args:
            config (BaseLlmConfig, optional): LLM configuration; the model
                defaults to "sarvam-m" when unset.

        Raises:
            ValueError: If no API key is available from config or environment.
        """
        super().__init__(config)
        # Set default model if not provided
        if not self.config.model:
            self.config.model = "sarvam-m"
        # Get API key from config or environment variable
        self.api_key = self.config.api_key or os.getenv("SARVAM_API_KEY")
        if not self.api_key:
            raise ValueError(
                "Sarvam API key is required. Set SARVAM_API_KEY environment variable or provide api_key in config."
            )
        # Set base URL - use config value or environment or default
        self.base_url = (
            getattr(self.config, "sarvam_base_url", None) or os.getenv("SARVAM_API_BASE") or "https://api.sarvam.ai/v1"
        )

    def generate_response(self, messages: List[Dict[str, str]], response_format=None) -> str:
        """
        Generate a response based on the given messages using Sarvam-M.

        Args:
            messages (list): List of message dicts containing 'role' and 'content'.
            response_format (str or object, optional): Format of the response.
                Currently not used by Sarvam API.

        Returns:
            str: The generated response.

        Raises:
            RuntimeError: If the HTTP request to the Sarvam API fails.
            ValueError: If the API response has no choices or an unexpected shape.
        """
        url = f"{self.base_url}/chat/completions"
        headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
        # Prepare the request payload
        params = {
            "messages": messages,
            "model": self.config.model if isinstance(self.config.model, str) else "sarvam-m",
        }
        # Add standard parameters that already exist in BaseLlmConfig
        if self.config.temperature is not None:
            params["temperature"] = self.config.temperature
        if self.config.max_tokens is not None:
            params["max_tokens"] = self.config.max_tokens
        if self.config.top_p is not None:
            params["top_p"] = self.config.top_p
        # Handle Sarvam-specific parameters if model is passed as dict
        if isinstance(self.config.model, dict):
            # Extract model name
            params["model"] = self.config.model.get("name", "sarvam-m")
            # Add Sarvam-specific parameters
            sarvam_specific_params = ["reasoning_effort", "frequency_penalty", "presence_penalty", "seed", "stop", "n"]
            for param in sarvam_specific_params:
                if param in self.config.model:
                    params[param] = self.config.model[param]
        try:
            response = requests.post(url, headers=headers, json=params, timeout=30)
            response.raise_for_status()
            result = response.json()
            if "choices" in result and len(result["choices"]) > 0:
                return result["choices"][0]["message"]["content"]
            else:
                raise ValueError("No response choices found in Sarvam API response")
        except requests.exceptions.RequestException as e:
            # Fix: chain the cause (PEP 3134) so the underlying HTTP error and
            # traceback are preserved instead of being discarded.
            raise RuntimeError(f"Sarvam API request failed: {e}") from e
        except KeyError as e:
            raise ValueError(f"Unexpected response format from Sarvam API: {e}") from e
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/llms/sarvam.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:openmemory/api/alembic/versions/add_config_table.py | """add_config_table
Revision ID: add_config_table
Revises: 0b53c747049a
Create Date: 2023-06-01 10:00:00.000000
"""
import uuid
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'add_config_table'
down_revision = '0b53c747049a'
branch_labels = None
depends_on = None
def upgrade():
    # Create configs table if it doesn't exist
    # NOTE(review): the Column `default` callable is a client-side SQLAlchemy
    # default, not a server-side DDL default — in a migration-created table it
    # only applies to inserts issued through this metadata; confirm intended.
    op.create_table(
        'configs',
        sa.Column('id', sa.UUID(), nullable=False, default=lambda: uuid.uuid4()),
        sa.Column('key', sa.String(), nullable=False),
        sa.Column('value', sa.JSON(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('key')
    )
    # Create index for key lookups
    op.create_index('idx_configs_key', 'configs', ['key'])
def downgrade():
# Drop the configs table
op.drop_index('idx_configs_key', 'configs')
op.drop_table('configs') | {
"repo_id": "mem0ai/mem0",
"file_path": "openmemory/api/alembic/versions/add_config_table.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:openmemory/api/app/routers/config.py | from typing import Any, Dict, Optional
import copy

from app.database import get_db
from app.models import Config as ConfigModel
from app.utils.memory import reset_memory_client
from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel, Field
from sqlalchemy.orm import Session
router = APIRouter(prefix="/api/v1/config", tags=["config"])
class LLMConfig(BaseModel):
    """Settings payload for a chat-LLM provider."""
    model: str = Field(..., description="LLM model name")
    temperature: float = Field(..., description="Temperature setting for the model")
    max_tokens: int = Field(..., description="Maximum tokens to generate")
    api_key: Optional[str] = Field(None, description="API key or 'env:API_KEY' to use environment variable")
    ollama_base_url: Optional[str] = Field(None, description="Base URL for Ollama server (e.g., http://host.docker.internal:11434)")
class LLMProvider(BaseModel):
    """Provider name plus its LLM settings."""
    provider: str = Field(..., description="LLM provider name")
    config: LLMConfig
class EmbedderConfig(BaseModel):
    """Settings payload for an embedding-model provider."""
    model: str = Field(..., description="Embedder model name")
    api_key: Optional[str] = Field(None, description="API key or 'env:API_KEY' to use environment variable")
    ollama_base_url: Optional[str] = Field(None, description="Base URL for Ollama server (e.g., http://host.docker.internal:11434)")
class EmbedderProvider(BaseModel):
    """Provider name plus its embedder settings."""
    provider: str = Field(..., description="Embedder provider name")
    config: EmbedderConfig
class VectorStoreProvider(BaseModel):
    """Provider name plus free-form vector-store settings."""
    provider: str = Field(..., description="Vector store provider name")
    # Below config can vary widely based on the vector store used. Refer https://docs.mem0.ai/components/vectordbs/config
    config: Dict[str, Any] = Field(..., description="Vector store-specific configuration")
class OpenMemoryConfig(BaseModel):
    """OpenMemory-specific settings (currently only custom instructions)."""
    custom_instructions: Optional[str] = Field(None, description="Custom instructions for memory management and fact extraction")
class Mem0Config(BaseModel):
    """mem0 engine settings; each section is optional."""
    llm: Optional[LLMProvider] = None
    embedder: Optional[EmbedderProvider] = None
    vector_store: Optional[VectorStoreProvider] = None
class ConfigSchema(BaseModel):
    """Top-level configuration document stored in the configs table."""
    openmemory: Optional[OpenMemoryConfig] = None
    mem0: Optional[Mem0Config] = None
def get_default_configuration():
    """Get the default configuration with sensible defaults for LLM and embedder."""
    default_llm = {
        "provider": "openai",
        "config": {
            "model": "gpt-4o-mini",
            "temperature": 0.1,
            "max_tokens": 2000,
            "api_key": "env:OPENAI_API_KEY",
        },
    }
    default_embedder = {
        "provider": "openai",
        "config": {
            "model": "text-embedding-3-small",
            "api_key": "env:OPENAI_API_KEY",
        },
    }
    return {
        "openmemory": {"custom_instructions": None},
        "mem0": {
            "llm": default_llm,
            "embedder": default_embedder,
            "vector_store": None,
        },
    }
def get_config_from_db(db: Session, key: str = "main"):
    """Get configuration from database, creating/backfilling defaults as needed.

    Args:
        db: Active SQLAlchemy session.
        key: Row key in the configs table (default "main").

    Returns:
        dict: The stored configuration with all required sections present.
    """
    config = db.query(ConfigModel).filter(ConfigModel.key == key).first()
    if not config:
        # Create default config with proper provider configurations
        default_config = get_default_configuration()
        db_config = ConfigModel(key=key, value=default_config)
        db.add(db_config)
        db.commit()
        db.refresh(db_config)
        return default_config
    # Fix: work on a deep copy. The previous code aliased config.value and
    # mutated it in place, so the "was it modified?" comparison below always
    # compared the object to itself and backfilled defaults were never saved.
    config_value = copy.deepcopy(config.value)
    default_config = get_default_configuration()
    # Merge with defaults to ensure all required fields exist
    if "openmemory" not in config_value:
        config_value["openmemory"] = default_config["openmemory"]
    if "mem0" not in config_value:
        config_value["mem0"] = default_config["mem0"]
    else:
        # Ensure LLM config exists with defaults
        if "llm" not in config_value["mem0"] or config_value["mem0"]["llm"] is None:
            config_value["mem0"]["llm"] = default_config["mem0"]["llm"]
        # Ensure embedder config exists with defaults
        if "embedder" not in config_value["mem0"] or config_value["mem0"]["embedder"] is None:
            config_value["mem0"]["embedder"] = default_config["mem0"]["embedder"]
        # Ensure vector_store config exists with defaults
        if "vector_store" not in config_value["mem0"]:
            config_value["mem0"]["vector_store"] = default_config["mem0"]["vector_store"]
    # Save the updated config back to database if it was modified
    if config_value != config.value:
        config.value = config_value
        db.commit()
        db.refresh(config)
    return config_value
def save_config_to_db(db: Session, config: Dict[str, Any], key: str = "main"):
    """Upsert the configuration row identified by *key* and return its value."""
    row = db.query(ConfigModel).filter(ConfigModel.key == key).first()
    if row:
        row.value = config
        row.updated_at = None  # Will trigger the onupdate to set current time
    else:
        row = ConfigModel(key=key, value=config)
        db.add(row)
    db.commit()
    db.refresh(row)
    return row.value
@router.get("/", response_model=ConfigSchema)
async def get_configuration(db: Session = Depends(get_db)):
"""Get the current configuration."""
config = get_config_from_db(db)
return config
@router.put("/", response_model=ConfigSchema)
async def update_configuration(config: ConfigSchema, db: Session = Depends(get_db)):
"""Update the configuration."""
current_config = get_config_from_db(db)
# Convert to dict for processing
updated_config = current_config.copy()
# Update openmemory settings if provided
if config.openmemory is not None:
if "openmemory" not in updated_config:
updated_config["openmemory"] = {}
updated_config["openmemory"].update(config.openmemory.dict(exclude_none=True))
# Update mem0 settings
updated_config["mem0"] = config.mem0.dict(exclude_none=True)
@router.patch("/", response_model=ConfigSchema)
async def patch_configuration(config_update: ConfigSchema, db: Session = Depends(get_db)):
"""Update parts of the configuration."""
current_config = get_config_from_db(db)
def deep_update(source, overrides):
for key, value in overrides.items():
if isinstance(value, dict) and key in source and isinstance(source[key], dict):
source[key] = deep_update(source[key], value)
else:
source[key] = value
return source
update_data = config_update.dict(exclude_unset=True)
updated_config = deep_update(current_config, update_data)
save_config_to_db(db, updated_config)
reset_memory_client()
return updated_config
@router.post("/reset", response_model=ConfigSchema)
async def reset_configuration(db: Session = Depends(get_db)):
"""Reset the configuration to default values."""
try:
# Get the default configuration with proper provider setups
default_config = get_default_configuration()
# Save it as the current configuration in the database
save_config_to_db(db, default_config)
reset_memory_client()
return default_config
except Exception as e:
raise HTTPException(
status_code=500,
detail=f"Failed to reset configuration: {str(e)}"
)
@router.get("/mem0/llm", response_model=LLMProvider)
async def get_llm_configuration(db: Session = Depends(get_db)):
"""Get only the LLM configuration."""
config = get_config_from_db(db)
llm_config = config.get("mem0", {}).get("llm", {})
return llm_config
@router.put("/mem0/llm", response_model=LLMProvider)
async def update_llm_configuration(llm_config: LLMProvider, db: Session = Depends(get_db)):
"""Update only the LLM configuration."""
current_config = get_config_from_db(db)
# Ensure mem0 key exists
if "mem0" not in current_config:
current_config["mem0"] = {}
# Update the LLM configuration
current_config["mem0"]["llm"] = llm_config.dict(exclude_none=True)
# Save the configuration to database
save_config_to_db(db, current_config)
reset_memory_client()
return current_config["mem0"]["llm"]
@router.get("/mem0/embedder", response_model=EmbedderProvider)
async def get_embedder_configuration(db: Session = Depends(get_db)):
"""Get only the Embedder configuration."""
config = get_config_from_db(db)
embedder_config = config.get("mem0", {}).get("embedder", {})
return embedder_config
@router.put("/mem0/embedder", response_model=EmbedderProvider)
async def update_embedder_configuration(embedder_config: EmbedderProvider, db: Session = Depends(get_db)):
"""Update only the Embedder configuration."""
current_config = get_config_from_db(db)
# Ensure mem0 key exists
if "mem0" not in current_config:
current_config["mem0"] = {}
# Update the Embedder configuration
current_config["mem0"]["embedder"] = embedder_config.dict(exclude_none=True)
# Save the configuration to database
save_config_to_db(db, current_config)
reset_memory_client()
return current_config["mem0"]["embedder"]
@router.get("/mem0/vector_store", response_model=Optional[VectorStoreProvider])
async def get_vector_store_configuration(db: Session = Depends(get_db)):
"""Get only the Vector Store configuration."""
config = get_config_from_db(db)
vector_store_config = config.get("mem0", {}).get("vector_store", None)
return vector_store_config
@router.put("/mem0/vector_store", response_model=VectorStoreProvider)
async def update_vector_store_configuration(vector_store_config: VectorStoreProvider, db: Session = Depends(get_db)):
"""Update only the Vector Store configuration."""
current_config = get_config_from_db(db)
# Ensure mem0 key exists
if "mem0" not in current_config:
current_config["mem0"] = {}
# Update the Vector Store configuration
current_config["mem0"]["vector_store"] = vector_store_config.dict(exclude_none=True)
# Save the configuration to database
save_config_to_db(db, current_config)
reset_memory_client()
return current_config["mem0"]["vector_store"]
@router.get("/openmemory", response_model=OpenMemoryConfig)
async def get_openmemory_configuration(db: Session = Depends(get_db)):
"""Get only the OpenMemory configuration."""
config = get_config_from_db(db)
openmemory_config = config.get("openmemory", {})
return openmemory_config
@router.put("/openmemory", response_model=OpenMemoryConfig)
async def update_openmemory_configuration(openmemory_config: OpenMemoryConfig, db: Session = Depends(get_db)):
"""Update only the OpenMemory configuration."""
current_config = get_config_from_db(db)
# Ensure openmemory key exists
if "openmemory" not in current_config:
current_config["openmemory"] = {}
# Update the OpenMemory configuration
current_config["openmemory"].update(openmemory_config.dict(exclude_none=True))
# Save the configuration to database
save_config_to_db(db, current_config)
reset_memory_client()
return current_config["openmemory"]
| {
"repo_id": "mem0ai/mem0",
"file_path": "openmemory/api/app/routers/config.py",
"license": "Apache License 2.0",
"lines": 235,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:examples/misc/healthcare_assistant_google_adk.py | import asyncio
import warnings
from google.adk.agents import Agent
from google.adk.runners import Runner
from google.adk.sessions import InMemorySessionService
from google.genai import types
from mem0 import MemoryClient
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Initialize Mem0 client
# NOTE(review): MemoryClient() presumably reads its API key from the
# environment — confirm before running this example.
mem0_client = MemoryClient()
# Define Memory Tools
def save_patient_info(information: str) -> dict:
    """Saves important patient information to memory."""
    print(f"Storing patient information: {information[:30]}...")
    # Get user_id from session state or use default.
    # NOTE(review): reads a `user_id` attribute monkey-patched onto this
    # function object; falls back to "default_user" when unset.
    user_id = getattr(save_patient_info, "user_id", "default_user")
    # Store in Mem0 (module-level client), tagged to the healthcare session.
    mem0_client.add(
        [{"role": "user", "content": information}],
        user_id=user_id,
        run_id="healthcare_session",
        metadata={"type": "patient_information"},
    )
    return {"status": "success", "message": "Information saved"}
def retrieve_patient_info(query: str) -> str:
    """Retrieves relevant patient information from memory."""
    print(f"Searching for patient information: {query}")
    # Get user_id from session state or use default.
    # NOTE(review): reads a `user_id` attribute monkey-patched onto this
    # function object; falls back to "default_user" when unset.
    user_id = getattr(retrieve_patient_info, "user_id", "default_user")
    # Search Mem0 within the same healthcare session scope used by save.
    results = mem0_client.search(
        query,
        user_id=user_id,
        run_id="healthcare_session",
        limit=5,
        threshold=0.7,  # Higher threshold for more relevant results
    )
    if not results:
        return "I don't have any relevant memories about this topic."
    memories = [f"• {result['memory']}" for result in results]
    return "Here's what I remember that might be relevant:\n" + "\n".join(memories)
# Define Healthcare Tools
def schedule_appointment(date: str, time: str, reason: str) -> dict:
    """Book a (mock) doctor's appointment and return a confirmation payload.

    Demo stub: derives a pseudo appointment id from the date/time rather
    than calling a real scheduling system.
    """
    appointment_id = "APT-{}".format(hash(date + time) % 10000)
    confirmation = f"Appointment scheduled for {date} at {time} for {reason}"
    return {
        "status": "success",
        "appointment_id": appointment_id,
        "confirmation": confirmation,
        "message": "Please arrive 15 minutes early to complete paperwork.",
    }
# Create the Healthcare Assistant Agent
healthcare_agent = Agent(
name="healthcare_assistant",
model="gemini-1.5-flash", # Using Gemini for healthcare assistant
description="Healthcare assistant that helps patients with health information and appointment scheduling.",
instruction="""You are a helpful Healthcare Assistant with memory capabilities.
Your primary responsibilities are to:
1. Remember patient information using the 'save_patient_info' tool when they share symptoms, conditions, or preferences.
2. Retrieve past patient information using the 'retrieve_patient_info' tool when relevant to the current conversation.
3. Help schedule appointments using the 'schedule_appointment' tool.
IMPORTANT GUIDELINES:
- Always be empathetic, professional, and helpful.
- Save important patient information like symptoms, conditions, allergies, and preferences.
- Check if you have relevant patient information before asking for details they may have shared previously.
- Make it clear you are not a doctor and cannot provide medical diagnosis or treatment.
- For serious symptoms, always recommend consulting a healthcare professional.
- Keep all patient information confidential.
""",
tools=[save_patient_info, retrieve_patient_info, schedule_appointment],
)
# Set Up Session and Runner
session_service = InMemorySessionService()
# Define constants for the conversation
APP_NAME = "healthcare_assistant_app"
USER_ID = "Alex"
SESSION_ID = "session_001"
# Create a session
session = session_service.create_session(app_name=APP_NAME, user_id=USER_ID, session_id=SESSION_ID)
# Create the runner
runner = Runner(agent=healthcare_agent, app_name=APP_NAME, session_service=session_service)
# Interact with the Healthcare Assistant
async def call_agent_async(query, runner, user_id, session_id):
    """Send one patient message through the agent and return its final reply.

    Returns the text of the agent's final response, or the fixed string
    "No response received." when the run produced no final content.
    """
    print(f"\n>>> Patient: {query}")
    message = types.Content(role="user", parts=[types.Part(text=query)])
    # Expose the current user's id to the memory tools via function attributes.
    save_patient_info.user_id = user_id
    retrieve_patient_info.user_id = user_id
    # Stream events until a final response with content arrives.
    async for event in runner.run_async(user_id=user_id, session_id=session_id, new_message=message):
        if event.is_final_response() and event.content and event.content.parts:
            reply = event.content.parts[0].text
            print(f"<<< Assistant: {reply}")
            return reply
    return "No response received."
# Example conversation flow
async def run_conversation():
    """Scripted demo: four patient turns that exercise saving information,
    recalling it, and scheduling an appointment."""
    demo_turns = [
        # Patient introduces themselves with key information
        "Hi, I'm Alex. I've been having headaches for the past week, and I have a penicillin allergy.",
        # Request for health information
        "Can you tell me more about what might be causing my headaches?",
        # Schedule an appointment
        "I think I should see a doctor. Can you help me schedule an appointment for next Monday at 2pm?",
        # Memory check — should recall patient name, symptoms, and allergy
        "What medications should I avoid for my headaches?",
    ]
    for turn in demo_turns:
        await call_agent_async(turn, runner=runner, user_id=USER_ID, session_id=SESSION_ID)
# Interactive mode
async def interactive_mode():
    """Run an interactive chat session with the healthcare assistant."""
    print("=== Healthcare Assistant Interactive Mode ===")
    print("Enter 'exit' to quit at any time.")
    # Identify the patient; an empty entry falls back to the default user.
    entered = input("Enter patient ID (or press Enter for default): ").strip()
    patient_id = entered or USER_ID
    session_id = f"session_{hash(patient_id) % 1000:03d}"
    # Create session for this user
    session_service.create_session(app_name=APP_NAME, user_id=patient_id, session_id=session_id)
    print(f"\nStarting conversation with patient ID: {patient_id}")
    print("Type your message and press Enter.")
    while True:
        user_input = input("\n>>> Patient: ").strip()
        if user_input.lower() in ("exit", "quit", "bye"):
            print("Ending conversation. Thank you!")
            return
        await call_agent_async(user_input, runner=runner, user_id=patient_id, session_id=session_id)
# Main execution
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Healthcare Assistant with Memory")
    parser.add_argument("--demo", action="store_true", help="Run the demo conversation")
    parser.add_argument("--interactive", action="store_true", help="Run in interactive mode")
    # NOTE(review): --patient-id is parsed but never used below; the demo
    # always runs as USER_ID and interactive mode prompts for an id itself.
    parser.add_argument("--patient-id", type=str, default=USER_ID, help="Patient ID for the conversation")
    args = parser.parse_args()
    if args.demo:
        asyncio.run(run_conversation())
    elif args.interactive:
        asyncio.run(interactive_mode())
    else:
        # Default to demo mode if no arguments provided
        asyncio.run(run_conversation())
| {
"repo_id": "mem0ai/mem0",
"file_path": "examples/misc/healthcare_assistant_google_adk.py",
"license": "Apache License 2.0",
"lines": 159,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:openmemory/api/alembic/env.py | import os
import sys
from logging.config import fileConfig
from alembic import context
from dotenv import load_dotenv
from sqlalchemy import engine_from_config, pool
# Add the parent directory to the Python path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Load environment variables
load_dotenv()
# Import your models here - moved after path setup
from app.database import Base # noqa: E402
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
if config.config_file_name is not None:
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline() -> None:
    """Run migrations in 'offline' mode.

    Configures the Alembic context with just a database URL instead of an
    Engine, so no DBAPI needs to be installed; context.execute() emits the
    generated SQL to the script output rather than a live connection.
    """
    database_url = os.getenv("DATABASE_URL", "sqlite:///./openmemory.db")
    context.configure(
        url=database_url,
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
    )
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online() -> None:
    """Run migrations in 'online' mode.

    Creates an Engine from the [alembic] ini section and associates a live
    connection with the migration context.
    """
    # get_section() returns None when the section is missing from the ini
    # file; fall back to an empty dict so the assignment below cannot raise
    # TypeError.
    configuration = config.get_section(config.config_ini_section) or {}
    # Environment variable wins over the .ini value so deployments can point
    # at any database without editing alembic.ini.
    configuration["sqlalchemy.url"] = os.getenv("DATABASE_URL", "sqlite:///./openmemory.db")
    connectable = engine_from_config(
        configuration,
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,  # migrations are one-shot; no pooling needed
    )
    with connectable.connect() as connection:
        context.configure(
            connection=connection, target_metadata=target_metadata
        )
        with context.begin_transaction():
            context.run_migrations()
# Entry point: Alembic imports this module and selects the mode from the
# command line (`alembic upgrade --sql ...` runs offline).
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
| {
"repo_id": "mem0ai/mem0",
"file_path": "openmemory/api/alembic/env.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:openmemory/api/alembic/versions/0b53c747049a_initial_migration.py | """Initial migration
Revision ID: 0b53c747049a
Revises:
Create Date: 2025-04-19 00:59:56.244203
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = '0b53c747049a'
down_revision: Union[str, None] = None  # None: this is the root migration
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Upgrade schema.

    Creates the initial OpenMemory tables. Order matters: parent tables
    (users, categories, ...) are created before children with foreign keys
    (apps -> users; memories -> users/apps; logs/history -> memories).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # ACL rules: subject (user/app) -> object (memory/app) with allow/deny effect.
    op.create_table('access_controls',
    sa.Column('id', sa.UUID(), nullable=False),
    sa.Column('subject_type', sa.String(), nullable=False),
    sa.Column('subject_id', sa.UUID(), nullable=True),
    sa.Column('object_type', sa.String(), nullable=False),
    sa.Column('object_id', sa.UUID(), nullable=True),
    sa.Column('effect', sa.String(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index('idx_access_object', 'access_controls', ['object_type', 'object_id'], unique=False)
    op.create_index('idx_access_subject', 'access_controls', ['subject_type', 'subject_id'], unique=False)
    op.create_index(op.f('ix_access_controls_created_at'), 'access_controls', ['created_at'], unique=False)
    op.create_index(op.f('ix_access_controls_effect'), 'access_controls', ['effect'], unique=False)
    op.create_index(op.f('ix_access_controls_object_id'), 'access_controls', ['object_id'], unique=False)
    op.create_index(op.f('ix_access_controls_object_type'), 'access_controls', ['object_type'], unique=False)
    op.create_index(op.f('ix_access_controls_subject_id'), 'access_controls', ['subject_id'], unique=False)
    op.create_index(op.f('ix_access_controls_subject_type'), 'access_controls', ['subject_type'], unique=False)
    # Auto-archive rules: archive matching memories after N days.
    op.create_table('archive_policies',
    sa.Column('id', sa.UUID(), nullable=False),
    sa.Column('criteria_type', sa.String(), nullable=False),
    sa.Column('criteria_id', sa.UUID(), nullable=True),
    sa.Column('days_to_archive', sa.Integer(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index('idx_policy_criteria', 'archive_policies', ['criteria_type', 'criteria_id'], unique=False)
    op.create_index(op.f('ix_archive_policies_created_at'), 'archive_policies', ['created_at'], unique=False)
    op.create_index(op.f('ix_archive_policies_criteria_id'), 'archive_policies', ['criteria_id'], unique=False)
    op.create_index(op.f('ix_archive_policies_criteria_type'), 'archive_policies', ['criteria_type'], unique=False)
    op.create_table('categories',
    sa.Column('id', sa.UUID(), nullable=False),
    sa.Column('name', sa.String(), nullable=False),
    sa.Column('description', sa.String(), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_categories_created_at'), 'categories', ['created_at'], unique=False)
    op.create_index(op.f('ix_categories_name'), 'categories', ['name'], unique=True)
    op.create_table('users',
    sa.Column('id', sa.UUID(), nullable=False),
    sa.Column('user_id', sa.String(), nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.Column('email', sa.String(), nullable=True),
    sa.Column('metadata', sa.JSON(), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_users_created_at'), 'users', ['created_at'], unique=False)
    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
    op.create_index(op.f('ix_users_name'), 'users', ['name'], unique=False)
    op.create_index(op.f('ix_users_user_id'), 'users', ['user_id'], unique=True)
    # apps depend on users (owner_id FK).
    op.create_table('apps',
    sa.Column('id', sa.UUID(), nullable=False),
    sa.Column('owner_id', sa.UUID(), nullable=False),
    sa.Column('name', sa.String(), nullable=False),
    sa.Column('description', sa.String(), nullable=True),
    sa.Column('metadata', sa.JSON(), nullable=True),
    sa.Column('is_active', sa.Boolean(), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['owner_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_apps_created_at'), 'apps', ['created_at'], unique=False)
    op.create_index(op.f('ix_apps_is_active'), 'apps', ['is_active'], unique=False)
    op.create_index(op.f('ix_apps_name'), 'apps', ['name'], unique=True)
    op.create_index(op.f('ix_apps_owner_id'), 'apps', ['owner_id'], unique=False)
    # Core memories table; 'memorystate' enum tracks the lifecycle.
    op.create_table('memories',
    sa.Column('id', sa.UUID(), nullable=False),
    sa.Column('user_id', sa.UUID(), nullable=False),
    sa.Column('app_id', sa.UUID(), nullable=False),
    sa.Column('content', sa.String(), nullable=False),
    sa.Column('vector', sa.String(), nullable=True),
    sa.Column('metadata', sa.JSON(), nullable=True),
    sa.Column('state', sa.Enum('active', 'paused', 'archived', 'deleted', name='memorystate'), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('archived_at', sa.DateTime(), nullable=True),
    sa.Column('deleted_at', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['app_id'], ['apps.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index('idx_memory_app_state', 'memories', ['app_id', 'state'], unique=False)
    op.create_index('idx_memory_user_app', 'memories', ['user_id', 'app_id'], unique=False)
    op.create_index('idx_memory_user_state', 'memories', ['user_id', 'state'], unique=False)
    op.create_index(op.f('ix_memories_app_id'), 'memories', ['app_id'], unique=False)
    op.create_index(op.f('ix_memories_archived_at'), 'memories', ['archived_at'], unique=False)
    op.create_index(op.f('ix_memories_created_at'), 'memories', ['created_at'], unique=False)
    op.create_index(op.f('ix_memories_deleted_at'), 'memories', ['deleted_at'], unique=False)
    op.create_index(op.f('ix_memories_state'), 'memories', ['state'], unique=False)
    op.create_index(op.f('ix_memories_user_id'), 'memories', ['user_id'], unique=False)
    # Audit trail of per-app memory accesses.
    op.create_table('memory_access_logs',
    sa.Column('id', sa.UUID(), nullable=False),
    sa.Column('memory_id', sa.UUID(), nullable=False),
    sa.Column('app_id', sa.UUID(), nullable=False),
    sa.Column('accessed_at', sa.DateTime(), nullable=True),
    sa.Column('access_type', sa.String(), nullable=False),
    sa.Column('metadata', sa.JSON(), nullable=True),
    sa.ForeignKeyConstraint(['app_id'], ['apps.id'], ),
    sa.ForeignKeyConstraint(['memory_id'], ['memories.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index('idx_access_app_time', 'memory_access_logs', ['app_id', 'accessed_at'], unique=False)
    op.create_index('idx_access_memory_time', 'memory_access_logs', ['memory_id', 'accessed_at'], unique=False)
    op.create_index(op.f('ix_memory_access_logs_access_type'), 'memory_access_logs', ['access_type'], unique=False)
    op.create_index(op.f('ix_memory_access_logs_accessed_at'), 'memory_access_logs', ['accessed_at'], unique=False)
    op.create_index(op.f('ix_memory_access_logs_app_id'), 'memory_access_logs', ['app_id'], unique=False)
    op.create_index(op.f('ix_memory_access_logs_memory_id'), 'memory_access_logs', ['memory_id'], unique=False)
    # Many-to-many join between memories and categories.
    op.create_table('memory_categories',
    sa.Column('memory_id', sa.UUID(), nullable=False),
    sa.Column('category_id', sa.UUID(), nullable=False),
    sa.ForeignKeyConstraint(['category_id'], ['categories.id'], ),
    sa.ForeignKeyConstraint(['memory_id'], ['memories.id'], ),
    sa.PrimaryKeyConstraint('memory_id', 'category_id')
    )
    op.create_index('idx_memory_category', 'memory_categories', ['memory_id', 'category_id'], unique=False)
    op.create_index(op.f('ix_memory_categories_category_id'), 'memory_categories', ['category_id'], unique=False)
    op.create_index(op.f('ix_memory_categories_memory_id'), 'memory_categories', ['memory_id'], unique=False)
    # State-transition history for memories (who changed what, when).
    op.create_table('memory_status_history',
    sa.Column('id', sa.UUID(), nullable=False),
    sa.Column('memory_id', sa.UUID(), nullable=False),
    sa.Column('changed_by', sa.UUID(), nullable=False),
    sa.Column('old_state', sa.Enum('active', 'paused', 'archived', 'deleted', name='memorystate'), nullable=False),
    sa.Column('new_state', sa.Enum('active', 'paused', 'archived', 'deleted', name='memorystate'), nullable=False),
    sa.Column('changed_at', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['changed_by'], ['users.id'], ),
    sa.ForeignKeyConstraint(['memory_id'], ['memories.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index('idx_history_memory_state', 'memory_status_history', ['memory_id', 'new_state'], unique=False)
    op.create_index('idx_history_user_time', 'memory_status_history', ['changed_by', 'changed_at'], unique=False)
    op.create_index(op.f('ix_memory_status_history_changed_at'), 'memory_status_history', ['changed_at'], unique=False)
    op.create_index(op.f('ix_memory_status_history_changed_by'), 'memory_status_history', ['changed_by'], unique=False)
    op.create_index(op.f('ix_memory_status_history_memory_id'), 'memory_status_history', ['memory_id'], unique=False)
    op.create_index(op.f('ix_memory_status_history_new_state'), 'memory_status_history', ['new_state'], unique=False)
    op.create_index(op.f('ix_memory_status_history_old_state'), 'memory_status_history', ['old_state'], unique=False)
    # ### end Alembic commands ###
def downgrade() -> None:
    """Downgrade schema.

    Drops everything created by upgrade(), children before parents so
    foreign keys never dangle.
    NOTE(review): on PostgreSQL the shared 'memorystate' enum type is not
    dropped here — confirm whether an explicit drop is needed.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_memory_status_history_old_state'), table_name='memory_status_history')
    op.drop_index(op.f('ix_memory_status_history_new_state'), table_name='memory_status_history')
    op.drop_index(op.f('ix_memory_status_history_memory_id'), table_name='memory_status_history')
    op.drop_index(op.f('ix_memory_status_history_changed_by'), table_name='memory_status_history')
    op.drop_index(op.f('ix_memory_status_history_changed_at'), table_name='memory_status_history')
    op.drop_index('idx_history_user_time', table_name='memory_status_history')
    op.drop_index('idx_history_memory_state', table_name='memory_status_history')
    op.drop_table('memory_status_history')
    op.drop_index(op.f('ix_memory_categories_memory_id'), table_name='memory_categories')
    op.drop_index(op.f('ix_memory_categories_category_id'), table_name='memory_categories')
    op.drop_index('idx_memory_category', table_name='memory_categories')
    op.drop_table('memory_categories')
    op.drop_index(op.f('ix_memory_access_logs_memory_id'), table_name='memory_access_logs')
    op.drop_index(op.f('ix_memory_access_logs_app_id'), table_name='memory_access_logs')
    op.drop_index(op.f('ix_memory_access_logs_accessed_at'), table_name='memory_access_logs')
    op.drop_index(op.f('ix_memory_access_logs_access_type'), table_name='memory_access_logs')
    op.drop_index('idx_access_memory_time', table_name='memory_access_logs')
    op.drop_index('idx_access_app_time', table_name='memory_access_logs')
    op.drop_table('memory_access_logs')
    op.drop_index(op.f('ix_memories_user_id'), table_name='memories')
    op.drop_index(op.f('ix_memories_state'), table_name='memories')
    op.drop_index(op.f('ix_memories_deleted_at'), table_name='memories')
    op.drop_index(op.f('ix_memories_created_at'), table_name='memories')
    op.drop_index(op.f('ix_memories_archived_at'), table_name='memories')
    op.drop_index(op.f('ix_memories_app_id'), table_name='memories')
    op.drop_index('idx_memory_user_state', table_name='memories')
    op.drop_index('idx_memory_user_app', table_name='memories')
    op.drop_index('idx_memory_app_state', table_name='memories')
    op.drop_table('memories')
    op.drop_index(op.f('ix_apps_owner_id'), table_name='apps')
    op.drop_index(op.f('ix_apps_name'), table_name='apps')
    op.drop_index(op.f('ix_apps_is_active'), table_name='apps')
    op.drop_index(op.f('ix_apps_created_at'), table_name='apps')
    op.drop_table('apps')
    op.drop_index(op.f('ix_users_user_id'), table_name='users')
    op.drop_index(op.f('ix_users_name'), table_name='users')
    op.drop_index(op.f('ix_users_email'), table_name='users')
    op.drop_index(op.f('ix_users_created_at'), table_name='users')
    op.drop_table('users')
    op.drop_index(op.f('ix_categories_name'), table_name='categories')
    op.drop_index(op.f('ix_categories_created_at'), table_name='categories')
    op.drop_table('categories')
    op.drop_index(op.f('ix_archive_policies_criteria_type'), table_name='archive_policies')
    op.drop_index(op.f('ix_archive_policies_criteria_id'), table_name='archive_policies')
    op.drop_index(op.f('ix_archive_policies_created_at'), table_name='archive_policies')
    op.drop_index('idx_policy_criteria', table_name='archive_policies')
    op.drop_table('archive_policies')
    op.drop_index(op.f('ix_access_controls_subject_type'), table_name='access_controls')
    op.drop_index(op.f('ix_access_controls_subject_id'), table_name='access_controls')
    op.drop_index(op.f('ix_access_controls_object_type'), table_name='access_controls')
    op.drop_index(op.f('ix_access_controls_object_id'), table_name='access_controls')
    op.drop_index(op.f('ix_access_controls_effect'), table_name='access_controls')
    op.drop_index(op.f('ix_access_controls_created_at'), table_name='access_controls')
    op.drop_index('idx_access_subject', table_name='access_controls')
    op.drop_index('idx_access_object', table_name='access_controls')
    op.drop_table('access_controls')
    # ### end Alembic commands ###
| {
"repo_id": "mem0ai/mem0",
"file_path": "openmemory/api/alembic/versions/0b53c747049a_initial_migration.py",
"license": "Apache License 2.0",
"lines": 217,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:openmemory/api/app/database.py | import os
from dotenv import load_dotenv
from sqlalchemy import create_engine
from sqlalchemy.orm import declarative_base, sessionmaker
# load .env file (make sure you have DATABASE_URL set)
load_dotenv()
DATABASE_URL = os.getenv("DATABASE_URL", "sqlite:///./openmemory.db")
# Guards against DATABASE_URL being *set but empty* (os.getenv only applies
# the default when the variable is entirely unset).
if not DATABASE_URL:
    raise RuntimeError("DATABASE_URL is not set in environment")
# SQLAlchemy engine & session.
# check_same_thread is a SQLite-specific DBAPI argument; passing it to any
# other driver (e.g. PostgreSQL/psycopg2) raises TypeError, so only include
# it when the URL actually points at SQLite.
_connect_args = {"check_same_thread": False} if DATABASE_URL.startswith("sqlite") else {}
engine = create_engine(
    DATABASE_URL,
    connect_args=_connect_args
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# Base class for models
Base = declarative_base()
# Dependency for FastAPI
def get_db():
    """FastAPI dependency: yield a database session, always closing it."""
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()
| {
"repo_id": "mem0ai/mem0",
"file_path": "openmemory/api/app/database.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:openmemory/api/app/mcp_server.py | """
MCP Server for OpenMemory with resilient memory client handling.
This module implements an MCP (Model Context Protocol) server that provides
memory operations for OpenMemory. The memory client is initialized lazily
to prevent server crashes when external dependencies (like Ollama) are
unavailable. If the memory client cannot be initialized, the server will
continue running with limited functionality and appropriate error messages.
Key features:
- Lazy memory client initialization
- Graceful error handling for unavailable dependencies
- Fallback to database-only mode when vector store is unavailable
- Proper logging for debugging connection issues
- Environment variable parsing for API keys
"""
import contextvars
import datetime
import json
import logging
import uuid
from app.database import SessionLocal
from app.models import Memory, MemoryAccessLog, MemoryState, MemoryStatusHistory
from app.utils.db import get_user_and_app
from app.utils.memory import get_memory_client
from app.utils.permissions import check_memory_access_permissions
from dotenv import load_dotenv
from fastapi import FastAPI, Request
from fastapi.routing import APIRouter
from mcp.server.fastmcp import FastMCP
from mcp.server.sse import SseServerTransport
# Load environment variables
load_dotenv()
# Initialize MCP
mcp = FastMCP("mem0-mcp-server")
# Don't initialize memory client at import time - do it lazily when needed
def get_memory_client_safe():
    """Return the shared memory client, or ``None`` when it cannot be built.

    Initialization failures (e.g. an unreachable vector store or Ollama) are
    logged and swallowed so the MCP server keeps running in degraded mode.
    """
    try:
        client = get_memory_client()
    except Exception as e:
        logging.warning(f"Failed to get memory client: {e}")
        return None
    return client
# Context variables for user_id and client_name.
# These carry the current request's identity into the MCP tool functions;
# contextvars are isolated per async task, so concurrent requests don't
# clobber each other.
user_id_var: contextvars.ContextVar[str] = contextvars.ContextVar("user_id")
client_name_var: contextvars.ContextVar[str] = contextvars.ContextVar("client_name")
# Create a router for MCP endpoints
mcp_router = APIRouter(prefix="/mcp")
# Initialize SSE transport (clients POST messages to this sub-path)
sse = SseServerTransport("/mcp/messages/")
@mcp.tool(description="Add a new memory. This method is called everytime the user informs anything about themselves, their preferences, or anything that has any relevant information which can be useful in the future conversation. This can also be called when the user asks you to remember something.")
async def add_memories(text: str) -> str:
    """MCP tool: store *text* as memories for the current user.

    Identity comes from the ``user_id_var``/``client_name_var`` context
    variables. Writes through the mem0 client, then mirrors the client's
    ADD/DELETE events into the local DB (Memory rows + MemoryStatusHistory).
    Returns the raw client response as JSON, or an "Error: ..." string.
    """
    uid = user_id_var.get(None)
    client_name = client_name_var.get(None)
    if not uid:
        return "Error: user_id not provided"
    if not client_name:
        return "Error: client_name not provided"
    # Get memory client safely
    memory_client = get_memory_client_safe()
    if not memory_client:
        return "Error: Memory system is currently unavailable. Please try again later."
    try:
        db = SessionLocal()
        try:
            # Get or create user and app
            user, app = get_user_and_app(db, user_id=uid, app_id=client_name)
            # Check if app is active
            if not app.is_active:
                return f"Error: App {app.name} is currently paused on OpenMemory. Cannot create new memories."
            response = memory_client.add(text,
                user_id=uid,
                metadata={
                    "source_app": "openmemory",
                    "mcp_client": client_name,
                })
            # Process the response and update database
            if isinstance(response, dict) and 'results' in response:
                for result in response['results']:
                    memory_id = uuid.UUID(result['id'])
                    memory = db.query(Memory).filter(Memory.id == memory_id).first()
                    if result['event'] == 'ADD':
                        if not memory:
                            memory = Memory(
                                id=memory_id,
                                user_id=user.id,
                                app_id=app.id,
                                content=result['memory'],
                                state=MemoryState.active
                            )
                            db.add(memory)
                        else:
                            memory.state = MemoryState.active
                            memory.content = result['memory']
                        # Create history entry.
                        # NOTE(review): `memory` is always truthy here (a new
                        # row is bound above when missing), so old_state is
                        # always MemoryState.deleted even for brand-new
                        # memories — confirm this is intended (the column is
                        # NOT NULL, so None would be rejected anyway).
                        history = MemoryStatusHistory(
                            memory_id=memory_id,
                            changed_by=user.id,
                            old_state=MemoryState.deleted if memory else None,
                            new_state=MemoryState.active
                        )
                        db.add(history)
                    elif result['event'] == 'DELETE':
                        if memory:
                            memory.state = MemoryState.deleted
                            # datetime.UTC requires Python 3.11+
                            memory.deleted_at = datetime.datetime.now(datetime.UTC)
                            # Create history entry
                            history = MemoryStatusHistory(
                                memory_id=memory_id,
                                changed_by=user.id,
                                old_state=MemoryState.active,
                                new_state=MemoryState.deleted
                            )
                            db.add(history)
            db.commit()
            return json.dumps(response)
        finally:
            db.close()
    except Exception as e:
        logging.exception(f"Error adding to memory: {e}")
        return f"Error adding to memory: {e}"
@mcp.tool(description="Search through stored memories. This method is called EVERYTIME the user asks anything.")
async def search_memory(query: str) -> str:
    """MCP tool: semantic search over the current user's memories.

    Embeds *query*, searches the vector store, filters hits against the
    app's ACL (fail-closed), logs each returned access, and returns a JSON
    string of the form ``{"results": [...]}`` or an "Error: ..." string.
    """
    uid = user_id_var.get(None)
    client_name = client_name_var.get(None)
    if not uid:
        return "Error: user_id not provided"
    if not client_name:
        return "Error: client_name not provided"
    # Get memory client safely
    memory_client = get_memory_client_safe()
    if not memory_client:
        return "Error: Memory system is currently unavailable. Please try again later."
    try:
        db = SessionLocal()
        try:
            # Get or create user and app
            user, app = get_user_and_app(db, user_id=uid, app_id=client_name)
            # Get accessible memory IDs based on ACL
            user_memories = db.query(Memory).filter(Memory.user_id == user.id).all()
            accessible_memory_ids = [memory.id for memory in user_memories if check_memory_access_permissions(db, memory, app.id)]
            filters = {
                "user_id": uid
            }
            embeddings = memory_client.embedding_model.embed(query, "search")
            hits = memory_client.vector_store.search(
                query=query,
                vectors=embeddings,
                limit=10,
                filters=filters,
            )
            # Stringify accessible UUIDs once for O(1) membership tests.
            # Assumes vector-store hit ids use the same string form — TODO confirm.
            allowed = {str(mid) for mid in accessible_memory_ids}
            results = []
            for h in hits:
                # All vector db search functions return OutputData class
                hit_id, score, payload = h.id, h.score, h.payload
                # Fail closed: drop hits without an id or outside the ACL.
                # (The previous expression `allowed and h.id is None or h.id
                # not in allowed` mis-parsed due to precedence and raised
                # TypeError (`in None`) whenever the user had no accessible
                # memories.)
                if hit_id is None or hit_id not in allowed:
                    continue
                results.append({
                    "id": hit_id,
                    "memory": payload.get("data"),
                    "hash": payload.get("hash"),
                    "created_at": payload.get("created_at"),
                    "updated_at": payload.get("updated_at"),
                    "score": score,
                })
            # Record an audit log entry for every memory actually returned.
            for r in results:
                if r.get("id"):
                    access_log = MemoryAccessLog(
                        memory_id=uuid.UUID(r["id"]),
                        app_id=app.id,
                        access_type="search",
                        metadata_={
                            "query": query,
                            "score": r.get("score"),
                            "hash": r.get("hash"),
                        },
                    )
                    db.add(access_log)
            db.commit()
            return json.dumps({"results": results}, indent=2)
        finally:
            db.close()
    except Exception as e:
        logging.exception(e)
        return f"Error searching memory: {e}"
@mcp.tool(description="List all memories in the user's memory")
async def list_memories() -> str:
    """MCP tool: list every memory the calling app is allowed to see.

    Fetches all memories for the current user from the mem0 client, filters
    them against the app's ACL, logs each returned access, and returns the
    filtered list as JSON (or an "Error: ..." string).
    """
    uid = user_id_var.get(None)
    client_name = client_name_var.get(None)
    if not uid:
        return "Error: user_id not provided"
    if not client_name:
        return "Error: client_name not provided"
    # Get memory client safely
    memory_client = get_memory_client_safe()
    if not memory_client:
        return "Error: Memory system is currently unavailable. Please try again later."
    try:
        db = SessionLocal()
        try:
            # Get or create user and app
            user, app = get_user_and_app(db, user_id=uid, app_id=client_name)
            # Get all memories
            memories = memory_client.get_all(user_id=uid)
            filtered_memories = []
            # Filter memories based on permissions.
            # NOTE(review): accessible_memory_ids is a list, so the
            # `memory_id in accessible_memory_ids` test below is O(n) per
            # memory (O(n^2) overall) — a set would be cheaper.
            user_memories = db.query(Memory).filter(Memory.user_id == user.id).all()
            accessible_memory_ids = [memory.id for memory in user_memories if check_memory_access_permissions(db, memory, app.id)]
            # Newer client versions return {"results": [...]}; older ones a flat list.
            if isinstance(memories, dict) and 'results' in memories:
                for memory_data in memories['results']:
                    if 'id' in memory_data:
                        memory_id = uuid.UUID(memory_data['id'])
                        if memory_id in accessible_memory_ids:
                            # Create access log entry
                            access_log = MemoryAccessLog(
                                memory_id=memory_id,
                                app_id=app.id,
                                access_type="list",
                                metadata_={
                                    "hash": memory_data.get('hash')
                                }
                            )
                            db.add(access_log)
                            filtered_memories.append(memory_data)
                db.commit()
            else:
                for memory in memories:
                    memory_id = uuid.UUID(memory['id'])
                    memory_obj = db.query(Memory).filter(Memory.id == memory_id).first()
                    if memory_obj and check_memory_access_permissions(db, memory_obj, app.id):
                        # Create access log entry
                        access_log = MemoryAccessLog(
                            memory_id=memory_id,
                            app_id=app.id,
                            access_type="list",
                            metadata_={
                                "hash": memory.get('hash')
                            }
                        )
                        db.add(access_log)
                        filtered_memories.append(memory)
                db.commit()
            return json.dumps(filtered_memories, indent=2)
        finally:
            db.close()
    except Exception as e:
        logging.exception(f"Error getting memories: {e}")
        return f"Error getting memories: {e}"
@mcp.tool(description="Delete specific memories by their IDs")
async def delete_memories(memory_ids: list[str]) -> str:
    """Soft-delete the given memories for the calling user/app.

    Only IDs that belong to the user AND pass the app's access check are
    processed; inaccessible or unknown IDs are ignored. Vector-store deletes
    are best-effort (a failure is logged, not fatal); DB rows are marked
    deleted and state-history / access-log entries are recorded. Returns a
    status string; errors are returned as "Error: ..." strings, not raised.
    A malformed UUID in memory_ids surfaces via the outer except as an
    error string.
    """
    uid = user_id_var.get(None)
    client_name = client_name_var.get(None)
    if not uid:
        return "Error: user_id not provided"
    if not client_name:
        return "Error: client_name not provided"
    # Get memory client safely
    memory_client = get_memory_client_safe()
    if not memory_client:
        return "Error: Memory system is currently unavailable. Please try again later."
    try:
        db = SessionLocal()
        try:
            # Get or create user and app
            user, app = get_user_and_app(db, user_id=uid, app_id=client_name)
            # Convert string IDs to UUIDs and filter accessible ones
            requested_ids = [uuid.UUID(mid) for mid in memory_ids]
            user_memories = db.query(Memory).filter(Memory.user_id == user.id).all()
            accessible_memory_ids = [memory.id for memory in user_memories if check_memory_access_permissions(db, memory, app.id)]
            # Only delete memories that are both requested and accessible
            ids_to_delete = [mid for mid in requested_ids if mid in accessible_memory_ids]
            if not ids_to_delete:
                return "Error: No accessible memories found with provided IDs"
            # Delete from vector store
            for memory_id in ids_to_delete:
                try:
                    memory_client.delete(str(memory_id))
                except Exception as delete_error:
                    logging.warning(f"Failed to delete memory {memory_id} from vector store: {delete_error}")
            # Update each memory's state and create history entries
            now = datetime.datetime.now(datetime.UTC)
            for memory_id in ids_to_delete:
                memory = db.query(Memory).filter(Memory.id == memory_id).first()
                if memory:
                    # Update memory state
                    memory.state = MemoryState.deleted
                    memory.deleted_at = now
                    # Create history entry
                    history = MemoryStatusHistory(
                        memory_id=memory_id,
                        changed_by=user.id,
                        old_state=MemoryState.active,
                        new_state=MemoryState.deleted
                    )
                    db.add(history)
                    # Create access log entry
                    access_log = MemoryAccessLog(
                        memory_id=memory_id,
                        app_id=app.id,
                        access_type="delete",
                        metadata_={"operation": "delete_by_id"}
                    )
                    db.add(access_log)
            db.commit()
            return f"Successfully deleted {len(ids_to_delete)} memories"
        finally:
            db.close()
    except Exception as e:
        logging.exception(f"Error deleting memories: {e}")
        return f"Error deleting memories: {e}"
@mcp.tool(description="Delete all memories in the user's memory")
async def delete_all_memories() -> str:
    """Soft-delete every memory the calling app can access.

    Removes each accessible memory from the vector store (best-effort),
    marks the corresponding DB rows deleted, and records state-history and
    access-log entries. Returns a status string; errors are returned as
    "Error: ..." strings, not raised.
    """
    uid = user_id_var.get(None)
    client_name = client_name_var.get(None)
    if not uid:
        return "Error: user_id not provided"
    if not client_name:
        return "Error: client_name not provided"
    # Get memory client safely
    memory_client = get_memory_client_safe()
    if not memory_client:
        return "Error: Memory system is currently unavailable. Please try again later."
    try:
        db = SessionLocal()
        try:
            # Get or create user and app
            user, app = get_user_and_app(db, user_id=uid, app_id=client_name)
            user_memories = db.query(Memory).filter(Memory.user_id == user.id).all()
            accessible_memory_ids = [memory.id for memory in user_memories if check_memory_access_permissions(db, memory, app.id)]
            # delete the accessible memories only (vector store; best-effort)
            for memory_id in accessible_memory_ids:
                try:
                    memory_client.delete(str(memory_id))
                except Exception as delete_error:
                    logging.warning(f"Failed to delete memory {memory_id} from vector store: {delete_error}")
            # Update each memory's state and create history entries
            now = datetime.datetime.now(datetime.UTC)
            for memory_id in accessible_memory_ids:
                memory = db.query(Memory).filter(Memory.id == memory_id).first()
                if not memory:
                    # FIX: guard against a row disappearing between the
                    # permission scan and this update (sibling
                    # delete_memories already guards with `if memory:`).
                    continue
                # Update memory state
                memory.state = MemoryState.deleted
                memory.deleted_at = now
                # Create history entry
                history = MemoryStatusHistory(
                    memory_id=memory_id,
                    changed_by=user.id,
                    old_state=MemoryState.active,
                    new_state=MemoryState.deleted
                )
                db.add(history)
                # Create access log entry
                access_log = MemoryAccessLog(
                    memory_id=memory_id,
                    app_id=app.id,
                    access_type="delete_all",
                    metadata_={"operation": "bulk_delete"}
                )
                db.add(access_log)
            db.commit()
            return "Successfully deleted all memories"
        finally:
            db.close()
    except Exception as e:
        logging.exception(f"Error deleting memories: {e}")
        return f"Error deleting memories: {e}"
@mcp_router.get("/{client_name}/sse/{user_id}")
async def handle_sse(request: Request):
    """Handle SSE connections for a specific user and client"""
    # Extract user_id and client_name from path parameters and stash them
    # in context vars so the @mcp.tool handlers can see the caller identity.
    uid = request.path_params.get("user_id")
    user_token = user_id_var.set(uid or "")
    client_name = request.path_params.get("client_name")
    client_token = client_name_var.set(client_name or "")
    try:
        # Handle SSE connection; the MCP server runs for the lifetime of the
        # stream. NOTE(review): request._send is a private Starlette
        # attribute — confirm it survives framework upgrades.
        async with sse.connect_sse(
            request.scope,
            request.receive,
            request._send,
        ) as (read_stream, write_stream):
            await mcp._mcp_server.run(
                read_stream,
                write_stream,
                mcp._mcp_server.create_initialization_options(),
            )
    finally:
        # Clean up context variables (restore pre-request values)
        user_id_var.reset(user_token)
        client_name_var.reset(client_token)
@mcp_router.post("/messages/")
async def handle_get_message(request: Request):
    """Legacy message endpoint; forwards to the shared POST handler."""
    # Resolves at call time to the module-level handle_post_message
    # implementation defined later in this file.
    return await handle_post_message(request)
@mcp_router.post("/{client_name}/sse/{user_id}/messages/")
async def handle_post_message(request: Request):
    """Per-session message endpoint; forwards to the shared POST handler."""
    # NOTE(review): this def is immediately shadowed at module level by the
    # plain function of the same name below, so the call in this body
    # resolves to that later definition at call time — it does NOT recurse.
    # Consider renaming one of them for clarity.
    return await handle_post_message(request)
async def handle_post_message(request: Request):
    """Handle POST messages for SSE.

    Bridges the incoming FastAPI request to the SSE transport's
    post-message handler using minimal ASGI receive/send shims, then
    returns a plain success payload. (The original wrapped the body in a
    `try/finally: pass`, which did nothing and is removed.)
    """
    body = await request.body()
    # Create a simple receive function that replays the already-read body
    async def receive():
        return {"type": "http.request", "body": body, "more_body": False}
    # Create a simple send function that does nothing; the transport's own
    # response is discarded because we return our own status payload below
    async def send(message):
        return {}
    # Call handle_post_message with the correct arguments
    await sse.handle_post_message(request.scope, receive, send)
    # Return a success response
    return {"status": "ok"}
def setup_mcp_server(app: FastAPI):
    """Attach the MCP SSE endpoints to the given FastAPI application."""
    # Give the underlying MCP server a stable, identifiable name.
    mcp._mcp_server.name = "mem0-mcp-server"
    # Mount all MCP routes (SSE stream + message POST handlers).
    app.include_router(mcp_router)
| {
"repo_id": "mem0ai/mem0",
"file_path": "openmemory/api/app/mcp_server.py",
"license": "Apache License 2.0",
"lines": 415,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:openmemory/api/app/models.py | import datetime
import enum
import uuid
import sqlalchemy as sa
from app.database import Base
from app.utils.categorization import get_categories_for_memory
from sqlalchemy import (
JSON,
UUID,
Boolean,
Column,
DateTime,
Enum,
ForeignKey,
Index,
Integer,
String,
Table,
event,
)
from sqlalchemy.orm import Session, relationship
def get_current_utc_time():
"""Get current UTC time"""
return datetime.datetime.now(datetime.UTC)
class MemoryState(enum.Enum):
    """Lifecycle states of a memory row (soft-delete model)."""
    active = "active"        # visible and usable
    paused = "paused"        # temporarily excluded
    archived = "archived"    # retained but tucked away
    deleted = "deleted"      # soft-deleted; kept for history
class User(Base):
    """An end user of OpenMemory; owns apps and memories."""
    __tablename__ = "users"
    id = Column(UUID, primary_key=True, default=lambda: uuid.uuid4())
    # External (client-supplied) identifier, distinct from the surrogate id.
    user_id = Column(String, nullable=False, unique=True, index=True)
    name = Column(String, nullable=True, index=True)
    email = Column(String, unique=True, nullable=True, index=True)
    # Attribute named metadata_ because 'metadata' is reserved on declarative models.
    metadata_ = Column('metadata', JSON, default=dict)
    created_at = Column(DateTime, default=get_current_utc_time, index=True)
    updated_at = Column(DateTime,
                        default=get_current_utc_time,
                        onupdate=get_current_utc_time)
    apps = relationship("App", back_populates="owner")
    memories = relationship("Memory", back_populates="user")
class App(Base):
    """A client application belonging to a user; creates/accesses memories."""
    __tablename__ = "apps"
    id = Column(UUID, primary_key=True, default=lambda: uuid.uuid4())
    owner_id = Column(UUID, ForeignKey("users.id"), nullable=False, index=True)
    name = Column(String, nullable=False, index=True)
    description = Column(String)
    # Attribute named metadata_ because 'metadata' is reserved on declarative models.
    metadata_ = Column('metadata', JSON, default=dict)
    is_active = Column(Boolean, default=True, index=True)
    created_at = Column(DateTime, default=get_current_utc_time, index=True)
    updated_at = Column(DateTime,
                        default=get_current_utc_time,
                        onupdate=get_current_utc_time)
    owner = relationship("User", back_populates="apps")
    memories = relationship("Memory", back_populates="app")
    # App names are unique per owner, not globally.
    __table_args__ = (
        sa.UniqueConstraint('owner_id', 'name', name='idx_app_owner_name'),
    )
class Config(Base):
    """Key/value store for application configuration blobs (JSON values)."""
    __tablename__ = "configs"
    id = Column(UUID, primary_key=True, default=lambda: uuid.uuid4())
    key = Column(String, unique=True, nullable=False, index=True)
    value = Column(JSON, nullable=False)
    created_at = Column(DateTime, default=get_current_utc_time)
    updated_at = Column(DateTime,
                        default=get_current_utc_time,
                        onupdate=get_current_utc_time)
class Memory(Base):
    """A stored memory: content plus ownership, lifecycle state and categories."""
    __tablename__ = "memories"
    id = Column(UUID, primary_key=True, default=lambda: uuid.uuid4())
    user_id = Column(UUID, ForeignKey("users.id"), nullable=False, index=True)
    app_id = Column(UUID, ForeignKey("apps.id"), nullable=False, index=True)
    content = Column(String, nullable=False)
    # NOTE(review): stored as String — presumably a serialized embedding; confirm format.
    vector = Column(String)
    # Attribute named metadata_ because 'metadata' is reserved on declarative models.
    metadata_ = Column('metadata', JSON, default=dict)
    state = Column(Enum(MemoryState), default=MemoryState.active, index=True)
    created_at = Column(DateTime, default=get_current_utc_time, index=True)
    updated_at = Column(DateTime,
                        default=get_current_utc_time,
                        onupdate=get_current_utc_time)
    # Timestamps for soft lifecycle transitions (see MemoryState).
    archived_at = Column(DateTime, nullable=True, index=True)
    deleted_at = Column(DateTime, nullable=True, index=True)
    user = relationship("User", back_populates="memories")
    app = relationship("App", back_populates="memories")
    categories = relationship("Category", secondary="memory_categories", back_populates="memories")
    __table_args__ = (
        Index('idx_memory_user_state', 'user_id', 'state'),
        Index('idx_memory_app_state', 'app_id', 'state'),
        Index('idx_memory_user_app', 'user_id', 'app_id'),
    )
class Category(Base):
    """A label that can be attached to memories (via memory_categories)."""
    __tablename__ = "categories"
    id = Column(UUID, primary_key=True, default=lambda: uuid.uuid4())
    name = Column(String, unique=True, nullable=False, index=True)
    description = Column(String)
    # BUG FIX: the default must be a callable. The original passed
    # datetime.datetime.now(datetime.UTC), which is evaluated once at import
    # time, so every new row got the process start time. Using
    # get_current_utc_time matches every other model in this module.
    created_at = Column(DateTime, default=get_current_utc_time, index=True)
    updated_at = Column(DateTime,
                        default=get_current_utc_time,
                        onupdate=get_current_utc_time)
    memories = relationship("Memory", secondary="memory_categories", back_populates="categories")
# Association table for the many-to-many Memory <-> Category relationship.
memory_categories = Table(
    "memory_categories", Base.metadata,
    Column("memory_id", UUID, ForeignKey("memories.id"), primary_key=True, index=True),
    Column("category_id", UUID, ForeignKey("categories.id"), primary_key=True, index=True),
    Index('idx_memory_category', 'memory_id', 'category_id')
)
class AccessControl(Base):
    """Generic allow/deny rule between a subject (e.g. app) and an object (e.g. memory).

    A NULL subject_id/object_id acts as a wildcard for that side of the rule.
    """
    __tablename__ = "access_controls"
    id = Column(UUID, primary_key=True, default=lambda: uuid.uuid4())
    subject_type = Column(String, nullable=False, index=True)
    subject_id = Column(UUID, nullable=True, index=True)
    object_type = Column(String, nullable=False, index=True)
    object_id = Column(UUID, nullable=True, index=True)
    # NOTE(review): presumably "allow"/"deny" — confirm against the permission checker.
    effect = Column(String, nullable=False, index=True)
    created_at = Column(DateTime, default=get_current_utc_time, index=True)
    __table_args__ = (
        Index('idx_access_subject', 'subject_type', 'subject_id'),
        Index('idx_access_object', 'object_type', 'object_id'),
    )
class ArchivePolicy(Base):
    """Rule that archives matching memories after a number of days."""
    __tablename__ = "archive_policies"
    id = Column(UUID, primary_key=True, default=lambda: uuid.uuid4())
    # What the policy applies to; NULL criteria_id acts as a wildcard.
    criteria_type = Column(String, nullable=False, index=True)
    criteria_id = Column(UUID, nullable=True, index=True)
    days_to_archive = Column(Integer, nullable=False)
    created_at = Column(DateTime, default=get_current_utc_time, index=True)
    __table_args__ = (
        Index('idx_policy_criteria', 'criteria_type', 'criteria_id'),
    )
class MemoryStatusHistory(Base):
    """Audit record of a memory's state transition and who triggered it."""
    __tablename__ = "memory_status_history"
    id = Column(UUID, primary_key=True, default=lambda: uuid.uuid4())
    memory_id = Column(UUID, ForeignKey("memories.id"), nullable=False, index=True)
    changed_by = Column(UUID, ForeignKey("users.id"), nullable=False, index=True)
    old_state = Column(Enum(MemoryState), nullable=False, index=True)
    new_state = Column(Enum(MemoryState), nullable=False, index=True)
    changed_at = Column(DateTime, default=get_current_utc_time, index=True)
    __table_args__ = (
        Index('idx_history_memory_state', 'memory_id', 'new_state'),
        Index('idx_history_user_time', 'changed_by', 'changed_at'),
    )
class MemoryAccessLog(Base):
    """Audit record of an app touching a memory (list/search/delete/...)."""
    __tablename__ = "memory_access_logs"
    id = Column(UUID, primary_key=True, default=lambda: uuid.uuid4())
    memory_id = Column(UUID, ForeignKey("memories.id"), nullable=False, index=True)
    app_id = Column(UUID, ForeignKey("apps.id"), nullable=False, index=True)
    accessed_at = Column(DateTime, default=get_current_utc_time, index=True)
    # Free-form operation tag, e.g. "list", "delete", "delete_all".
    access_type = Column(String, nullable=False, index=True)
    # Attribute named metadata_ because 'metadata' is reserved on declarative models.
    metadata_ = Column('metadata', JSON, default=dict)
    __table_args__ = (
        Index('idx_access_memory_time', 'memory_id', 'accessed_at'),
        Index('idx_access_app_time', 'app_id', 'accessed_at'),
    )
def categorize_memory(memory: Memory, db: Session) -> None:
    """Categorize a memory using OpenAI and store the categories in the database.

    Creates Category rows as needed and links them to the memory through the
    memory_categories association table. Commits once per category; any
    failure rolls back and is swallowed (logged to stdout) so callers —
    notably the after_insert/after_update event hooks — are never broken by
    categorization errors.
    """
    try:
        # Get categories from OpenAI
        categories = get_categories_for_memory(memory.content)
        # Get or create categories in the database
        for category_name in categories:
            category = db.query(Category).filter(Category.name == category_name).first()
            if not category:
                category = Category(
                    name=category_name,
                    description=f"Automatically created category for {category_name}"
                )
                db.add(category)
                db.flush()  # Flush to get the category ID
            # Check if the memory-category association already exists
            existing = db.execute(
                memory_categories.select().where(
                    (memory_categories.c.memory_id == memory.id) &
                    (memory_categories.c.category_id == category.id)
                )
            ).first()
            if not existing:
                # Create the association
                db.execute(
                    memory_categories.insert().values(
                        memory_id=memory.id,
                        category_id=category.id
                    )
                )
                db.commit()
    except Exception as e:
        db.rollback()
        print(f"Error categorizing memory: {e}")
@event.listens_for(Memory, 'after_insert')
def after_memory_insert(mapper, connection, target):
    """Trigger categorization after a memory is inserted.

    NOTE(review): opens a new Session on the in-flight connection and commits
    inside a flush event, and makes a synchronous LLM call here — both are
    SQLAlchemy anti-patterns worth confirming/offloading to a background job.
    """
    db = Session(bind=connection)
    categorize_memory(target, db)
    db.close()
@event.listens_for(Memory, 'after_update')
def after_memory_update(mapper, connection, target):
    """Trigger categorization after a memory is updated.

    NOTE(review): same caveats as after_memory_insert — Session-on-connection
    plus a synchronous LLM call inside a flush event.
    """
    db = Session(bind=connection)
    categorize_memory(target, db)
    db.close()
| {
"repo_id": "mem0ai/mem0",
"file_path": "openmemory/api/app/models.py",
"license": "Apache License 2.0",
"lines": 200,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:openmemory/api/app/routers/apps.py | from typing import Optional
from uuid import UUID
from app.database import get_db
from app.models import App, Memory, MemoryAccessLog, MemoryState
from fastapi import APIRouter, Depends, HTTPException, Query
from sqlalchemy import desc, func
from sqlalchemy.orm import Session, joinedload
router = APIRouter(prefix="/api/v1/apps", tags=["apps"])
# Helper functions
def get_app_or_404(db: Session, app_id: UUID) -> App:
    """Fetch the App with the given id or raise a 404 HTTPException."""
    found = db.query(App).filter(App.id == app_id).first()
    if found is None:
        raise HTTPException(status_code=404, detail="App not found")
    return found
# List all apps with filtering
@router.get("/")
async def list_apps(
    name: Optional[str] = None,
    is_active: Optional[bool] = None,
    sort_by: str = 'name',
    sort_direction: str = 'asc',
    page: int = Query(1, ge=1),
    page_size: int = Query(10, ge=1, le=100),
    db: Session = Depends(get_db)
):
    """List apps with optional name/active filters, sorting and pagination.

    Each app is annotated with the number of memories it created (any
    non-deleted state) and the number of distinct memories it accessed.
    sort_by accepts 'name', 'memories' or 'memories_accessed'; anything
    else falls back to name.
    """
    # Create a subquery for memory counts (deleted memories excluded)
    memory_counts = db.query(
        Memory.app_id,
        func.count(Memory.id).label('memory_count')
    ).filter(
        Memory.state.in_([MemoryState.active, MemoryState.paused, MemoryState.archived])
    ).group_by(Memory.app_id).subquery()
    # Create a subquery for access counts (distinct memories per app)
    access_counts = db.query(
        MemoryAccessLog.app_id,
        func.count(func.distinct(MemoryAccessLog.memory_id)).label('access_count')
    ).group_by(MemoryAccessLog.app_id).subquery()
    # Base query; coalesce so apps with no rows in the subqueries count as 0
    query = db.query(
        App,
        func.coalesce(memory_counts.c.memory_count, 0).label('total_memories_created'),
        func.coalesce(access_counts.c.access_count, 0).label('total_memories_accessed')
    )
    # Join with subqueries (outer joins keep apps without memories/accesses)
    query = query.outerjoin(
        memory_counts,
        App.id == memory_counts.c.app_id
    ).outerjoin(
        access_counts,
        App.id == access_counts.c.app_id
    )
    if name:
        query = query.filter(App.name.ilike(f"%{name}%"))
    if is_active is not None:
        query = query.filter(App.is_active == is_active)
    # Apply sorting
    if sort_by == 'name':
        sort_field = App.name
    elif sort_by == 'memories':
        sort_field = func.coalesce(memory_counts.c.memory_count, 0)
    elif sort_by == 'memories_accessed':
        sort_field = func.coalesce(access_counts.c.access_count, 0)
    else:
        sort_field = App.name  # default sort
    if sort_direction == 'desc':
        query = query.order_by(desc(sort_field))
    else:
        query = query.order_by(sort_field)
    total = query.count()
    apps = query.offset((page - 1) * page_size).limit(page_size).all()
    # Each result row is a (App, memory_count, access_count) tuple.
    return {
        "total": total,
        "page": page,
        "page_size": page_size,
        "apps": [
            {
                "id": app[0].id,
                "name": app[0].name,
                "is_active": app[0].is_active,
                "total_memories_created": app[1],
                "total_memories_accessed": app[2]
            }
            for app in apps
        ]
    }
# Get app details
@router.get("/{app_id}")
async def get_app_details(
    app_id: UUID,
    db: Session = Depends(get_db)
):
    """Return activity stats for one app; 404 if the app does not exist."""
    app = get_app_or_404(db, app_id)
    # Get memory access statistics (counts every log row, not distinct memories)
    access_stats = db.query(
        func.count(MemoryAccessLog.id).label("total_memories_accessed"),
        func.min(MemoryAccessLog.accessed_at).label("first_accessed"),
        func.max(MemoryAccessLog.accessed_at).label("last_accessed")
    ).filter(MemoryAccessLog.app_id == app_id).first()
    return {
        "is_active": app.is_active,
        # NOTE(review): counts memories in every state, including deleted —
        # differs from list_apps, which excludes deleted. Confirm intent.
        "total_memories_created": db.query(Memory)
            .filter(Memory.app_id == app_id)
            .count(),
        "total_memories_accessed": access_stats.total_memories_accessed or 0,
        "first_accessed": access_stats.first_accessed,
        "last_accessed": access_stats.last_accessed
    }
# List memories created by app
@router.get("/{app_id}/memories")
async def list_app_memories(
    app_id: UUID,
    page: int = Query(1, ge=1),
    page_size: int = Query(10, ge=1, le=100),
    db: Session = Depends(get_db)
):
    """Paginated list of non-deleted memories created by this app; 404 on unknown app."""
    get_app_or_404(db, app_id)
    query = db.query(Memory).filter(
        Memory.app_id == app_id,
        Memory.state.in_([MemoryState.active, MemoryState.paused, MemoryState.archived])
    )
    # Add eager loading for categories (avoids per-row lazy loads below)
    query = query.options(joinedload(Memory.categories))
    total = query.count()
    memories = query.order_by(Memory.created_at.desc()).offset((page - 1) * page_size).limit(page_size).all()
    return {
        "total": total,
        "page": page,
        "page_size": page_size,
        "memories": [
            {
                "id": memory.id,
                "content": memory.content,
                "created_at": memory.created_at,
                "state": memory.state.value,
                "app_id": memory.app_id,
                "categories": [category.name for category in memory.categories],
                "metadata_": memory.metadata_
            }
            for memory in memories
        ]
    }
# List memories accessed by app
@router.get("/{app_id}/accessed")
async def list_app_accessed_memories(
    app_id: UUID,
    page: int = Query(1, ge=1),
    page_size: int = Query(10, ge=1, le=100),
    db: Session = Depends(get_db)
):
    """Paginated list of memories this app accessed, most-accessed first.

    Raises 404 for an unknown app_id (consistency fix: the sibling
    endpoints already 404, while this one silently returned an empty page).
    """
    get_app_or_404(db, app_id)
    # Get memories with access counts
    query = db.query(
        Memory,
        func.count(MemoryAccessLog.id).label("access_count")
    ).join(
        MemoryAccessLog,
        Memory.id == MemoryAccessLog.memory_id
    ).filter(
        MemoryAccessLog.app_id == app_id
    ).group_by(
        Memory.id
    ).order_by(
        desc("access_count")
    )
    # Add eager loading for categories
    query = query.options(joinedload(Memory.categories))
    total = query.count()
    results = query.offset((page - 1) * page_size).limit(page_size).all()
    return {
        "total": total,
        "page": page,
        "page_size": page_size,
        "memories": [
            {
                "memory": {
                    "id": memory.id,
                    "content": memory.content,
                    "created_at": memory.created_at,
                    "state": memory.state.value,
                    "app_id": memory.app_id,
                    "app_name": memory.app.name if memory.app else None,
                    "categories": [category.name for category in memory.categories],
                    "metadata_": memory.metadata_
                },
                "access_count": count
            }
            for memory, count in results
        ]
    }
@router.put("/{app_id}")
async def update_app_details(
    app_id: UUID,
    is_active: bool,
    db: Session = Depends(get_db)
):
    """Set the active flag on an app; 404 if the app does not exist."""
    target = get_app_or_404(db, app_id)
    target.is_active = is_active
    db.commit()
    return {"status": "success", "message": "Updated app details successfully"}
| {
"repo_id": "mem0ai/mem0",
"file_path": "openmemory/api/app/routers/apps.py",
"license": "Apache License 2.0",
"lines": 198,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:openmemory/api/app/routers/stats.py | from app.database import get_db
from app.models import App, Memory, MemoryState, User
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
router = APIRouter(prefix="/api/v1/stats", tags=["stats"])
@router.get("/")
async def get_profile(
    user_id: str,
    db: Session = Depends(get_db)
):
    """Return memory/app counts, plus the app rows, for the given user."""
    profile_user = db.query(User).filter(User.user_id == user_id).first()
    if profile_user is None:
        raise HTTPException(status_code=404, detail="User not found")
    # Count every memory that is not soft-deleted.
    total_memories = db.query(Memory).filter(
        Memory.user_id == profile_user.id,
        Memory.state != MemoryState.deleted,
    ).count()
    owned_apps = db.query(App).filter(App.owner == profile_user)
    total_apps = owned_apps.count()
    return {
        "total_memories": total_memories,
        "total_apps": total_apps,
        "apps": owned_apps.all()
    }
| {
"repo_id": "mem0ai/mem0",
"file_path": "openmemory/api/app/routers/stats.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:openmemory/api/app/schemas.py | from datetime import datetime
from typing import List, Optional
from uuid import UUID
from pydantic import BaseModel, ConfigDict, Field, validator
class MemoryBase(BaseModel):
    """Fields shared by all memory payloads."""
    content: str
    # Trailing underscore mirrors the ORM attribute (column is named 'metadata').
    metadata_: Optional[dict] = Field(default_factory=dict)
class MemoryCreate(MemoryBase):
    """Payload for creating a memory: base fields plus owner and app."""
    user_id: UUID
    app_id: UUID
class Category(BaseModel):
    """Minimal category representation (name only)."""
    name: str
class App(BaseModel):
    """Minimal app representation for embedding in memory responses."""
    id: UUID
    name: str
class Memory(MemoryBase):
    """Full memory read model; built from ORM objects (from_attributes)."""
    id: UUID
    user_id: UUID
    app_id: UUID
    created_at: datetime
    updated_at: Optional[datetime] = None
    state: str
    categories: Optional[List[Category]] = None
    app: App
    model_config = ConfigDict(from_attributes=True)
class MemoryUpdate(BaseModel):
    """Partial-update payload; all fields optional."""
    content: Optional[str] = None
    metadata_: Optional[dict] = None
    state: Optional[str] = None
class MemoryResponse(BaseModel):
    """API-facing memory shape; created_at is serialized as epoch seconds."""
    id: UUID
    content: str
    created_at: int
    state: str
    app_id: UUID
    app_name: str
    categories: List[str]
    metadata_: Optional[dict] = None
    # NOTE(review): pydantic-v1-style validator; this file also uses v2's
    # ConfigDict — consider field_validator('created_at', mode='before').
    @validator('created_at', pre=True)
    def convert_to_epoch(cls, v):
        """Coerce an incoming datetime to a unix timestamp; pass ints through."""
        if isinstance(v, datetime):
            return int(v.timestamp())
        return v
class PaginatedMemoryResponse(BaseModel):
    """Standard pagination envelope for memory listings."""
    items: List[MemoryResponse]
    total: int    # total matching rows
    page: int     # 1-based page index
    size: int     # page size requested
    pages: int    # total page count
| {
"repo_id": "mem0ai/mem0",
"file_path": "openmemory/api/app/schemas.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:openmemory/api/app/utils/categorization.py | import logging
from typing import List
from app.utils.prompts import MEMORY_CATEGORIZATION_PROMPT
from dotenv import load_dotenv
from openai import OpenAI
from pydantic import BaseModel
from tenacity import retry, stop_after_attempt, wait_exponential
# Load .env so OPENAI_API_KEY (and friends) are present before the client is built.
load_dotenv()
# Module-wide OpenAI client; picks up OPENAI_API_KEY from the environment.
openai_client = OpenAI()
class MemoryCategories(BaseModel):
    """Structured-output schema the categorization completion must return."""
    categories: List[str]
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=15))
def get_categories_for_memory(memory: str) -> List[str]:
    """Ask the LLM to categorize *memory*; returns lower-cased category names.

    Retried up to 3 times with exponential backoff by the decorator; the
    final failure is re-raised to the caller.
    """
    try:
        messages = [
            {"role": "system", "content": MEMORY_CATEGORIZATION_PROMPT},
            {"role": "user", "content": memory}
        ]
        # Let OpenAI handle the pydantic parsing directly
        completion = openai_client.beta.chat.completions.parse(
            model="gpt-4o-mini",
            messages=messages,
            response_format=MemoryCategories,
            temperature=0
        )
        parsed: MemoryCategories = completion.choices[0].message.parsed
        return [cat.strip().lower() for cat in parsed.categories]
    except Exception as e:
        logging.error(f"[ERROR] Failed to get categories: {e}")
        try:
            # 'completion' may be unbound if the request itself failed,
            # hence the nested try.
            logging.debug(f"[DEBUG] Raw response: {completion.choices[0].message.content}")
        except Exception as debug_e:
            logging.debug(f"[DEBUG] Could not extract raw response: {debug_e}")
        raise
| {
"repo_id": "mem0ai/mem0",
"file_path": "openmemory/api/app/utils/categorization.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:openmemory/api/app/utils/db.py | from typing import Tuple
from app.models import App, User
from sqlalchemy.orm import Session
def get_or_create_user(db: Session, user_id: str) -> User:
    """Return the User with ``user_id``, creating (and committing) one if absent."""
    existing = db.query(User).filter(User.user_id == user_id).first()
    if existing:
        return existing
    created = User(user_id=user_id)
    db.add(created)
    db.commit()
    db.refresh(created)
    return created
def get_or_create_app(db: Session, user: User, app_id: str) -> App:
    """Return the user's App named ``app_id``, creating (and committing) one if absent."""
    existing = db.query(App).filter(App.owner_id == user.id, App.name == app_id).first()
    if existing:
        return existing
    created = App(owner_id=user.id, name=app_id)
    db.add(created)
    db.commit()
    db.refresh(created)
    return created
def get_user_and_app(db: Session, user_id: str, app_id: str) -> Tuple[User, App]:
    """Resolve (creating as needed) the user and that user's app, in that order."""
    owner = get_or_create_user(db, user_id)
    return owner, get_or_create_app(db, owner, app_id)
| {
"repo_id": "mem0ai/mem0",
"file_path": "openmemory/api/app/utils/db.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:openmemory/api/app/utils/memory.py | """
Memory client utilities for OpenMemory.
This module provides functionality to initialize and manage the Mem0 memory client
with automatic configuration management and Docker environment support.
Docker Ollama Configuration:
When running inside a Docker container and using Ollama as the LLM or embedder provider,
the system automatically detects the Docker environment and adjusts localhost URLs
to properly reach the host machine where Ollama is running.
Supported Docker host resolution (in order of preference):
1. OLLAMA_HOST environment variable (if set)
2. host.docker.internal (Docker Desktop for Mac/Windows)
3. Docker bridge gateway IP (typically 172.17.0.1 on Linux)
4. Fallback to 172.17.0.1
Example configuration that will be automatically adjusted:
{
"llm": {
"provider": "ollama",
"config": {
"model": "llama3.1:latest",
"ollama_base_url": "http://localhost:11434" # Auto-adjusted in Docker
}
}
}
"""
import hashlib
import json
import os
import socket
from app.database import SessionLocal
from app.models import Config as ConfigModel
from mem0 import Memory
# Process-wide cache of the initialized Mem0 client, plus the hash of the
# config it was built from (used to detect config changes and rebuild).
_memory_client = None
_config_hash = None
def _get_config_hash(config_dict):
    """Return an MD5 hex digest of *config_dict*, used only for change detection."""
    canonical = json.dumps(config_dict, sort_keys=True)
    return hashlib.md5(canonical.encode()).hexdigest()
def _get_docker_host_url():
    """
    Determine the appropriate host URL to reach host machine from inside Docker container.
    Returns the best available option for reaching the host from inside a container.

    Resolution order: OLLAMA_HOST env var, then host.docker.internal, then the
    default-route gateway from /proc/net/route, then the 172.17.0.1 fallback.
    Outside Docker (no /.dockerenv) simply returns "localhost".
    """
    # Check for custom environment variable first
    custom_host = os.environ.get('OLLAMA_HOST')
    if custom_host:
        print(f"Using custom Ollama host from OLLAMA_HOST: {custom_host}")
        # Strip any scheme and port; only the hostname is returned.
        return custom_host.replace('http://', '').replace('https://', '').split(':')[0]
    # Check if we're running inside Docker
    if not os.path.exists('/.dockerenv'):
        # Not in Docker, return localhost as-is
        return "localhost"
    print("Detected Docker environment, adjusting host URL for Ollama...")
    # Try different host resolution strategies
    host_candidates = []
    # 1. host.docker.internal (works on Docker Desktop for Mac/Windows)
    try:
        socket.gethostbyname('host.docker.internal')
        host_candidates.append('host.docker.internal')
        print("Found host.docker.internal")
    except socket.gaierror:
        pass
    # 2. Docker bridge gateway (typically 172.17.0.1 on Linux), read from the
    #    kernel routing table; the gateway is hex-encoded little-endian.
    try:
        with open('/proc/net/route', 'r') as f:
            for line in f:
                fields = line.strip().split()
                if fields[1] == '00000000':  # Default route
                    gateway_hex = fields[2]
                    gateway_ip = socket.inet_ntoa(bytes.fromhex(gateway_hex)[::-1])
                    host_candidates.append(gateway_ip)
                    print(f"Found Docker gateway: {gateway_ip}")
                    break
    except (FileNotFoundError, IndexError, ValueError):
        pass
    # 3. Fallback to common Docker bridge IP
    if not host_candidates:
        host_candidates.append('172.17.0.1')
        print("Using fallback Docker bridge IP: 172.17.0.1")
    # Return the first available candidate
    return host_candidates[0]
def _fix_ollama_urls(config_section):
    """
    Fix Ollama URLs for Docker environment.
    Replaces localhost URLs with appropriate Docker host URLs.
    Sets default ollama_base_url if not provided.
    Sections without a "config" key (or falsy sections) pass through unchanged.
    """
    if not config_section or "config" not in config_section:
        return config_section
    inner = config_section["config"]
    if "ollama_base_url" not in inner:
        # No URL configured: supply the Docker Desktop default.
        inner["ollama_base_url"] = "http://host.docker.internal:11434"
        return config_section
    # A URL is present; rewrite it only if it points at localhost.
    url = inner["ollama_base_url"]
    if "localhost" in url or "127.0.0.1" in url:
        docker_host = _get_docker_host_url()
        if docker_host != "localhost":
            new_url = url.replace("localhost", docker_host).replace("127.0.0.1", docker_host)
            inner["ollama_base_url"] = new_url
            print(f"Adjusted Ollama URL from {url} to {new_url}")
    return config_section
def reset_memory_client():
    """Drop the cached client and config hash so the next access rebuilds them."""
    global _memory_client, _config_hash
    _config_hash = None
    _memory_client = None
def get_default_memory_config():
    """Get default memory client configuration with sensible defaults.

    The vector store is auto-detected from environment variables, checked in
    a fixed priority order (Chroma, Qdrant, Weaviate, Redis, pgvector,
    Milvus, Elasticsearch, OpenSearch, FAISS); if none match, Qdrant at
    mem0_store:6333 is the fallback. LLM and embedder default to OpenAI with
    'env:OPENAI_API_KEY' placeholders resolved later by
    _parse_environment_variables.
    """
    # Detect vector store based on environment variables
    vector_store_config = {
        "collection_name": "openmemory",
        "host": "mem0_store",
    }
    # Check for different vector store configurations based on environment variables.
    # NOTE: order matters — the first matching provider wins.
    if os.environ.get('CHROMA_HOST') and os.environ.get('CHROMA_PORT'):
        vector_store_provider = "chroma"
        vector_store_config.update({
            "host": os.environ.get('CHROMA_HOST'),
            "port": int(os.environ.get('CHROMA_PORT'))
        })
    elif os.environ.get('QDRANT_HOST') and os.environ.get('QDRANT_PORT'):
        vector_store_provider = "qdrant"
        vector_store_config.update({
            "host": os.environ.get('QDRANT_HOST'),
            "port": int(os.environ.get('QDRANT_PORT'))
        })
    elif os.environ.get('WEAVIATE_CLUSTER_URL') or (os.environ.get('WEAVIATE_HOST') and os.environ.get('WEAVIATE_PORT')):
        vector_store_provider = "weaviate"
        # Prefer an explicit cluster URL if provided; otherwise build from host/port
        cluster_url = os.environ.get('WEAVIATE_CLUSTER_URL')
        if not cluster_url:
            weaviate_host = os.environ.get('WEAVIATE_HOST')
            weaviate_port = int(os.environ.get('WEAVIATE_PORT'))
            cluster_url = f"http://{weaviate_host}:{weaviate_port}"
        # Weaviate replaces the config wholesale (no "host" key).
        vector_store_config = {
            "collection_name": "openmemory",
            "cluster_url": cluster_url
        }
    elif os.environ.get('REDIS_URL'):
        vector_store_provider = "redis"
        vector_store_config = {
            "collection_name": "openmemory",
            "redis_url": os.environ.get('REDIS_URL')
        }
    elif os.environ.get('PG_HOST') and os.environ.get('PG_PORT'):
        vector_store_provider = "pgvector"
        vector_store_config.update({
            "host": os.environ.get('PG_HOST'),
            "port": int(os.environ.get('PG_PORT')),
            "dbname": os.environ.get('PG_DB', 'mem0'),
            "user": os.environ.get('PG_USER', 'mem0'),
            "password": os.environ.get('PG_PASSWORD', 'mem0')
        })
    elif os.environ.get('MILVUS_HOST') and os.environ.get('MILVUS_PORT'):
        vector_store_provider = "milvus"
        # Construct the full URL as expected by MilvusDBConfig
        milvus_host = os.environ.get('MILVUS_HOST')
        milvus_port = int(os.environ.get('MILVUS_PORT'))
        milvus_url = f"http://{milvus_host}:{milvus_port}"
        vector_store_config = {
            "collection_name": "openmemory",
            "url": milvus_url,
            "token": os.environ.get('MILVUS_TOKEN', ''),  # Always include, empty string for local setup
            "db_name": os.environ.get('MILVUS_DB_NAME', ''),
            "embedding_model_dims": 1536,
            "metric_type": "COSINE"  # Using COSINE for better semantic similarity
        }
    elif os.environ.get('ELASTICSEARCH_HOST') and os.environ.get('ELASTICSEARCH_PORT'):
        vector_store_provider = "elasticsearch"
        # Construct the full URL with scheme since Elasticsearch client expects it
        elasticsearch_host = os.environ.get('ELASTICSEARCH_HOST')
        elasticsearch_port = int(os.environ.get('ELASTICSEARCH_PORT'))
        # Use http:// scheme since we're not using SSL
        full_host = f"http://{elasticsearch_host}"
        vector_store_config.update({
            "host": full_host,
            "port": elasticsearch_port,
            "user": os.environ.get('ELASTICSEARCH_USER', 'elastic'),
            "password": os.environ.get('ELASTICSEARCH_PASSWORD', 'changeme'),
            "verify_certs": False,
            "use_ssl": False,
            "embedding_model_dims": 1536
        })
    elif os.environ.get('OPENSEARCH_HOST') and os.environ.get('OPENSEARCH_PORT'):
        vector_store_provider = "opensearch"
        vector_store_config.update({
            "host": os.environ.get('OPENSEARCH_HOST'),
            "port": int(os.environ.get('OPENSEARCH_PORT'))
        })
    elif os.environ.get('FAISS_PATH'):
        vector_store_provider = "faiss"
        vector_store_config = {
            "collection_name": "openmemory",
            "path": os.environ.get('FAISS_PATH'),
            "embedding_model_dims": 1536,
            "distance_strategy": "cosine"
        }
    else:
        # Default fallback to Qdrant
        vector_store_provider = "qdrant"
        vector_store_config.update({
            "port": 6333,
        })
    print(f"Auto-detected vector store: {vector_store_provider} with config: {vector_store_config}")
    return {
        "vector_store": {
            "provider": vector_store_provider,
            "config": vector_store_config
        },
        "llm": {
            "provider": "openai",
            "config": {
                "model": "gpt-4o-mini",
                "temperature": 0.1,
                "max_tokens": 2000,
                "api_key": "env:OPENAI_API_KEY"
            }
        },
        "embedder": {
            "provider": "openai",
            "config": {
                "model": "text-embedding-3-small",
                "api_key": "env:OPENAI_API_KEY"
            }
        },
        "version": "v1.1"
    }
def _parse_environment_variables(config_dict):
    """Recursively resolve ``env:VARIABLE_NAME`` placeholders in a config mapping.

    Any string value of the form ``env:NAME`` is replaced by the value of the
    environment variable ``NAME`` when it is set (and non-empty); otherwise the
    placeholder is kept unchanged so the caller can see what was missing.
    Nested dicts are processed recursively; non-dict inputs are returned as-is.
    """
    if not isinstance(config_dict, dict):
        return config_dict

    resolved = {}
    for key, value in config_dict.items():
        if isinstance(value, dict):
            # Recurse into nested configuration sections.
            resolved[key] = _parse_environment_variables(value)
            continue
        if not (isinstance(value, str) and value.startswith("env:")):
            # Plain value — copy through untouched.
            resolved[key] = value
            continue
        env_var = value.split(":", 1)[1]
        env_value = os.environ.get(env_var)
        if env_value:
            print(f"Loaded {env_var} from environment for {key}")
            resolved[key] = env_value
        else:
            print(f"Warning: Environment variable {env_var} not found, keeping original value")
            resolved[key] = value
    return resolved
def get_memory_client(custom_instructions: str = None):
    """
    Get or initialize the Mem0 client.

    The client is cached in the module-level ``_memory_client`` and only
    re-initialized when the effective configuration hash changes.

    Args:
        custom_instructions: Optional instructions for the memory project.
            Takes precedence over instructions stored in the database config.

    Returns:
        Initialized Mem0 client instance or None if initialization fails.

    Raises:
        Exception: If required API keys are not set or critical configuration is missing.
    """
    global _memory_client, _config_hash
    try:
        # Start with default configuration
        config = get_default_memory_config()

        # Variable to track custom instructions loaded from the database
        db_custom_instructions = None

        # Load configuration from database; fall back to defaults on any error.
        db = None
        try:
            db = SessionLocal()
            db_config = db.query(ConfigModel).filter(ConfigModel.key == "main").first()
            if db_config:
                json_config = db_config.value
                # Extract custom instructions from openmemory settings
                if "openmemory" in json_config and "custom_instructions" in json_config["openmemory"]:
                    db_custom_instructions = json_config["openmemory"]["custom_instructions"]
                # Override defaults with configurations from the database
                if "mem0" in json_config:
                    mem0_config = json_config["mem0"]
                    # Update LLM configuration if available
                    if "llm" in mem0_config and mem0_config["llm"] is not None:
                        config["llm"] = mem0_config["llm"]
                        # Fix Ollama URLs for Docker if needed
                        if config["llm"].get("provider") == "ollama":
                            config["llm"] = _fix_ollama_urls(config["llm"])
                    # Update Embedder configuration if available
                    if "embedder" in mem0_config and mem0_config["embedder"] is not None:
                        config["embedder"] = mem0_config["embedder"]
                        # Fix Ollama URLs for Docker if needed
                        if config["embedder"].get("provider") == "ollama":
                            config["embedder"] = _fix_ollama_urls(config["embedder"])
                    if "vector_store" in mem0_config and mem0_config["vector_store"] is not None:
                        config["vector_store"] = mem0_config["vector_store"]
            else:
                print("No configuration found in database, using defaults")
        except Exception as e:
            print(f"Warning: Error loading configuration from database: {e}")
            print("Using default configuration")
            # Continue with default configuration if database config can't be loaded
        finally:
            # BUGFIX: previously db.close() only ran on the success path, so a
            # failed query leaked the session. Close it unconditionally.
            if db is not None:
                db.close()

        # Use custom_instructions parameter first, then fall back to database value
        instructions_to_use = custom_instructions or db_custom_instructions
        if instructions_to_use:
            config["custom_fact_extraction_prompt"] = instructions_to_use

        # ALWAYS parse environment variables in the final config
        # This ensures that even default config values like "env:OPENAI_API_KEY" get parsed
        print("Parsing environment variables in final config...")
        config = _parse_environment_variables(config)

        # Check if config has changed by comparing hashes
        current_config_hash = _get_config_hash(config)

        # Only reinitialize if config changed or client doesn't exist
        if _memory_client is None or _config_hash != current_config_hash:
            print(f"Initializing memory client with config hash: {current_config_hash}")
            try:
                _memory_client = Memory.from_config(config_dict=config)
                _config_hash = current_config_hash
                print("Memory client initialized successfully")
            except Exception as init_error:
                print(f"Warning: Failed to initialize memory client: {init_error}")
                print("Server will continue running with limited memory functionality")
                # Reset the cache so the next call retries initialization.
                _memory_client = None
                _config_hash = None
                return None
        return _memory_client
    except Exception as e:
        print(f"Warning: Exception occurred while initializing memory client: {e}")
        print("Server will continue running with limited memory functionality")
        return None
def get_default_user_id():
    """Return the fallback user id used when no explicit user is supplied."""
    return "default_user"
| {
"repo_id": "mem0ai/mem0",
"file_path": "openmemory/api/app/utils/memory.py",
"license": "Apache License 2.0",
"lines": 330,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:openmemory/api/app/utils/permissions.py | from typing import Optional
from uuid import UUID
from app.models import App, Memory, MemoryState
from sqlalchemy.orm import Session
def check_memory_access_permissions(
    db: Session,
    memory: Memory,
    app_id: Optional[UUID] = None
) -> bool:
    """
    Check if the given app has permission to access a memory based on:
    1. Memory state (must be active)
    2. App state (must not be paused)
    3. App-specific access controls

    Args:
        db: Database session
        memory: Memory object to check access for
        app_id: Optional app ID to check permissions for

    Returns:
        bool: True if access is allowed, False otherwise
    """
    # Inactive memories are never accessible, regardless of the caller.
    if memory.state != MemoryState.active:
        return False

    # Without an app context there is nothing further to restrict on.
    if not app_id:
        return True

    # The requesting app must exist and must not be paused.
    requesting_app = db.query(App).filter(App.id == app_id).first()
    if requesting_app is None or not requesting_app.is_active:
        return False

    # Deferred import — presumably avoids a circular import with the
    # memories router; confirm before moving to module level.
    from app.routers.memories import get_accessible_memory_ids

    accessible_memory_ids = get_accessible_memory_ids(db, app_id)
    # None signals "no restriction": every memory is accessible to this app.
    return accessible_memory_ids is None or memory.id in accessible_memory_ids
| {
"repo_id": "mem0ai/mem0",
"file_path": "openmemory/api/app/utils/permissions.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:openmemory/api/app/utils/prompts.py | MEMORY_CATEGORIZATION_PROMPT = """Your task is to assign each piece of information (or “memory”) to one or more of the following categories. Feel free to use multiple categories per item when appropriate.
- Personal: family, friends, home, hobbies, lifestyle
- Relationships: social network, significant others, colleagues
- Preferences: likes, dislikes, habits, favorite media
- Health: physical fitness, mental health, diet, sleep
- Travel: trips, commutes, favorite places, itineraries
- Work: job roles, companies, projects, promotions
- Education: courses, degrees, certifications, skills development
- Projects: to‑dos, milestones, deadlines, status updates
- AI, ML & Technology: infrastructure, algorithms, tools, research
- Technical Support: bug reports, error logs, fixes
- Finance: income, expenses, investments, billing
- Shopping: purchases, wishlists, returns, deliveries
- Legal: contracts, policies, regulations, privacy
- Entertainment: movies, music, games, books, events
- Messages: emails, SMS, alerts, reminders
- Customer Support: tickets, inquiries, resolutions
- Product Feedback: ratings, bug reports, feature requests
- News: articles, headlines, trending topics
- Organization: meetings, appointments, calendars
- Goals: ambitions, KPIs, long‑term objectives
Guidelines:
- Return only the categories under 'categories' key in the JSON format.
- If you cannot categorize the memory, return an empty list with key 'categories'.
- Don't limit yourself to the categories listed above only. Feel free to create new categories based on the memory. Make sure that it is a single phrase.
"""
| {
"repo_id": "mem0ai/mem0",
"file_path": "openmemory/api/app/utils/prompts.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mem0ai/mem0:openmemory/api/main.py | import datetime
from uuid import uuid4
from app.config import DEFAULT_APP_ID, USER_ID
from app.database import Base, SessionLocal, engine
from app.mcp_server import setup_mcp_server
from app.models import App, User
from app.routers import apps_router, backup_router, config_router, memories_router, stats_router
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi_pagination import add_pagination
# FastAPI application instance for the OpenMemory API.
app = FastAPI(title="OpenMemory API")

# NOTE(review): CORS is wide open (all origins, methods, headers, with
# credentials) — acceptable for local development; confirm before exposing
# this API publicly.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Create all tables
Base.metadata.create_all(bind=engine)
# Check for USER_ID and create default user if needed
def create_default_user():
    """Ensure a User row keyed by USER_ID exists, creating it when absent."""
    session = SessionLocal()
    try:
        # Look up the configured default user; create it only if missing.
        existing = session.query(User).filter(User.user_id == USER_ID).first()
        if existing is None:
            session.add(
                User(
                    id=uuid4(),
                    user_id=USER_ID,
                    name="Default User",
                    created_at=datetime.datetime.now(datetime.UTC),
                )
            )
            session.commit()
    finally:
        session.close()
def create_default_app():
    """Ensure the default App row exists for the default user, creating it when absent."""
    session = SessionLocal()
    try:
        owner = session.query(User).filter(User.user_id == USER_ID).first()
        if owner is None:
            # No default user yet — nothing to attach the app to.
            return

        already_present = session.query(App).filter(
            App.name == DEFAULT_APP_ID,
            App.owner_id == owner.id,
        ).first()
        if already_present is not None:
            return

        session.add(
            App(
                id=uuid4(),
                name=DEFAULT_APP_ID,
                owner_id=owner.id,
                created_at=datetime.datetime.now(datetime.UTC),
                updated_at=datetime.datetime.now(datetime.UTC),
            )
        )
        session.commit()
    finally:
        session.close()
# Create default user on startup
create_default_user()
create_default_app()
# Setup MCP server
setup_mcp_server(app)
# Include routers
app.include_router(memories_router)
app.include_router(apps_router)
app.include_router(stats_router)
app.include_router(config_router)
app.include_router(backup_router)
# Add pagination support
add_pagination(app)
| {
"repo_id": "mem0ai/mem0",
"file_path": "openmemory/api/main.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mem0ai/mem0:mem0/embeddings/aws_bedrock.py | import json
import os
from typing import Literal, Optional
try:
import boto3
except ImportError:
raise ImportError("The 'boto3' library is required. Please install it using 'pip install boto3'.")
import numpy as np
from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.embeddings.base import EmbeddingBase
class AWSBedrockEmbedding(EmbeddingBase):
    """AWS Bedrock embedding implementation.

    This class uses AWS Bedrock's embedding models. Credentials are read from
    the environment and may be overridden by attributes on the config object;
    empty values are passed to boto3 as None so its default credential chain
    can take over.
    """

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        super().__init__(config)
        self.config.model = self.config.model or "amazon.titan-embed-text-v1"

        # Get AWS config from environment variables or use defaults
        aws_access_key = os.environ.get("AWS_ACCESS_KEY_ID", "")
        aws_secret_key = os.environ.get("AWS_SECRET_ACCESS_KEY", "")
        aws_session_token = os.environ.get("AWS_SESSION_TOKEN", "")

        # Config-provided credentials take precedence over the environment.
        if hasattr(self.config, "aws_access_key_id"):
            aws_access_key = self.config.aws_access_key_id
        if hasattr(self.config, "aws_secret_access_key"):
            aws_secret_key = self.config.aws_secret_access_key
        # FIX: the session token was previously only read from the environment;
        # allow a config override for consistency with the keys above.
        if hasattr(self.config, "aws_session_token"):
            aws_session_token = self.config.aws_session_token

        # AWS region is always set in config - see BaseEmbedderConfig
        aws_region = self.config.aws_region or "us-west-2"

        self.client = boto3.client(
            "bedrock-runtime",
            region_name=aws_region,
            aws_access_key_id=aws_access_key if aws_access_key else None,
            aws_secret_access_key=aws_secret_key if aws_secret_key else None,
            aws_session_token=aws_session_token if aws_session_token else None,
        )

    def _normalize_vector(self, embeddings):
        """Normalize the embedding to a unit vector.

        Returns the input unchanged (as a list) when its norm is zero, to
        avoid a division-by-zero producing NaNs.
        """
        emb = np.array(embeddings)
        norm = np.linalg.norm(emb)
        if norm == 0:
            return emb.tolist()
        return (emb / norm).tolist()

    def _get_embedding(self, text):
        """Call out to Bedrock embedding endpoint.

        The request/response shape differs per provider (derived from the
        model id prefix): Cohere models take a "texts" list and return
        "embeddings"; Amazon/other models take "inputText" and return
        "embedding".

        Raises:
            ValueError: If the Bedrock invocation fails, chained to the
                original exception.
        """
        # Format input body based on the provider
        provider = self.config.model.split(".")[0]
        input_body = {}

        if provider == "cohere":
            input_body["input_type"] = "search_document"
            input_body["texts"] = [text]
        else:
            # Amazon and other providers
            input_body["inputText"] = text

        body = json.dumps(input_body)

        try:
            response = self.client.invoke_model(
                body=body,
                modelId=self.config.model,
                accept="application/json",
                contentType="application/json",
            )
            response_body = json.loads(response.get("body").read())
            if provider == "cohere":
                embeddings = response_body.get("embeddings")[0]
            else:
                embeddings = response_body.get("embedding")
            return embeddings
        except Exception as e:
            # Chain the original exception so the root cause is preserved.
            raise ValueError(f"Error getting embedding from AWS Bedrock: {e}") from e

    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
        """
        Get the embedding for the given text using AWS Bedrock.

        Args:
            text (str): The text to embed.
            memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.
                Currently unused — all actions produce the same embedding.

        Returns:
            list: The embedding vector.
        """
        return self._get_embedding(text)
| {
"repo_id": "mem0ai/mem0",
"file_path": "mem0/embeddings/aws_bedrock.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mem0ai/mem0:tests/memory/test_main.py | import logging
from unittest.mock import MagicMock
import pytest
from mem0.memory.main import AsyncMemory, Memory
def _setup_mocks(mocker):
    """Helper to setup common mocks for both sync and async fixtures.

    Patches the embedder, vector-store and LLM factories plus the SQLite
    storage so Memory()/AsyncMemory() can be constructed without real
    backends.

    Returns:
        tuple: (mock_llm, mock_vector_store) — the patched factory mocks.
    """
    mock_embedder = mocker.MagicMock()
    # Embedding calls return a fixed small vector.
    mock_embedder.return_value.embed.return_value = [0.1, 0.2, 0.3]
    mocker.patch("mem0.utils.factory.EmbedderFactory.create", mock_embedder)

    mock_vector_store = mocker.MagicMock()
    # Vector search finds nothing, so no existing memories are returned.
    mock_vector_store.return_value.search.return_value = []
    # side_effect sequence: the first create() call yields the shared store
    # mock asserted on by tests; any second call gets a fresh MagicMock.
    mocker.patch(
        "mem0.utils.factory.VectorStoreFactory.create", side_effect=[mock_vector_store.return_value, mocker.MagicMock()]
    )

    mock_llm = mocker.MagicMock()
    mocker.patch("mem0.utils.factory.LlmFactory.create", mock_llm)

    mocker.patch("mem0.memory.storage.SQLiteManager", mocker.MagicMock())

    return mock_llm, mock_vector_store
class TestAddToVectorStoreErrors:
    """Error-path tests for Memory._add_to_vector_store (sync client)."""

    @pytest.fixture
    def mock_memory(self, mocker):
        """Fixture that returns a Memory instance with mocker-based mocks"""
        mock_llm, _ = _setup_mocks(mocker)
        memory = Memory()
        # Replace the real config so prompt lookups resolve to None.
        memory.config = mocker.MagicMock()
        memory.config.custom_fact_extraction_prompt = None
        memory.config.custom_update_memory_prompt = None
        memory.api_version = "v1.1"
        return memory

    def test_empty_llm_response_fact_extraction(self, mocker, mock_memory, caplog):
        """Test empty response from LLM during fact extraction"""
        # Setup
        mock_memory.llm.generate_response.return_value = "invalid json" # This will trigger a JSON decode error
        mock_capture_event = mocker.MagicMock()
        mocker.patch("mem0.memory.main.capture_event", mock_capture_event)

        # Execute
        with caplog.at_level(logging.ERROR):
            result = mock_memory._add_to_vector_store(
                messages=[{"role": "user", "content": "test"}], metadata={}, filters={}, infer=True
            )

        # Verify: the LLM is consulted exactly once, the error is logged, and
        # no memories are produced from the unparseable response.
        assert mock_memory.llm.generate_response.call_count == 1
        assert result == [] # Should return empty list when no memories processed
        # Check for error message in any of the log records
        assert any("Error in new_retrieved_facts" in record.msg for record in caplog.records), "Expected error message not found in logs"
        assert mock_capture_event.call_count == 1

    def test_empty_llm_response_memory_actions(self, mock_memory, caplog):
        """Test empty response from LLM during memory actions"""
        # Setup
        # First call returns valid JSON, second call returns empty string
        mock_memory.llm.generate_response.side_effect = ['{"facts": ["test fact"]}', ""]

        # Execute
        with caplog.at_level(logging.WARNING):
            result = mock_memory._add_to_vector_store(
                messages=[{"role": "user", "content": "test"}], metadata={}, filters={}, infer=True
            )

        # Verify: both LLM calls happen (fact extraction, then memory actions),
        # and the empty second response is logged as a warning.
        assert mock_memory.llm.generate_response.call_count == 2
        assert result == [] # Should return empty list when no memories processed
        assert "Empty response from LLM, no memories to extract" in caplog.text
@pytest.mark.asyncio
class TestAsyncAddToVectorStoreErrors:
    """Error-path tests for AsyncMemory._add_to_vector_store (async client).

    NOTE: the async API takes effective_filters= where the sync one takes
    filters=.
    """

    @pytest.fixture
    def mock_async_memory(self, mocker):
        """Fixture for AsyncMemory with mocker-based mocks"""
        mock_llm, _ = _setup_mocks(mocker)
        memory = AsyncMemory()
        # Replace the real config so prompt lookups resolve to None.
        memory.config = mocker.MagicMock()
        memory.config.custom_fact_extraction_prompt = None
        memory.config.custom_update_memory_prompt = None
        memory.api_version = "v1.1"
        return memory

    @pytest.mark.asyncio
    async def test_async_empty_llm_response_fact_extraction(self, mock_async_memory, caplog, mocker):
        """Test empty response in AsyncMemory._add_to_vector_store"""
        mocker.patch("mem0.utils.factory.EmbedderFactory.create", return_value=MagicMock())
        mock_async_memory.llm.generate_response.return_value = "invalid json" # This will trigger a JSON decode error
        mock_capture_event = mocker.MagicMock()
        mocker.patch("mem0.memory.main.capture_event", mock_capture_event)

        with caplog.at_level(logging.ERROR):
            result = await mock_async_memory._add_to_vector_store(
                messages=[{"role": "user", "content": "test"}], metadata={}, effective_filters={}, infer=True
            )

        # One LLM call, no memories produced, parse error logged.
        assert mock_async_memory.llm.generate_response.call_count == 1
        assert result == []
        # Check for error message in any of the log records
        assert any("Error in new_retrieved_facts" in record.msg for record in caplog.records), "Expected error message not found in logs"
        assert mock_capture_event.call_count == 1

    @pytest.mark.asyncio
    async def test_async_empty_llm_response_memory_actions(self, mock_async_memory, caplog, mocker):
        """Test empty response in AsyncMemory._add_to_vector_store"""
        mocker.patch("mem0.utils.factory.EmbedderFactory.create", return_value=MagicMock())
        # First call returns valid JSON, second call returns empty string.
        mock_async_memory.llm.generate_response.side_effect = ['{"facts": ["test fact"]}', ""]
        mock_capture_event = mocker.MagicMock()
        mocker.patch("mem0.memory.main.capture_event", mock_capture_event)

        with caplog.at_level(logging.WARNING):
            result = await mock_async_memory._add_to_vector_store(
                messages=[{"role": "user", "content": "test"}], metadata={}, effective_filters={}, infer=True
            )

        # No memories produced; the empty second response is logged as a warning.
        assert result == []
        assert "Empty response from LLM, no memories to extract" in caplog.text
        assert mock_capture_event.call_count == 1
| {
"repo_id": "mem0ai/mem0",
"file_path": "tests/memory/test_main.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.