sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
run-llama/llama_index:llama-index-integrations/observability/llama-index-observability-otel/llama_index/observability/otel/utils.py | import json
from collections.abc import Sequence as ABCSequence
from typing import Any
BASE_TYPES = (int, str, bool, bytes, float)
def _is_otel_supported_type(obj: Any) -> bool:
# If it's one of the base types
if isinstance(obj, BASE_TYPES):
return True
# If it's a sequence (but not a string or bytes, which are sequences too)
if isinstance(obj, ABCSequence) and not isinstance(obj, (str, bytes)):
return all(isinstance(item, BASE_TYPES) for item in obj)
return False
def filter_model_fields(model_dict: dict) -> dict:
newdict = {}
for field in model_dict:
if _is_otel_supported_type(model_dict[field]):
newdict.update({field: model_dict[field]})
return newdict
def flatten_dict(d: dict, parent_key: str = "", sep: str = ".") -> dict:
"""
Flatten a nested dictionary into a single-level dict with dot-notation keys.
Nested dicts are recursively flattened. Values that are OTel-supported types
are kept as-is. Unsupported types (e.g., nested lists of dicts) are JSON
serialized to preserve the data.
Example:
{"user": {"name": "alice", "age": 30}}
becomes
{"user.name": "alice", "user.age": 30}
"""
items: list[tuple[str, Any]] = []
for k, v in d.items():
new_key = f"{parent_key}{sep}{k}" if parent_key else k
if isinstance(v, dict):
items.extend(flatten_dict(v, new_key, sep).items())
elif _is_otel_supported_type(v):
items.append((new_key, v))
else:
# Fallback: JSON serialize unsupported types to preserve data
items.append((new_key, json.dumps(v, default=str)))
return dict(items)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/observability/llama-index-observability-otel/llama_index/observability/otel/utils.py",
"license": "MIT License",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/agent/llama-index-agent-azure/llama_index/agent/azure_foundry_agent/base.py | import asyncio
import json
from typing import List, Sequence, Optional
from azure.ai.agents.models import (
MessageInputContentBlock,
SubmitToolOutputsAction,
RequiredFunctionToolCall,
FunctionTool,
ToolSet,
)
from azure.ai.agents.models._models import ThreadRun
from azure.ai.projects.aio import AIProjectClient
from azure.identity.aio import DefaultAzureCredential
from llama_index.core.agent.workflow.base_agent import BaseWorkflowAgent
from llama_index.core.agent.workflow.workflow_events import AgentOutput, ToolCallResult
from llama_index.core.llms import ChatMessage, MockLLM
from llama_index.core.memory import BaseMemory
from llama_index.core.tools import AsyncBaseTool, ToolSelection
from llama_index.core.workflow import Context
from llama_index.core.tools import FunctionTool as LLamaIndexFunctionTool
from llama_index.core.base.llms.types import ChatMessage, TextBlock, ImageBlock
class AzureFoundryAgent(BaseWorkflowAgent):
"""
Workflow-compatible Azure Foundry Agent for multi-agent orchestration.
Inherits from BaseWorkflowAgent.
Implements async methods for workflow integration using the async Azure SDK.
"""
def __init__(
self,
endpoint: str,
model: str = "gpt-4o-mini",
name: str = "azure-agent",
instructions: str = "You are a helpful agent",
thread_id: Optional[str] = None,
agent_id: Optional[str] = None,
run_retrieve_sleep_time: float = 1.0,
verbose: bool = False,
**kwargs,
):
super().__init__(name=name, llm=MockLLM(), **kwargs)
self._endpoint = endpoint
self._model = model
self._instructions = instructions
self._run_retrieve_sleep_time = run_retrieve_sleep_time
self._thread_id = thread_id
self._agent_id = agent_id
self._agent = None
self._run_id = None
self._credential = DefaultAzureCredential()
self._client = AIProjectClient(endpoint=endpoint, credential=self._credential)
self._verbose = verbose
# self.tools = tools if tools is not None else []
self._toolset = ToolSet()
async def _ensure_agent(self, tools: Sequence[AsyncBaseTool]) -> None:
if self._agent is None:
if self._agent_id is not None:
if self._verbose:
print(
f"[AzureFoundryWorkflowAgent] Fetching existing agent with id={self._agent_id}"
)
self._agent = await self._client.agents.get_agent(self._agent_id)
else:
if self._verbose:
print(
f"[AzureFoundryWorkflowAgent] Creating new agent with model={self._model}, name={self.name}"
)
func_tools = []
for t in tools or []:
if isinstance(t, LLamaIndexFunctionTool):
func_tools.append(t.fn)
if func_tools:
self._toolset.add(FunctionTool(functions=set(func_tools)))
self._agent = await self._client.agents.create_agent(
model=self._model,
name=self.name,
instructions=self._instructions,
toolset=self._toolset,
)
self._agent_id = self._agent.id
if self._verbose:
print(
f"[AzureFoundryWorkflowAgent] Created agent with id={self._agent_id}"
)
if self._thread_id is None:
if self._verbose:
print(f"[AzureFoundryWorkflowAgent] Creating new thread.")
thread = await self._client.agents.threads.create()
self._thread_id = thread.id
if self._verbose:
print(
f"[AzureFoundryWorkflowAgent] Created thread with id={self._thread_id}"
)
def _llama_to_azure_content_blocks(
self, chat_messages: List[ChatMessage]
) -> list[MessageInputContentBlock]:
"""
Internal: Convert a list of LlamaIndex ChatMessage to a list of Azure MessageInputContentBlock.
Supports text and image blocks. Extend as needed for audio/document.
"""
from azure.ai.agents.models import (
MessageInputTextBlock,
MessageInputImageFileBlock,
MessageInputImageUrlBlock,
MessageImageFileParam,
MessageImageUrlParam,
)
azure_blocks: list[MessageInputContentBlock] = []
for msg in chat_messages:
for block in getattr(msg, "blocks", []):
if isinstance(block, TextBlock):
azure_blocks.append(MessageInputTextBlock(text=block.text))
elif isinstance(block, ImageBlock):
if block.path or block.image:
file_id = str(block.path) if block.path else None
if file_id:
azure_blocks.append(
MessageInputImageFileBlock(
image_file=MessageImageFileParam(
file_id=file_id, detail=block.detail
)
)
)
elif block.url:
azure_blocks.append(
MessageInputImageUrlBlock(
image_url=MessageImageUrlParam(
url=str(block.url), detail=block.detail
)
)
)
else:
raise ValueError(f"Unsupported block type: {type(block)}")
return azure_blocks
async def take_step(
self,
ctx: Context,
llm_input: List[ChatMessage],
tools: Sequence[AsyncBaseTool],
memory: BaseMemory,
) -> AgentOutput:
"""
Take a single step with the Azure Foundry agent.
Interacts with Azure backend and returns AgentOutput (response, tool_calls, etc).
"""
# Convert the entire llm_input to Azure content blocks
azure_content_blocks = (
self._llama_to_azure_content_blocks(llm_input) if llm_input else []
)
await self._ensure_agent(tools=tools)
assert self._thread_id is not None, (
"Thread ID must be set after _ensure_agent()"
)
assert self._agent is not None, "Agent must be set after _ensure_agent()"
tool_calls = []
response_msg = None
# Only send a user message if there is new user input
if azure_content_blocks:
if self._verbose:
print(
f"[AzureFoundryWorkflowAgent] Sending user message blocks to thread_id={self._thread_id}"
)
await self._client.agents.messages.create(
thread_id=self._thread_id, role="user", content=azure_content_blocks
)
if self._verbose:
print(
f"[AzureFoundryWorkflowAgent] Starting run for agent_id={self._agent.id} on thread_id={self._thread_id}"
)
run = await self._client.agents.runs.create(
thread_id=self._thread_id, agent_id=self._agent.id
)
self._run_id = run.id
current_run = run
while current_run.status in ["queued", "in_progress", "requires_action"]:
await asyncio.sleep(self._run_retrieve_sleep_time)
current_run = await self._client.agents.runs.get(
thread_id=self._thread_id, run_id=self._run_id
)
if self._verbose:
print(
f"[AzureFoundryWorkflowAgent] Run status: {current_run.status}"
)
if current_run.status == "requires_action":
if self._verbose:
print(
f"[AzureFoundryWorkflowAgent] Run requires action: {getattr(current_run, 'required_action', None)}"
)
break
if current_run.status == "failed":
return AgentOutput(
response=ChatMessage(role="assistant", content="Run failed."),
tool_calls=[],
raw=current_run,
current_agent_name=self.name,
)
required_action = getattr(current_run, "required_action", None)
if (
required_action
and getattr(required_action, "type", None) == "submit_tool_outputs"
and isinstance(required_action, SubmitToolOutputsAction)
):
submit_tool_outputs = required_action.submit_tool_outputs
for call in getattr(submit_tool_outputs, "tool_calls", []):
# For function tool calls
if isinstance(call, RequiredFunctionToolCall):
function = getattr(call, "function", None)
tool_name = getattr(function, "name", "") if function else ""
arguments = (
getattr(function, "arguments", "{}") if function else "{}"
)
try:
tool_kwargs = json.loads(arguments)
except Exception:
tool_kwargs = {}
tool_calls.append(
ToolSelection(
tool_id=getattr(call, "id", ""),
tool_name=tool_name,
tool_kwargs=tool_kwargs,
)
)
# Get the latest assistant message if available
latest_msg = None
async for msg in self._client.agents.messages.list(
thread_id=self._thread_id, run_id=self._run_id, order="desc"
):
if getattr(msg, "role", None) == "assistant" and getattr(
msg, "content", None
):
latest_msg = self._from_azure_thread_message(msg)
break
# If no assistant message found, try to get the last assistant message in the thread
if not latest_msg:
async for msg in self._client.agents.messages.list(
thread_id=self._thread_id, order="desc"
):
if getattr(msg, "role", None) == "assistant" and getattr(
msg, "content", None
):
latest_msg = self._from_azure_thread_message(msg)
break
response_msg = (
latest_msg
if latest_msg
else ChatMessage(role="assistant", content="No response from agent.")
)
else:
# No new user input: fetch the latest assistant message after tool call resolution
latest_msg = None
async for msg in self._client.agents.messages.list(
thread_id=self._thread_id, order="desc"
):
if getattr(msg, "role", None) == "assistant" and getattr(
msg, "content", None
):
latest_msg = self._from_azure_thread_message(msg)
break
response_msg = (
latest_msg
if latest_msg
else ChatMessage(role="assistant", content="No response from agent.")
)
return AgentOutput(
response=response_msg,
tool_calls=tool_calls,
raw=current_run if azure_content_blocks else None,
current_agent_name=self.name,
)
async def handle_tool_call_results(
self, ctx: Context, results: List[ToolCallResult], memory: BaseMemory
) -> None:
"""
Handle tool call results for Azure Foundry agent.
Submits results to Azure backend and updates state/context as needed.
Waits for run to reach a terminal state or another action required.
Also appends tool call results to the scratchpad for context tracking.
"""
# Convert ToolCallResult to Azure tool_outputs format
tool_outputs = []
for result in results:
tool_outputs.append(
{
"tool_call_id": result.tool_id,
"output": result.tool_output.content,
}
)
# Submit tool outputs to Azure
assert self._thread_id is not None, "Thread ID must be set."
assert self._run_id is not None, "Run ID must be set."
if self._verbose:
print(
f"[AzureFoundryWorkflowAgent] Submitting tool call results for run_id={self._run_id} on thread_id={self._thread_id}: {tool_outputs}"
)
await self._client.agents.runs.submit_tool_outputs(
thread_id=self._thread_id, run_id=self._run_id, tool_outputs=tool_outputs
)
if self._verbose:
print(
f"[AzureFoundryWorkflowAgent] Tool outputs submitted. Waiting for run to reach terminal state or next action required..."
)
# Wait for run to reach a terminal state or another action required
while True:
run: ThreadRun = await self._client.agents.runs.get(
thread_id=self._thread_id, run_id=self._run_id
)
if run.status not in ["queued", "in_progress", "requires_action"]:
if self._verbose:
print(
f"[AzureFoundryWorkflowAgent] Run reached terminal state: {run.status}"
)
# Print detailed debug info if failed
if run.status == "failed":
print(
"[AzureFoundryWorkflowAgent][DEBUG] Run failed. Full run object:"
)
print(run)
# Try to print error fields if present
error_fields = [
"error",
"last_error",
"failure_reason",
"failure_message",
]
for field in error_fields:
if hasattr(run, field):
print(
f"[AzureFoundryWorkflowAgent][DEBUG] {field}: {getattr(run, field)}"
)
break
if run.status == "requires_action":
if self._verbose:
print(
f"[AzureFoundryWorkflowAgent] Run requires another action: {getattr(run, 'required_action', None)}"
)
break
await asyncio.sleep(self._run_retrieve_sleep_time)
tool_message = f"A tool call was executed : {results!s}"
await self._client.agents.messages.create(
thread_id=self._thread_id, role="assistant", content=tool_message
)
async def finalize(
self, ctx: Context, output: AgentOutput, memory: BaseMemory
) -> AgentOutput:
"""
Finalize the agent's execution (persist state, cleanup, etc).
For AzureFoundryWorkflowAgent, this can be a no-op or can persist any final state if needed.
"""
# Optionally, persist any final state to memory or context
# For now, just return the output as-is
return output
async def close(self):
"""
Close the underlying async Azure client session and credential.
"""
if self._verbose:
print(f"[AzureFoundryWorkflowAgent] Closing the session.")
await self._client.close()
await self._credential.close()
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.close()
def _from_azure_thread_message(self, thread_message: object) -> ChatMessage:
"""
Convert an Azure/OpenAI thread message to a LlamaIndex ChatMessage.
Supports text and image_url content blocks for multimodal support.
"""
from llama_index.core.base.llms.types import ChatMessage, TextBlock, ImageBlock
blocks = []
for t in getattr(thread_message, "content", []):
t_type = getattr(t, "type", None)
if t_type == "text":
text_val = getattr(getattr(t, "text", None), "value", "")
blocks.append(TextBlock(text=text_val))
elif t_type == "image_url":
url_val = getattr(getattr(t, "image_url", None), "url", None)
detail_val = getattr(getattr(t, "image_url", None), "detail", None)
if url_val:
blocks.append(ImageBlock(url=url_val, detail=detail_val))
# Compose content string for backward compatibility (concatenate text blocks)
content_str = " ".join([b.text for b in blocks if hasattr(b, "text")])
return ChatMessage(
role=getattr(thread_message, "role", ""),
content=content_str,
blocks=blocks,
additional_kwargs={
"thread_message": thread_message,
"thread_id": getattr(thread_message, "thread_id", None),
"assistant_id": getattr(thread_message, "assistant_id", None),
"id": getattr(thread_message, "id", None),
"metadata": getattr(thread_message, "metadata", None),
},
)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/agent/llama-index-agent-azure/llama_index/agent/azure_foundry_agent/base.py",
"license": "MIT License",
"lines": 393,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/agent/llama-index-agent-azure/tests/test_azure_foundry_agent.py | import pytest
import tempfile
import json
from pathlib import Path
from types import SimpleNamespace
from unittest.mock import AsyncMock, MagicMock, patch
from azure.ai.agents.models import (
MessageInputTextBlock,
MessageInputImageFileBlock,
MessageInputImageUrlBlock,
)
from azure.ai.projects import AIProjectClient
from azure.ai.agents.models import Agent as AzureAgent, AgentThread
from llama_index.agent.azure_foundry_agent.base import AzureFoundryAgent
from llama_index.core.base.llms.types import ChatMessage, TextBlock, ImageBlock
from llama_index.core.agent.workflow.multi_agent_workflow import AgentWorkflow
from llama_index.core.memory import ChatMemoryBuffer
# Helper for async iteration (ensure only one definition)
class DummyAsyncIterator:
def __init__(self, items):
self._items = items
def __aiter__(self):
self._iter = iter(self._items)
return self
async def __anext__(self):
try:
return next(self._iter)
except StopIteration:
raise StopAsyncIteration
def test_azure_foundry_agent_constructor():
"""Test the constructor of AzureFoundryAgent."""
endpoint = "https://test-endpoint.com"
model = "gpt-4o"
name = "test-azure-agent"
instructions = "You are a test agent."
thread_id = "test-thread-123"
verbose = True
run_retrieve_sleep_time = 0.5
mock_project_client_instance = MagicMock(spec=AIProjectClient)
mock_azure_agent_instance = MagicMock(spec=AzureAgent)
mock_azure_agent_instance.id = "mock_agent_id_123"
mock_thread_instance = MagicMock(spec=AgentThread)
mock_thread_instance.id = thread_id
# Patch async methods with AsyncMock
mock_project_client_instance.agents.create_agent = AsyncMock(
return_value=mock_azure_agent_instance
)
mock_project_client_instance.agents.threads.create = AsyncMock(
return_value=mock_thread_instance
)
# Mock DefaultAzureCredential to avoid actual credential loading
with patch(
"llama_index.agent.azure_foundry_agent.base.DefaultAzureCredential", MagicMock()
) as mock_default_credential:
# Mock AIProjectClient constructor to return our mock instance
with patch(
"llama_index.agent.azure_foundry_agent.base.AIProjectClient",
return_value=mock_project_client_instance,
) as mock_ai_project_client_constructor:
# Mock the create_agent call
mock_project_client_instance.agents.create_agent.return_value = (
mock_azure_agent_instance
)
# Mock the threads.create call for when thread_id is None
mock_project_client_instance.agents.threads.create.return_value = (
mock_thread_instance
)
# Test case 1: Initialize with a specific thread_id
agent_with_thread = AzureFoundryAgent(
endpoint=endpoint,
model=model,
name=name,
instructions=instructions,
thread_id=thread_id,
verbose=verbose,
run_retrieve_sleep_time=run_retrieve_sleep_time,
)
mock_ai_project_client_constructor.assert_called_once_with(
endpoint=endpoint, credential=mock_default_credential.return_value
)
# Ensure threads.create was NOT called because thread_id was provided
mock_project_client_instance.agents.threads.create.assert_not_called()
assert isinstance(agent_with_thread, AzureFoundryAgent)
assert agent_with_thread._endpoint == endpoint
assert agent_with_thread._model == model
assert agent_with_thread.name == name
assert agent_with_thread._instructions == instructions
assert agent_with_thread._thread_id == thread_id
assert agent_with_thread._verbose == verbose
assert agent_with_thread._run_retrieve_sleep_time == run_retrieve_sleep_time
assert agent_with_thread._client == mock_project_client_instance
# Reset mocks for the next instantiation test
mock_ai_project_client_constructor.reset_mock()
mock_project_client_instance.reset_mock()
mock_default_credential.reset_mock()
# Mock the threads.create call for when thread_id is None
# Re-assign thread_id for the new mock thread instance if it's different
new_mock_thread_id = "new-mock-thread-456"
mock_thread_instance_new = MagicMock(spec=AgentThread)
mock_thread_instance_new.id = new_mock_thread_id
mock_project_client_instance.agents.threads.create = AsyncMock(
return_value=mock_thread_instance_new
)
# Test case 2: Initialize without a specific thread_id (should create one)
agent_new_thread = AzureFoundryAgent(
endpoint=endpoint,
model=model,
name=name,
instructions=instructions,
thread_id=None, # Test thread creation
verbose=verbose,
run_retrieve_sleep_time=run_retrieve_sleep_time,
)
assert agent_new_thread.name == name
assert agent_new_thread._client == mock_project_client_instance
# At this point, thread should not be created yet
mock_project_client_instance.agents.threads.create.assert_not_called()
# Now, trigger thread creation by calling _ensure_agent
import asyncio
asyncio.run(agent_new_thread._ensure_agent([]))
mock_project_client_instance.agents.threads.create.assert_called_once()
assert agent_new_thread._thread_id == new_mock_thread_id
@patch("azure.identity.aio.DefaultAzureCredential")
@patch("azure.ai.projects.aio.AIProjectClient")
@pytest.mark.asyncio # Added decorator
async def test_azure_foundry_agent_constructor_defaults( # Added async and mock arguments
mock_project_client_class: MagicMock, mock_credential_class: MagicMock
):
"""Test the constructor of AzureFoundryAgent with default values."""
endpoint = "https://test-endpoint.com"
model = "gpt-4o"
name = "test-azure-agent-defaults"
instructions = "You are a test agent. (defaults)"
thread_id = None
verbose = False
run_retrieve_sleep_time = 1.0
mock_project_client_instance = MagicMock(spec=AIProjectClient)
mock_azure_agent_instance = MagicMock(spec=AzureAgent)
mock_azure_agent_instance.id = "mock_agent_id_defaults"
mock_thread_instance = MagicMock(spec=AgentThread)
mock_thread_instance.id = "mock_thread_id_defaults"
# Patch async methods with AsyncMock
mock_project_client_instance.agents.create_agent = AsyncMock(
return_value=mock_azure_agent_instance
)
mock_project_client_instance.agents.threads.create = AsyncMock(
return_value=mock_thread_instance
)
with patch(
"llama_index.agent.azure_foundry_agent.base.DefaultAzureCredential", MagicMock()
):
with patch(
"llama_index.agent.azure_foundry_agent.base.AIProjectClient",
return_value=mock_project_client_instance,
):
# Test initialization with defaults
agent_defaults = AzureFoundryAgent(
endpoint=endpoint,
model=model,
name=name,
instructions=instructions,
thread_id=thread_id,
verbose=verbose,
run_retrieve_sleep_time=run_retrieve_sleep_time,
)
assert agent_defaults.name == name
assert agent_defaults._endpoint == endpoint
assert agent_defaults._model == model
assert agent_defaults._instructions == instructions
assert agent_defaults._thread_id is None
assert agent_defaults._verbose is False
assert agent_defaults._run_retrieve_sleep_time == run_retrieve_sleep_time
assert agent_defaults._client == mock_project_client_instance
# Ensure that create_agent and threads.create are called only after _ensure_agent
await agent_defaults._ensure_agent([])
print(
f"create_agent call count: {mock_project_client_instance.agents.create_agent.call_count}"
)
print(
f"threads.create call count: {mock_project_client_instance.agents.threads.create.call_count}"
)
mock_project_client_instance.agents.create_agent.assert_called_once()
mock_project_client_instance.agents.threads.create.assert_called_once()
# Check that the thread_id was set to the created thread's ID
assert agent_defaults._thread_id == mock_thread_instance.id
# Tests for _llama_to_azure_content_blocks
@pytest.mark.parametrize(
("desc", "chat_messages", "expected_types", "expected_values"),
[
(
"empty input",
[],
[],
[],
),
(
"text only",
[ChatMessage(role="user", blocks=[TextBlock(text="Hello")])],
[MessageInputTextBlock],
["Hello"],
),
(
"image url",
[
ChatMessage(
role="user",
blocks=[
ImageBlock(url="http://example.com/image.png", detail="low")
],
)
],
[MessageInputImageUrlBlock],
["http://example.com/image.png"],
),
(
"no blocks, just content",
[ChatMessage(role="user", content="Just text content, no blocks attr")],
[MessageInputTextBlock],
["Just text content, no blocks attr"],
),
(
"empty blocks",
[ChatMessage(role="user", blocks=[])],
[],
[],
),
(
"image block no path no url",
[
ChatMessage(
role="user",
blocks=[ImageBlock(image=b"some_image_data", detail="high")],
)
],
[],
[],
),
],
)
def test_llama_to_azure_content_blocks_param(
desc, chat_messages, expected_types, expected_values
):
agent = AzureFoundryAgent(endpoint="dummy_endpoint")
result = agent._llama_to_azure_content_blocks(chat_messages)
assert len(result) == len(expected_types)
for r, t, v in zip(result, expected_types, expected_values):
assert isinstance(r, t)
# Check value for text or url
if isinstance(r, MessageInputTextBlock):
assert r.text == v
elif isinstance(r, MessageInputImageUrlBlock):
assert r.image_url.url == v
def test_llama_to_azure_content_blocks_image_path_and_mixed():
agent = AzureFoundryAgent(endpoint="dummy_endpoint")
with tempfile.NamedTemporaryFile(suffix=".png") as tmp:
# image path
chat_messages = [
ChatMessage(
role="user", blocks=[ImageBlock(path=Path(tmp.name), detail="high")]
)
]
result = agent._llama_to_azure_content_blocks(chat_messages)
assert len(result) == 1
assert isinstance(result[0], MessageInputImageFileBlock)
assert result[0].image_file.file_id == tmp.name
assert result[0].image_file.detail == "high"
# mixed content
chat_messages = [
ChatMessage(
role="user",
blocks=[
TextBlock(text="Describe this image:"),
ImageBlock(path=Path(tmp.name)),
],
)
]
result = agent._llama_to_azure_content_blocks(chat_messages)
assert len(result) == 2
assert isinstance(result[0], MessageInputTextBlock)
assert result[0].text == "Describe this image:"
assert isinstance(result[1], MessageInputImageFileBlock)
assert result[1].image_file.file_id == tmp.name
# image block path preferred over image attr
with tempfile.NamedTemporaryFile(suffix=".png") as tmp:
chat_messages = [
ChatMessage(
role="user",
blocks=[ImageBlock(path=Path(tmp.name), image=b"image_bytes")],
)
]
result = agent._llama_to_azure_content_blocks(chat_messages)
assert len(result) == 1
assert isinstance(result[0], MessageInputImageFileBlock)
assert result[0].image_file.file_id == tmp.name
# image bytes only, should be skipped
chat_messages_bytes_only = [
ChatMessage(
role="user", blocks=[ImageBlock(image=b"image_bytes_data", detail="auto")]
)
]
result_bytes_only = agent._llama_to_azure_content_blocks(chat_messages_bytes_only)
assert len(result_bytes_only) == 0
def test_llama_to_azure_content_blocks_multiple_messages():
agent = AzureFoundryAgent(endpoint="dummy_endpoint")
with tempfile.NamedTemporaryFile(suffix=".gif") as tmp:
chat_messages = [
ChatMessage(role="user", blocks=[TextBlock(text="First message.")]),
ChatMessage(
role="user", blocks=[ImageBlock(url="http://images.com/pic.png")]
),
ChatMessage(
role="user",
blocks=[
TextBlock(text="Third message text."),
ImageBlock(path=Path(tmp.name)),
],
),
]
result = agent._llama_to_azure_content_blocks(chat_messages)
assert len(result) == 4
assert isinstance(result[0], MessageInputTextBlock)
assert result[0].text == "First message."
assert isinstance(result[1], MessageInputImageUrlBlock)
assert result[1].image_url.url == "http://images.com/pic.png"
assert isinstance(result[2], MessageInputTextBlock)
assert result[2].text == "Third message text."
assert isinstance(result[3], MessageInputImageFileBlock)
assert result[3].image_file.file_id == tmp.name
# --- Workflow and tool call tests from the other file ---
@pytest.mark.asyncio
async def test_azure_foundry_agent_workflow():
with (
patch(
"llama_index.agent.azure_foundry_agent.base.DefaultAzureCredential",
MagicMock(),
),
patch(
"llama_index.agent.azure_foundry_agent.base.AIProjectClient"
) as mock_client_class,
):
mock_client = MagicMock()
mock_client_class.return_value = mock_client
mock_client.agents.create_agent = AsyncMock()
mock_client.agents.threads.create = AsyncMock()
mock_client.agents.get_agent = AsyncMock()
mock_client.agents.messages.create = AsyncMock()
mock_client.agents.runs.create = AsyncMock()
mock_client.agents.runs.get = AsyncMock()
mock_client.agents.messages.list.return_value = DummyAsyncIterator([])
mock_client.agents.runs.submit_tool_outputs = AsyncMock()
mock_client.close = AsyncMock()
agent = AzureFoundryAgent(
endpoint="https://fake-endpoint",
model="gpt-4o",
name="azure-agent",
instructions="Test agent",
verbose=True,
)
workflow = AgentWorkflow(
agents=[agent],
)
memory = ChatMemoryBuffer.from_defaults()
handler = workflow.run(user_msg="Hello, agent!", memory=memory)
events = []
async for event in handler.stream_events():
events.append(event)
response = await handler
assert response is not None
@pytest.mark.asyncio
async def test_azure_foundry_agent_tool_call():
with (
patch(
"llama_index.agent.azure_foundry_agent.base.DefaultAzureCredential",
MagicMock(),
),
patch(
"llama_index.agent.azure_foundry_agent.base.AIProjectClient"
) as mock_client_class,
):
mock_client = MagicMock()
mock_client_class.return_value = mock_client
mock_client.agents.create_agent = AsyncMock()
mock_client.agents.threads.create = AsyncMock()
mock_client.agents.get_agent = AsyncMock()
mock_client.agents.messages.create = AsyncMock()
mock_client.close = AsyncMock()
class DummyRun:
def __init__(self, status, required_action=None):
self.status = status
self.required_action = required_action
self.id = "runid"
class DummyRequiredAction:
type = "submit_tool_outputs"
submit_tool_outputs = SimpleNamespace(
tool_calls=[
SimpleNamespace(
id="toolid",
function=SimpleNamespace(
name="my_tool", arguments=json.dumps({"x": 1})
),
)
]
)
mock_client.agents.runs.create = AsyncMock(
return_value=DummyRun("requires_action", DummyRequiredAction())
)
mock_client.agents.runs.get = AsyncMock(
side_effect=[
DummyRun("requires_action", DummyRequiredAction()),
DummyRun("completed"),
]
)
assistant_message = SimpleNamespace(
role="assistant",
content=[
SimpleNamespace(
type="text", text=SimpleNamespace(value="Tool call complete!")
)
],
)
def messages_list_side_effect(*args, **kwargs):
return DummyAsyncIterator([assistant_message, assistant_message])
mock_client.agents.messages.list.side_effect = messages_list_side_effect
mock_client.agents.runs.submit_tool_outputs = AsyncMock()
agent = AzureFoundryAgent(
endpoint="https://fake-endpoint",
model="gpt-4o",
name="azure-agent",
instructions="Test agent",
verbose=True,
tools=[lambda x: x], # Dummy tool
)
workflow = AgentWorkflow(agents=[agent])
memory = ChatMemoryBuffer.from_defaults()
handler = workflow.run(user_msg="Trigger tool", memory=memory)
events = []
async for event in handler.stream_events():
events.append(event)
response = await handler
assert "Tool call complete!" in response.response.content
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/agent/llama-index-agent-azure/tests/test_azure_foundry_agent.py",
"license": "MIT License",
"lines": 440,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-featherlessai/llama_index/llms/featherlessai/base.py | import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
class FeatherlessLLM(OpenAILike):
"""
Featherless LLM.
Examples:
`pip install llama-index-llms-featherlessai`
```python
from llama_index.llms.featherlessai import FeatherlessLLM
# set api key in env or in llm
# import os
# os.environ["FEATHERLESS_API_KEY"] = "your api key"
llm = FeatherlessLLM(
model="Qwen/Qwen3-32B", api_key="your_api_key"
)
resp = llm.complete("Who is Paul Graham?")
print(resp)
```
"""
def __init__(
self,
model: str,
api_key: Optional[str] = None,
api_base: str = "https://api.featherless.ai/v1",
is_chat_model: bool = True,
context_window: Optional[int] = None,
is_function_calling_model: bool = False,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("FEATHERLESS_API_KEY", None)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=is_chat_model,
context_window=context_window,
is_function_calling_model=is_function_calling_model,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "FeatherlessLLM"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-featherlessai/llama_index/llms/featherlessai/base.py",
"license": "MIT License",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-featherlessai/tests/test_llms_featherlessai.py | from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.featherlessai import FeatherlessLLM
def test_llm_class():
names_of_base_classes = [b.__name__ for b in FeatherlessLLM.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-featherlessai/tests/test_llms_featherlessai.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-desearch/llama_index/tools/desearch/base.py | from pydantic import BaseModel, Field
from typing import Optional, List, Literal
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from desearch_py import Desearch
class TwitterScraperMedia(BaseModel):
media_url: str = ""
type: str = ""
class TwitterScraperUser(BaseModel):
# Available in both, scraped and api based tweets.
id: Optional[str] = Field(example="123456789")
url: Optional[str] = Field(example="https://x.com/example_user")
name: Optional[str] = Field(example="John Doe")
username: Optional[str] = Field(example="johndoe")
created_at: Optional[str] = Field(example="2023-01-01T00:00:00Z")
# Only available in scraped tweets
description: Optional[str] = Field(example="This is an example user description.")
favourites_count: Optional[int] = Field(example=100)
followers_count: Optional[int] = Field(example=1500)
listed_count: Optional[int] = Field(example=10)
media_count: Optional[int] = Field(example=50)
profile_image_url: Optional[str] = Field(example="https://example.com/profile.jpg")
statuses_count: Optional[int] = Field(example=500)
verified: Optional[bool] = Field(example=True)
class BasicTwitterSearchResponse(BaseModel):
# Available in both, scraped and api based tweets.
user: Optional[TwitterScraperUser]
id: Optional[str] = Field(example="987654321")
text: Optional[str] = Field(example="This is an example tweet.")
reply_count: Optional[int] = Field(example=10)
retweet_count: Optional[int] = Field(example=5)
like_count: Optional[int] = Field(example=100)
view_count: Optional[int] = Field(example=1000)
quote_count: Optional[int] = Field(example=2)
impression_count: Optional[int] = Field(example=1500)
bookmark_count: Optional[int] = Field(example=3)
url: Optional[str] = Field(example="https://x.com/example_tweet")
created_at: Optional[str] = Field(example="2023-01-01T00:00:00Z")
media: Optional[List[TwitterScraperMedia]] = Field(default_factory=list, example=[])
# Only available in scraped tweets
is_quote_tweet: Optional[bool] = Field(example=False)
is_retweet: Optional[bool] = Field(example=False)
class WebSearchResult(BaseModel):
title: str = Field(
..., description="EXCLUSIVE Major coffee buyers face losses as Colombia ..."
)
snippet: str = Field(
...,
description="Coffee farmers in Colombia, the world's No. 2 arabica producer, have failed to deliver up to 1 million bags of beans this year or nearly 10% ...",
)
link: str = Field(
...,
description="https://www.reuters.com/world/americas/exclusive-major-coffee-buyers-face-losses-colombia-farmers-fail-deliver-2021-10-11/",
)
date: Optional[str] = Field(
None, description="21 hours ago"
) # Optional, as it might not always be present
source: str = Field(..., description="Reuters")
author: Optional[str] = Field(None, description="Reuters")
image: Optional[str] = Field(
None,
description="https://static.reuters.com/resources/2021/10/11/Reuters/Reuters_20211011_0000_01.jpg?w=800&h=533&q=80&crop=1",
)
favicon: Optional[str] = Field(
None,
description="https://static.reuters.com/resources/2021/10/11/Reuters/Reuters_20211011_0000_01.jpg?w=800&h=533&q=80&crop=1",
)
highlights: Optional[List[str]] = Field(
None, description="List of highlights as strings."
)
class DesearchToolSpec(BaseToolSpec):
"""Desearch tool spec."""
spec_functions = [
"ai_search_tool",
"twitter_search_tool",
"web_search_tool",
]
def __init__(self, api_key: str) -> None:
"""Initialize with parameters."""
self.client = Desearch(api_key=api_key)
def ai_search_tool(
self,
prompt: str = Field(description="The search prompt or query."),
tool: List[
Literal[
"web",
"hackernews",
"reddit",
"wikipedia",
"youtube",
"twitter",
"arxiv",
]
] = Field(description="List of tools to use. Must include at least one tool."),
model: str = Field(
default="NOVA",
description="The model to use for the search. Value should 'NOVA', 'ORBIT' or 'HORIZON'",
),
date_filter: Optional[str] = Field(
default=None, description="Date filter for the search."
),
) -> str | dict:
"""
Perform a search using Desearch.
Args:
prompt (str): The search prompt or query.
tool (List[Literal["web", "hackernews", "reddit", "wikipedia", "youtube", "twitter", "arxiv"]]): List of tools to use. Must include at least one tool.
model (str, optional): The model to use for the search. Defaults to "NOVA".
date_filter (Optional[str], optional): Date filter for the search. Defaults to None.
Returns:
str | dict: The search result or an error string.
"""
try:
return self.client.search(
prompt,
tool,
model,
date_filter,
)
except Exception as e:
return str(e)
def twitter_search_tool(
self,
query: str = Field(description="The Twitter search query."),
sort: str = Field(default="Top", description="Sort order for the results."),
count: int = Field(default=10, description="Number of results to return."),
) -> BasicTwitterSearchResponse:
"""
Perform a basic Twitter search using the Exa API.
Args:
query (str, optional): The Twitter search query. Defaults to None.
sort (str, optional): Sort order for the results. Defaults to "Top".
count (int, optional): Number of results to return. Defaults to 10.
Returns:
BasicTwitterSearchResponse: The search results.
Raises:
Exception: If an error occurs when calling the API.
"""
try:
return self.client.basic_twitter_search(query, sort, count)
except Exception as e:
return str(e)
def web_search_tool(
self,
query: str = Field(description="The search query."),
num: int = Field(default=10, description="Number of results to return."),
start: int = Field(
default=1, description="The starting index for the search results."
),
) -> List[WebSearchResult]:
"""
Perform a basic web search using the Exa API.
Args:
query (str, optional): The search query. Defaults to None.
num (int, optional): Number of results to return. Defaults to 10.
start (int, optional): The starting index for the search results. Defaults to 1.
Returns:
List[WebSearchResult]: The search results.
Raises:
Exception: If an error occurs when calling the API.
"""
try:
return self.client.basic_web_search(query, num, start)
except Exception as e:
return str(e)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-desearch/llama_index/tools/desearch/base.py",
"license": "MIT License",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-desearch/tests/test_tools_desearch.py | from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.tools.desearch import DesearchToolSpec
def test_class():
names_of_base_classes = [b.__name__ for b in DesearchToolSpec.__mro__]
assert BaseToolSpec.__name__ in names_of_base_classes
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-desearch/tests/test_tools_desearch.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-brightdata/llama_index/tools/brightdata/base.py | """Bright Data tool spec for LlamaIndex."""
from typing import Dict, Optional
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class BrightDataToolSpec(BaseToolSpec):
"""Bright Data tool spec for web scraping and search capabilities."""
spec_functions = [
"scrape_as_markdown",
"get_screenshot",
"search_engine",
"web_data_feed",
]
def __init__(
self,
api_key: str,
zone: str = "unblocker",
verbose: bool = False,
) -> None:
"""
Initialize with API token and default zone.
Args:
api_key (str): Your Bright Data API token
zone (str): Bright Data zone name
verbose (bool): Print additional information about requests
"""
self._headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}",
}
self._api_key = api_key
self._zone = zone
self._verbose = verbose
self._endpoint = "https://api.brightdata.com/request"
def _make_request(self, payload: Dict) -> str:
"""
Make a request to Bright Data API.
Args:
payload (Dict): Request payload
Returns:
str: Response text
"""
import requests
import json
if self._verbose:
print(f"[Bright Data] Request: {payload['url']}")
response = requests.post(
self._endpoint, headers=self._headers, data=json.dumps(payload)
)
if response.status_code != 200:
raise Exception(
f"Failed to scrape: {response.status_code} - {response.text}"
)
return response.text
def scrape_as_markdown(self, url: str, zone: Optional[str] = None) -> Document:
"""
Scrape a webpage and return content in Markdown format.
Args:
url (str): URL to scrape
zone (Optional[str]): Override default zone
Returns:
Document: Scraped content as Markdown
"""
payload = {
"url": url,
"zone": zone or self._zone,
"format": "raw",
"data_format": "markdown",
}
content = self._make_request(payload)
return Document(text=content, metadata={"url": url})
def get_screenshot(
self, url: str, output_path: str, zone: Optional[str] = None
) -> str:
"""
Take a screenshot of a webpage.
Args:
url (str): URL to screenshot
output_path (str): Path to save the screenshot
zone (Optional[str]): Override default zone
Returns:
str: Path to saved screenshot
"""
import requests
import json
payload = {
"url": url,
"zone": zone or self._zone,
"format": "raw",
"data_format": "screenshot",
}
response = requests.post(
self._endpoint, headers=self._headers, data=json.dumps(payload)
)
if response.status_code != 200:
raise Exception(f"Error {response.status_code}: {response.text}")
with open(output_path, "wb") as f:
f.write(response.content)
return output_path
def search_engine(
self,
query: str,
engine: str = "google",
zone: Optional[str] = None,
language: Optional[str] = None, # hl parameter, e.g., "en"
country_code: Optional[str] = None, # gl parameter, e.g., "us"
search_type: Optional[
str
] = None, # tbm parameter (images, shopping, news, etc.)
start: Optional[int] = None, # pagination start index
num_results: Optional[int] = 10, # number of results to return
location: Optional[str] = None, # uule parameter for geo-location
device: Optional[str] = None, # device type for user-agent
return_json: bool = False, # parse results as JSON
hotel_dates: Optional[str] = None, # check-in and check-out dates
hotel_occupancy: Optional[int] = None, # number of guests
) -> Document:
"""
Search using Google, Bing, or Yandex with advanced parameters and return results in Markdown.
Args:
query (str): Search query
engine (str): Search engine - 'google', 'bing', or 'yandex'
zone (Optional[str]): Override default zone
# Google SERP specific parameters
language (Optional[str]): Two-letter language code (hl parameter)
country_code (Optional[str]): Two-letter country code (gl parameter)
search_type (Optional[str]): Type of search (images, shopping, news, etc.)
start (Optional[int]): Results pagination offset (0=first page, 10=second page)
num_results (Optional[int]): Number of results to return (default 10)
location (Optional[str]): Location for search results (uule parameter)
device (Optional[str]): Device type (mobile, ios, android, ipad, android_tablet)
return_json (bool): Return parsed JSON instead of HTML/Markdown
# Hotel search parameters
hotel_dates (Optional[str]): Check-in and check-out dates (format: YYYY-MM-DD,YYYY-MM-DD)
hotel_occupancy (Optional[int]): Number of guests (1-4)
Returns:
Document: Search results as Markdown or JSON
"""
encoded_query = self._encode_query(query)
base_urls = {
"google": f"https://www.google.com/search?q={encoded_query}",
"bing": f"https://www.bing.com/search?q={encoded_query}",
"yandex": f"https://yandex.com/search/?text={encoded_query}",
}
if engine not in base_urls:
raise ValueError(
f"Unsupported search engine: {engine}. Use 'google', 'bing', or 'yandex'"
)
search_url = base_urls[engine]
if engine == "google":
params = []
if language:
params.append(f"hl={language}")
if country_code:
params.append(f"gl={country_code}")
if search_type:
if search_type == "jobs":
params.append("ibp=htl;jobs")
else:
search_types = {"images": "isch", "shopping": "shop", "news": "nws"}
tbm_value = search_types.get(search_type, search_type)
params.append(f"tbm={tbm_value}")
if start is not None:
params.append(f"start={start}")
if num_results:
params.append(f"num={num_results}")
if location:
params.append(f"uule={self._encode_query(location)}")
if device:
device_value = "1"
if device in ["ios", "iphone"]:
device_value = "ios"
elif device == "ipad":
device_value = "ios_tablet"
elif device == "android":
device_value = "android"
elif device == "android_tablet":
device_value = "android_tablet"
params.append(f"brd_mobile={device_value}")
if return_json:
params.append("brd_json=1")
if hotel_dates:
params.append(f"hotel_dates={self._encode_query(hotel_dates)}")
if hotel_occupancy:
params.append(f"hotel_occupancy={hotel_occupancy}")
if params:
search_url += "&" + "&".join(params)
payload = {
"url": search_url,
"zone": zone or self._zone,
"format": "raw",
"data_format": "markdown" if not return_json else "raw",
}
content = self._make_request(payload)
return Document(
text=content, metadata={"query": query, "engine": engine, "url": search_url}
)
def web_data_feed(
self,
source_type: str,
url: str,
num_of_reviews: Optional[int] = None,
timeout: int = 600,
polling_interval: int = 1,
) -> Dict:
"""
Retrieve structured web data from various sources like LinkedIn, Amazon, Instagram, etc.
Args:
source_type (str): Type of data source (e.g., 'linkedin_person_profile', 'amazon_product')
url (str): URL of the web resource to retrieve data from
num_of_reviews (Optional[int]): Number of reviews to retrieve (only for facebook_company_reviews)
timeout (int): Maximum time in seconds to wait for data retrieval
polling_interval (int): Time in seconds between polling attempts
Returns:
Dict: Structured data from the requested source
"""
import requests
import time
datasets = {
"amazon_product": "gd_l7q7dkf244hwjntr0",
"amazon_product_reviews": "gd_le8e811kzy4ggddlq",
"linkedin_person_profile": "gd_l1viktl72bvl7bjuj0",
"linkedin_company_profile": "gd_l1vikfnt1wgvvqz95w",
"zoominfo_company_profile": "gd_m0ci4a4ivx3j5l6nx",
"instagram_profiles": "gd_l1vikfch901nx3by4",
"instagram_posts": "gd_lk5ns7kz21pck8jpis",
"instagram_reels": "gd_lyclm20il4r5helnj",
"instagram_comments": "gd_ltppn085pokosxh13",
"facebook_posts": "gd_lyclm1571iy3mv57zw",
"facebook_marketplace_listings": "gd_lvt9iwuh6fbcwmx1a",
"facebook_company_reviews": "gd_m0dtqpiu1mbcyc2g86",
"x_posts": "gd_lwxkxvnf1cynvib9co",
"zillow_properties_listing": "gd_lfqkr8wm13ixtbd8f5",
"booking_hotel_listings": "gd_m5mbdl081229ln6t4a",
"youtube_videos": "gd_m5mbdl081229ln6t4a",
}
if source_type not in datasets:
valid_sources = ", ".join(datasets.keys())
raise ValueError(
f"Invalid source_type: {source_type}. Valid options are: {valid_sources}"
)
dataset_id = datasets[source_type]
request_data = {"url": url}
if source_type == "facebook_company_reviews" and num_of_reviews is not None:
request_data["num_of_reviews"] = str(num_of_reviews)
trigger_response = requests.post(
"https://api.brightdata.com/datasets/v3/trigger",
params={"dataset_id": dataset_id, "include_errors": True},
headers=self._headers,
json=[request_data],
)
trigger_data = trigger_response.json()
if not trigger_data.get("snapshot_id"):
raise Exception("No snapshot ID returned from trigger request")
snapshot_id = trigger_data["snapshot_id"]
if self._verbose:
print(
f"[Bright Data] {source_type} triggered with snapshot ID: {snapshot_id}"
)
attempts = 0
max_attempts = timeout
while attempts < max_attempts:
try:
snapshot_response = requests.get(
f"https://api.brightdata.com/datasets/v3/snapshot/{snapshot_id}",
params={"format": "json"},
headers=self._headers,
)
snapshot_data = snapshot_response.json()
if (
isinstance(snapshot_data, dict)
and snapshot_data.get("status") == "running"
):
if self._verbose:
print(
f"[Bright Data] Snapshot not ready, polling again (attempt {attempts + 1}/{max_attempts})"
)
attempts += 1
time.sleep(polling_interval)
continue
if self._verbose:
print(f"[Bright Data] Data received after {attempts + 1} attempts")
return snapshot_data
except Exception as e:
if self._verbose:
print(f"[Bright Data] Polling error: {e!s}")
attempts += 1
time.sleep(polling_interval)
raise TimeoutError(
f"Timeout after {max_attempts} seconds waiting for {source_type} data"
)
@staticmethod
def _encode_query(query: str) -> str:
"""URL encode a search query."""
from urllib.parse import quote
return quote(query)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-brightdata/llama_index/tools/brightdata/base.py",
"license": "MIT License",
"lines": 295,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-brightdata/tests/test_tools_brightdata.py | # tests/test_tools_brightdata.py
import unittest
from unittest.mock import patch, MagicMock
import json
import pytest
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.tools.brightdata import BrightDataToolSpec
class TestBrightDataToolSpec(unittest.TestCase):
def test_class_inheritance(self):
"""Test that BrightDataToolSpec inherits from BaseToolSpec."""
names_of_base_classes = [b.__name__ for b in BrightDataToolSpec.__mro__]
self.assertIn(BaseToolSpec.__name__, names_of_base_classes)
def test_initialization(self):
"""Test that the class initializes correctly."""
tool = BrightDataToolSpec(api_key="test_key", zone="test_zone")
self.assertEqual(tool._api_key, "test_key")
self.assertEqual(tool._zone, "test_zone")
self.assertEqual(tool._endpoint, "https://api.brightdata.com/request")
@patch("requests.post")
def test_scrape_as_markdown_success(self, mock_post):
"""Test successful scraping."""
# Mock the response
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.text = "# Markdown Content\n\nThis is a test."
mock_post.return_value = mock_response
tool = BrightDataToolSpec(api_key="test_key")
result = tool.scrape_as_markdown("https://example.com")
self.assertEqual(result.text, "# Markdown Content\n\nThis is a test.")
self.assertEqual(result.metadata, {"url": "https://example.com"})
mock_post.assert_called_once()
call_args = mock_post.call_args
self.assertEqual(call_args[0][0], "https://api.brightdata.com/request")
payload = json.loads(call_args[1]["data"])
self.assertEqual(payload["url"], "https://example.com")
self.assertEqual(payload["zone"], "unblocker") # default value
self.assertEqual(payload["format"], "raw")
self.assertEqual(payload["data_format"], "markdown")
headers = call_args[1]["headers"]
self.assertEqual(headers["Authorization"], "Bearer test_key")
@patch("requests.post")
def test_scrape_as_markdown_failure(self, mock_post):
"""Test failed scraping."""
mock_response = MagicMock()
mock_response.status_code = 403
mock_response.text = "Access denied"
mock_post.return_value = mock_response
tool = BrightDataToolSpec(api_key="test_key")
with pytest.raises(Exception) as context:
tool.scrape_as_markdown("https://example.com")
self.assertIn("Failed to scrape: 403", str(context.value))
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-brightdata/tests/test_tools_brightdata.py",
"license": "MIT License",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/retrievers/llama-index-retrievers-vectorize/llama_index/retrievers/vectorize/base.py | """Vectorize retrievers."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Literal
from vectorize_client import (
ApiClient,
Configuration,
Document,
PipelinesApi,
RetrieveDocumentsRequest,
)
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.schema import (
NodeRelationship,
NodeWithScore,
QueryBundle,
RelatedNodeInfo,
TextNode,
)
if TYPE_CHECKING:
from llama_index.core.callbacks.base import CallbackManager
class VectorizeRetriever(BaseRetriever):
"""Vectorize retriever.
Setup:
Install package ``llama-index-vectorize``
.. code-block:: bash
pip install -U llama-index-retrievers-vectorize
Instantiate:
.. code-block:: python
from llama_index.retrievers.vectorize import VectorizeRetriever
retriever = VectorizeRetriever(
api_token="xxxxx", "organization"="1234", "pipeline_id"="5678"
)
Usage:
.. code-block:: python
query = "what year was breath of the wild released?"
retriever.retrieve(query)
Args:
api_token: The Vectorize API token.
environment: The Vectorize API environment (prod, dev, local, staging).
Defaults to prod.
organization: The Vectorize organization.
pipeline_id: The Vectorize pipeline ID.
num_results: The number of documents to return.
rerank: Whether to rerank the retrieved documents.
metadata_filters: The metadata filters to apply when retrieving documents.
callback_manager: The callback manager to use for callbacks.
verbose: Whether to enable verbose logging.
"""
def __init__( # noqa: D107
self,
api_token: str,
*,
environment: Literal["prod", "dev", "local", "staging"] = "prod",
organization: str | None = None,
pipeline_id: str | None = None,
num_results: int = 5,
rerank: bool = False,
metadata_filters: list[dict[str, Any]] | None = None,
callback_manager: CallbackManager | None = None,
verbose: bool = False,
) -> None:
super().__init__(callback_manager=callback_manager, verbose=verbose)
self.organization = organization
self.pipeline_id = pipeline_id
self.num_results = num_results
self.rerank = rerank
self.metadata_filters = metadata_filters
header_name = None
header_value = None
if environment == "prod":
host = "https://api.vectorize.io/v1"
elif environment == "dev":
host = "https://api-dev.vectorize.io/v1"
elif environment == "local":
host = "http://localhost:3000/api"
header_name = "x-lambda-api-key"
header_value = api_token
else:
host = "https://api-staging.vectorize.io/v1"
api = ApiClient(
Configuration(host=host, access_token=api_token, debug=True),
header_name,
header_value,
)
self._pipelines = PipelinesApi(api)
@staticmethod
def _convert_document(document: Document) -> NodeWithScore:
doc = TextNode(
id_=document.id,
text=document.text,
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id=document.unique_source)
},
)
return NodeWithScore(node=doc, score=document.similarity)
def _retrieve(self, query_bundle: QueryBundle) -> list[NodeWithScore]:
query = query_bundle.query_str
request = RetrieveDocumentsRequest(
question=query,
num_results=self.num_results,
rerank=self.rerank,
metadata_filters=self.metadata_filters,
)
response = self._pipelines.retrieve_documents(
self.organization, self.pipeline_id, request
)
return [self._convert_document(doc) for doc in response.documents]
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/retrievers/llama-index-retrievers-vectorize/llama_index/retrievers/vectorize/base.py",
"license": "MIT License",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/retrievers/llama-index-retrievers-vectorize/tests/test_retrievers_vectorize.py | import http
import json
import logging
import os
import time
from collections.abc import Iterator
from pathlib import Path
from typing import Literal
from unittest.mock import MagicMock
import pytest
import urllib3
import vectorize_client as v
from urllib3 import HTTPResponse
from llama_index.core.schema import NodeRelationship
from llama_index.retrievers.vectorize import VectorizeRetriever
logger = logging.getLogger(__name__)
VECTORIZE_TOKEN = os.getenv("VECTORIZE_TOKEN", "")
VECTORIZE_ORG = os.getenv("VECTORIZE_ORG", "")
@pytest.fixture(scope="session")
def environment() -> Literal["prod", "dev", "local", "staging"]:
env = os.getenv("VECTORIZE_ENV", "prod")
if env not in ["prod", "dev", "local", "staging"]:
msg = "Invalid VECTORIZE_ENV environment variable."
raise ValueError(msg)
return env
@pytest.fixture(scope="session")
def api_client(environment: str) -> Iterator[v.ApiClient]:
header_name = None
header_value = None
if environment == "prod":
host = "https://api.vectorize.io/v1"
elif environment == "dev":
host = "https://api-dev.vectorize.io/v1"
elif environment == "local":
host = "http://localhost:3000/api"
header_name = "x-lambda-api-key"
header_value = VECTORIZE_TOKEN
else:
host = "https://api-staging.vectorize.io/v1"
with v.ApiClient(
v.Configuration(host=host, access_token=VECTORIZE_TOKEN, debug=True),
header_name,
header_value,
) as api:
yield api
@pytest.fixture(scope="session")
def pipeline_id(api_client: v.ApiClient) -> Iterator[str]:
pipelines = v.PipelinesApi(api_client)
connectors_api = v.ConnectorsApi(api_client)
response = connectors_api.create_source_connector(
VECTORIZE_ORG,
[
v.CreateSourceConnector(
name="from api", type=v.SourceConnectorType.FILE_UPLOAD
)
],
)
source_connector_id = response.connectors[0].id
logger.info("Created source connector %s", source_connector_id)
uploads_api = v.UploadsApi(api_client)
upload_response = uploads_api.start_file_upload_to_connector(
VECTORIZE_ORG,
source_connector_id,
v.StartFileUploadToConnectorRequest(
name="research.pdf",
content_type="application/pdf",
metadata=json.dumps({"created-from-api": True}),
),
)
http_pool = urllib3.PoolManager()
this_dir = Path(__file__).parent
file_path = this_dir / "research.pdf"
with file_path.open("rb") as f:
http_response = http_pool.request(
"PUT",
upload_response.upload_url,
body=f,
headers={
"Content-Type": "application/pdf",
"Content-Length": str(file_path.stat().st_size),
},
)
if http_response.status != http.HTTPStatus.OK:
msg = "Upload failed:"
raise ValueError(msg)
else:
logger.info("Upload successful")
ai_platforms = connectors_api.get_ai_platform_connectors(VECTORIZE_ORG)
builtin_ai_platform = next(
c.id for c in ai_platforms.ai_platform_connectors if c.type == "VECTORIZE"
)
logger.info("Using AI platform %s", builtin_ai_platform)
vector_databases = connectors_api.get_destination_connectors(VECTORIZE_ORG)
builtin_vector_db = next(
c.id for c in vector_databases.destination_connectors if c.type == "VECTORIZE"
)
logger.info("Using destination connector %s", builtin_vector_db)
pipeline_response = pipelines.create_pipeline(
VECTORIZE_ORG,
v.PipelineConfigurationSchema(
source_connectors=[
v.SourceConnectorSchema(
id=source_connector_id,
type=v.SourceConnectorType.FILE_UPLOAD,
config={},
)
],
destination_connector=v.DestinationConnectorSchema(
id=builtin_vector_db,
type=v.DestinationConnectorType.VECTORIZE,
config={},
),
ai_platform=v.AIPlatformSchema(
id=builtin_ai_platform,
type=v.AIPlatformType.VECTORIZE,
config=v.AIPlatformConfigSchema(),
),
pipeline_name="Test pipeline",
schedule=v.ScheduleSchema(type=v.ScheduleSchemaType.MANUAL),
),
)
pipeline_id = pipeline_response.data.id
logger.info("Created pipeline %s", pipeline_id)
yield pipeline_id
try:
pipelines.delete_pipeline(VECTORIZE_ORG, pipeline_id)
except Exception:
logger.exception("Failed to delete pipeline %s", pipeline_id)
@pytest.mark.skipif(
VECTORIZE_TOKEN == "" or VECTORIZE_ORG == "",
reason="missing Vectorize credentials (VECTORIZE_TOKEN, VECTORIZE_ORG)",
)
def test_retrieve_integration(
environment: Literal["prod", "dev", "local", "staging"],
pipeline_id: str,
) -> None:
retriever = VectorizeRetriever(
environment=environment,
api_token=VECTORIZE_TOKEN,
organization=VECTORIZE_ORG,
pipeline_id=pipeline_id,
num_results=2,
)
start = time.time()
while True:
docs = retriever.retrieve("What are you?")
if len(docs) == 2:
break
if time.time() - start > 180:
msg = "Docs not retrieved in time"
raise RuntimeError(msg)
time.sleep(1)
def test_retrieve_unit() -> None:
retriever = VectorizeRetriever(
environment="prod",
api_token="fake_token", # noqa: S106
organization="fake_org",
pipeline_id="fake_pipeline_id",
)
retriever._pipelines.api_client.rest_client.pool_manager.urlopen = MagicMock(
return_value=HTTPResponse(
body=json.dumps(
{
"documents": [
{
"relevancy": 0.42,
"id": "fake_id",
"text": "fake_text",
"chunk_id": "fake_chunk_id",
"total_chunks": "fake_total_chunks",
"origin": "fake_origin",
"origin_id": "fake_origin_id",
"similarity": 0.43,
"source": "fake_source",
"unique_source": "fake_unique_source",
"source_display_name": "fake_source_display_name",
},
],
"question": "fake_question",
"average_relevancy": 0.44,
"ndcg": 0.45,
}
).encode(),
status=200,
)
)
docs = retriever.retrieve("fake_question")
assert len(docs) == 1
assert docs[0].node.id_ == "fake_id"
assert docs[0].node.text == "fake_text"
assert (
docs[0].node.relationships[NodeRelationship.SOURCE].node_id
== "fake_unique_source"
)
assert docs[0].score == 0.43
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/retrievers/llama-index-retrievers-vectorize/tests/test_retrievers_vectorize.py",
"license": "MIT License",
"lines": 195,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-mcp/llama_index/tools/mcp/utils.py | from typing import Any, Dict, List, Optional
from mcp.client.session import ClientSession
from mcp.server.fastmcp import FastMCP, Context
from pydantic import BaseModel
from llama_index.core.tools import FunctionTool
from llama_index.core.workflow import Event, StartEvent, StopEvent, Workflow
from llama_index.tools.mcp.base import McpToolSpec
from llama_index.tools.mcp.client import BasicMCPClient
def get_tools_from_mcp_url(
command_or_url: str,
client: Optional[ClientSession] = None,
allowed_tools: Optional[List[str]] = None,
global_partial_params: Optional[Dict[str, Any]] = None,
partial_params_by_tool: Optional[Dict[str, Dict[str, Any]]] = None,
include_resources: bool = False,
) -> List[FunctionTool]:
"""
Get tools from an MCP server or command.
Args:
command_or_url: The command to run or the URL to connect to.
client (optional): The client to use to connect to the MCP server.
allowed_tools (optional): The tool names to allow from the MCP server.
global_partial_params: A dict of params to apply to all tools globally.
partial_params_by_tool: A dict mapping tool names to param overrides.
Values override global_partial_params. Use None as a value to remove a global param for a specific tool.
include_resources (optional): Whether to include resources in the tool list.
"""
client = client or BasicMCPClient(command_or_url)
tool_spec = McpToolSpec(
client,
allowed_tools=allowed_tools,
global_partial_params=global_partial_params,
partial_params_by_tool=partial_params_by_tool,
include_resources=include_resources,
)
return tool_spec.to_tool_list()
async def aget_tools_from_mcp_url(
command_or_url: str,
client: Optional[ClientSession] = None,
allowed_tools: Optional[List[str]] = None,
global_partial_params: Optional[Dict[str, Any]] = None,
partial_params_by_tool: Optional[Dict[str, Dict[str, Any]]] = None,
include_resources: bool = False,
) -> List[FunctionTool]:
"""
Get tools from an MCP server or command.
Args:
command_or_url: The command to run or the URL to connect to.
client (optional): The client to use to connect to the MCP server.
allowed_tools (optional): The tool names to allow from the MCP server.
global_partial_params: A dict of params to apply to all tools globally.
partial_params_by_tool: A dict mapping tool names to param overrides.
Values override global_partial_params. Use None as a value to remove a global param for a specific tool.
include_resources (optional): Whether to include resources in the tool list.
"""
client = client or BasicMCPClient(command_or_url)
tool_spec = McpToolSpec(
client,
allowed_tools=allowed_tools,
global_partial_params=global_partial_params,
partial_params_by_tool=partial_params_by_tool,
include_resources=include_resources,
)
return await tool_spec.to_tool_list_async()
def workflow_as_mcp(
workflow: Workflow,
workflow_name: Optional[str] = None,
workflow_description: Optional[str] = None,
start_event_model: Optional[BaseModel] = None,
**fastmcp_init_kwargs: Any,
) -> FastMCP:
"""
Convert a workflow to an MCP app.
This will convert any `Workflow` to an MCP app. It will expose the workflow as a tool
within MCP, which will
Args:
workflow:
The workflow to convert.
workflow_name (optional):
The name of the workflow. Defaults to the workflow class name.
workflow_description (optional):
The description of the workflow. Defaults to the workflow docstring.
start_event_model (optional):
The start event model of the workflow. Can be a `BaseModel` or a `StartEvent` class.
Defaults to the workflow's custom `StartEvent` class.
**fastmcp_init_kwargs:
Additional keyword arguments to pass to the FastMCP constructor.
Returns:
The MCP app object.
"""
app = FastMCP(**fastmcp_init_kwargs)
# Dynamically get the start event class -- this is a bit of a hack
StartEventCLS = start_event_model or workflow._start_event_class
if StartEventCLS == StartEvent:
raise ValueError(
"Must declare a custom StartEvent class in your workflow or provide a start_event_model."
)
# Get the workflow name and description
workflow_name = workflow_name or workflow.__class__.__name__
workflow_description = workflow_description or workflow.__doc__
@app.tool(name=workflow_name, description=workflow_description)
async def _workflow_tool(run_args: StartEventCLS, context: Context) -> Any:
# Handle edge cases where the start event is an Event or a BaseModel
# If the workflow does not have a custom StartEvent class, then we need to handle the event differently
if isinstance(run_args, Event) and workflow._start_event_class != StartEvent:
handler = workflow.run(start_event=run_args)
elif isinstance(run_args, BaseModel):
handler = workflow.run(**run_args.model_dump())
elif isinstance(run_args, dict):
start_event = StartEventCLS.model_validate(run_args)
handler = workflow.run(start_event=start_event)
else:
raise ValueError(f"Invalid start event type: {type(run_args)}")
async for event in handler.stream_events():
if not isinstance(event, StopEvent):
await context.log("info", message=event.model_dump_json())
return await handler
return app
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-mcp/llama_index/tools/mcp/utils.py",
"license": "MIT License",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-core/tests/indices/vector_store/test_simple_async.py | import pytest
from typing import List
from llama_index.core.indices.vector_store.base import VectorStoreIndex
from llama_index.core.schema import Document
from llama_index.core.vector_stores.simple import SimpleVectorStore
@pytest.mark.asyncio
async def test_simple_insertion(
documents: List[Document],
patch_llm_predictor,
patch_token_text_splitter,
mock_embed_model,
):
index = VectorStoreIndex.from_documents(
documents=documents, embed_model=mock_embed_model
)
assert isinstance(index, VectorStoreIndex)
# insert into index
await index.ainsert(Document(text="This is a test v3."))
# insert empty document to test empty document handling
await index.ainsert(Document(text=""))
# check contenst of nodes
actual_node_tups = [
("Hello world.", [1, 0, 0, 0, 0]),
("This is a test.", [0, 1, 0, 0, 0]),
("This is another test.", [0, 0, 1, 0, 0]),
("This is a test v2.", [0, 0, 0, 1, 0]),
("This is a test v3.", [0, 0, 0, 0, 1]),
]
for text_id in index.index_struct.nodes_dict:
node_id = index.index_struct.nodes_dict[text_id]
node = index.docstore.get_node(node_id)
# NOTE: this test breaks abstraction
assert isinstance(index._vector_store, SimpleVectorStore)
embedding = index._vector_store.get(text_id)
assert (node.get_content(), embedding) in actual_node_tups
@pytest.mark.asyncio
async def test_simple_deletion(
patch_llm_predictor, patch_token_text_splitter, mock_embed_model
) -> None:
"""Test delete VectorStoreIndex."""
new_documents = [
Document(text="Hello world.", id_="test_id_0"),
Document(text="This is a test.", id_="test_id_1"),
Document(text="This is another test.", id_="test_id_2"),
Document(text="This is a test v2.", id_="test_id_3"),
]
index = VectorStoreIndex.from_documents(
documents=new_documents, embed_model=mock_embed_model
)
assert isinstance(index, VectorStoreIndex)
await index.adelete_ref_doc("test_id_0")
assert len(index.index_struct.nodes_dict) == 3
actual_node_tups = [
("This is a test.", [0, 1, 0, 0, 0], "test_id_1"),
("This is another test.", [0, 0, 1, 0, 0], "test_id_2"),
("This is a test v2.", [0, 0, 0, 1, 0], "test_id_3"),
]
for text_id in index.index_struct.nodes_dict:
node_id = index.index_struct.nodes_dict[text_id]
node = index.docstore.get_node(node_id)
# NOTE: this test breaks abstraction
assert isinstance(index._vector_store, SimpleVectorStore)
embedding = index._vector_store.get(text_id)
assert (node.get_content(), embedding, node.ref_doc_id) in actual_node_tups
# test insert
await index.ainsert(Document(text="Hello world backup.", id_="test_id_0"))
assert len(index.index_struct.nodes_dict) == 4
actual_node_tups = [
("Hello world backup.", [1, 0, 0, 0, 0], "test_id_0"),
("This is a test.", [0, 1, 0, 0, 0], "test_id_1"),
("This is another test.", [0, 0, 1, 0, 0], "test_id_2"),
("This is a test v2.", [0, 0, 0, 1, 0], "test_id_3"),
]
for text_id in index.index_struct.nodes_dict:
node_id = index.index_struct.nodes_dict[text_id]
node = index.docstore.get_node(node_id)
# NOTE: this test breaks abstraction
assert isinstance(index._vector_store, SimpleVectorStore)
embedding = index._vector_store.get(text_id)
assert (node.get_content(), embedding, node.ref_doc_id) in actual_node_tups
@pytest.mark.asyncio
async def test_simple_update(
patch_llm_predictor,
patch_token_text_splitter,
mock_embed_model,
):
new_docs = [
Document(id_="1", text="Hello World"),
Document(id_="2", text="This is a test"),
]
index = VectorStoreIndex.from_documents(
documents=new_docs, embed_model=mock_embed_model
)
assert isinstance(index, VectorStoreIndex)
actual_node_tups = [
("Hello World v1", "1"),
("This is a test", "2"),
]
await index.aupdate_ref_doc(Document(id_="1", text="Hello World v1"))
for text_id in index.index_struct.nodes_dict:
node_id = index.index_struct.nodes_dict[text_id]
node = index.docstore.get_node(node_id)
# NOTE: this test breaks abstraction
assert (node.get_content(), node.ref_doc_id) in actual_node_tups
@pytest.mark.asyncio
async def test_simple_refresh(
patch_llm_predictor,
patch_token_text_splitter,
mock_embed_model,
):
new_docs = [
Document(id_="1", text="Hello World"),
Document(id_="2", text="This is a test"),
]
index = VectorStoreIndex.from_documents(
documents=new_docs, embed_model=mock_embed_model
)
assert isinstance(index, VectorStoreIndex)
await index.arefresh_ref_docs(
[
Document(id_="1", text="Hello World v1"),
Document(id_="2", text="This is a test v1"),
]
)
actual_node_tups = [
("Hello World v1", "1"),
("This is a test v1", "2"),
]
for text_id in index.index_struct.nodes_dict:
node_id = index.index_struct.nodes_dict[text_id]
node = index.docstore.get_node(node_id)
# NOTE: this test breaks abstraction
assert (node.get_content(), node.ref_doc_id) in actual_node_tups
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/indices/vector_store/test_simple_async.py",
"license": "MIT License",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-dev/llama_dev/pkg/bump.py | import click
from llama_dev.utils import (
BumpType,
bump_version,
find_all_packages,
is_llama_index_package,
load_pyproject,
update_pyproject_version,
)
@click.command(short_help="Bump package version")
@click.argument("package_names", required=False, nargs=-1)
@click.option(
"--all",
is_flag=True,
help="Bump version for all the packages in the monorepo",
)
@click.option(
"--version-type",
type=click.Choice([t.value for t in BumpType], case_sensitive=False),
default=BumpType.PATCH.value,
help="Type of version bump to perform (default: patch)",
)
@click.option(
"--dry-run",
is_flag=True,
help="Show what would be done without making changes",
)
@click.pass_obj
def bump(
obj: dict,
all: bool,
package_names: tuple,
version_type: str,
dry_run: bool,
):
"""Bump version for specified packages or all packages."""
console = obj["console"]
if not all and not package_names:
raise click.UsageError("Either specify package name(s) or use the --all flag")
packages = set()
if all:
packages = find_all_packages(obj["repo_root"])
else:
for package_name in package_names:
package_path = obj["repo_root"] / package_name
if not is_llama_index_package(package_path):
raise click.UsageError(
f"{package_name} is not a path to a LlamaIndex package"
)
packages.add(package_path)
bump_enum = BumpType(version_type)
# First, collect all packages and their version changes
changes = []
for package in packages:
try:
package_data = load_pyproject(package)
current_version = package_data["project"]["version"]
new_version = bump_version(current_version, bump_enum)
if dry_run:
console.print(
f"Would bump {package.relative_to(obj['repo_root'])} from {current_version} to {new_version}"
)
else:
update_pyproject_version(package, new_version)
except Exception as e:
console.print(
f"[error]Error processing {package.relative_to(obj['repo_root'])}: {e!s}[/error]"
)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-dev/llama_dev/pkg/bump.py",
"license": "MIT License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-dev/tests/pkg/test_bump.py | import builtins
from unittest import mock
from click.testing import CliRunner
from llama_dev.cli import cli
from llama_dev.pkg.bump import BumpType, bump_version, update_pyproject_version
def test_bump_version():
assert bump_version("0.0", BumpType.PATCH) == "0.0.1"
assert bump_version("0.0", BumpType.MINOR) == "0.1.0"
assert bump_version("1.0.0", BumpType.MAJOR) == "2.0.0"
def test_update_pyproject_version(data_path):
pkg = data_path / "llama-index-utils/util"
# Use the true open if it's the first call
real_open = open
mocked_open = mock.mock_open(read_data='[project]\nversion = "1.0.0"\n')
mocked_open.side_effect = (
lambda *args, **kwargs: real_open(*args, **kwargs)
if mocked_open.call_count == 0
else mock.DEFAULT
)
with mock.patch.object(builtins, "open", mocked_open):
update_pyproject_version(pkg, "99.0.0")
# Ensure open is called in both read and write modes
mocked_open.assert_any_call(pkg / "pyproject.toml", "r")
mocked_open.assert_any_call(pkg / "pyproject.toml", "w")
# Expect the version to be updated in the string that's attempted to be written
assert 'version = "99.0.0"' in mocked_open().write.call_args[0][0]
def test_bump_command_no_arguments():
runner = CliRunner()
result = runner.invoke(cli, ["pkg", "bump"])
assert result.exit_code != 0
assert "Either specify package name(s) or use the --all flag" in result.output
def test_bump_command_with_all_flag():
runner = CliRunner()
with (
mock.patch(
"llama_dev.pkg.bump.find_all_packages",
return_value={"package1", "package2"},
),
mock.patch(
"llama_dev.pkg.bump.load_pyproject",
return_value={"project": {"version": "1.0.0", "name": "package1"}},
),
mock.patch(
"llama_dev.pkg.bump.update_pyproject_version"
) as mock_update_version,
):
result = runner.invoke(cli, ["pkg", "bump", "--all"])
assert result.exit_code == 0
assert mock_update_version.call_count == 2
def test_bump_command_specific_packages(data_path):
runner = CliRunner()
with (
mock.patch(
"llama_dev.pkg.bump.load_pyproject",
return_value={"project": {"version": "1.0.0", "name": "package1"}},
),
mock.patch("llama_dev.pkg.bump.is_llama_index_package", return_value=True),
mock.patch(
"llama_dev.pkg.bump.update_pyproject_version"
) as mock_update_version,
):
result = runner.invoke(cli, ["pkg", "bump", f"{data_path / 'package1'}"])
assert result.exit_code == 0
mock_update_version.assert_called_once_with(data_path / "package1", "1.0.1")
def test_bump_command_specific_packages_dry_run(data_path):
runner = CliRunner()
with (
mock.patch(
"llama_dev.pkg.bump.load_pyproject",
return_value={"project": {"version": "1.0.0", "name": "package1"}},
),
mock.patch("llama_dev.pkg.bump.is_llama_index_package", return_value=True),
mock.patch(
"llama_dev.pkg.bump.update_pyproject_version"
) as mock_update_version,
):
result = runner.invoke(
cli, ["pkg", "bump", f"{data_path / 'package1'}", "--dry-run"]
)
assert result.exit_code == 0
mock_update_version.assert_not_called()
assert "Would bump tests/data/package1 from 1.0.0 to 1.0.1" in result.output
def test_bump_command_specific_packages_not_a_package(data_path):
runner = CliRunner()
with (
mock.patch(
"llama_dev.pkg.bump.load_pyproject",
return_value={"project": {"version": "1.0.0", "name": "package1"}},
),
mock.patch("llama_dev.pkg.bump.is_llama_index_package", return_value=False),
mock.patch(
"llama_dev.pkg.bump.update_pyproject_version"
) as mock_update_version,
):
result = runner.invoke(cli, ["pkg", "bump", f"{data_path / 'package1'}"])
assert result.exit_code != 0
mock_update_version.assert_not_called()
assert "package1 is not a path to a LlamaIndex package" in result.output
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-dev/tests/pkg/test_bump.py",
"license": "MIT License",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-legacy-office/llama_index/readers/legacy_office/reader.py | """Legacy Office Reader for LlamaIndex."""
import os
import logging
import requests
from pathlib import Path
from typing import Any, Dict, List, Optional, cast
from fsspec import AbstractFileSystem
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
class LegacyOfficeReader(BaseReader):
"""
Legacy Office Reader for parsing old Office documents (.doc, etc.) using Apache Tika.
This reader uses Apache Tika to parse legacy Office documents like Word 97 (.doc) files.
It can use either a local Tika server or connect to a remote one.
Args:
tika_server_jar_path (Optional[str]): Path to the Tika server JAR file.
If not provided, will download and use the default Tika server JAR.
tika_server_url (Optional[str]): URL of remote Tika server.
If provided, will use remote server instead of starting local one.
cache_dir (Optional[str]): Directory to cache the Tika server JAR.
Defaults to ~/.cache/llama_index/tika
excluded_embed_metadata_keys (Optional[List[str]]): Metadata keys to exclude from embedding.
excluded_llm_metadata_keys (Optional[List[str]]): Metadata keys to exclude from LLM.
"""
def __init__(
self,
tika_server_jar_path: Optional[str] = None,
tika_server_url: Optional[str] = None,
cache_dir: Optional[str] = None,
excluded_embed_metadata_keys: Optional[List[str]] = None,
excluded_llm_metadata_keys: Optional[List[str]] = None,
) -> None:
"""Initialize with parameters."""
super().__init__()
try:
import tika
from tika import parser
except ImportError as err:
raise ImportError(
"`tika` package not found, please run `pip install tika`"
) from err
self.parser = parser
self.excluded_embed_metadata_keys = excluded_embed_metadata_keys or []
self.excluded_llm_metadata_keys = excluded_llm_metadata_keys or []
# Set up cache directory
if cache_dir is None:
cache_dir = os.path.expanduser("~/.cache/llama_index/tika")
self.cache_dir = Path(cache_dir)
self.cache_dir.mkdir(parents=True, exist_ok=True)
# Handle remote server configuration
if tika_server_url:
logger.info(f"Using remote Tika server at {tika_server_url}")
os.environ["TIKA_SERVER_ENDPOINT"] = tika_server_url
return
# Set up local Tika server
if tika_server_jar_path:
os.environ["TIKA_SERVER_JAR"] = tika_server_jar_path
else:
# Use cached JAR if available
cached_jar = self.cache_dir / "tika-server.jar"
if cached_jar.exists():
logger.info("Using cached Tika server JAR")
os.environ["TIKA_SERVER_JAR"] = str(cached_jar)
else:
logger.info("Downloading Tika server JAR (this may take a while)...")
os.environ["TIKA_SERVER_JAR"] = str(cached_jar)
# Check if Tika server is already running
try:
response = requests.get("http://localhost:9998/version")
if response.status_code == 200:
logger.info("Using existing Tika server on port 9998")
os.environ["TIKA_SERVER_ENDPOINT"] = "http://localhost:9998"
return
except requests.RequestException:
# Server not running, will start it
pass
# Initialize Tika
logger.info("Initializing Tika server...")
tika.initVM()
# Set server endpoint
os.environ["TIKA_SERVER_ENDPOINT"] = "http://localhost:9998"
logger.info("Tika server will run on port 9998")
def _process_metadata(
self, tika_metadata: Dict[str, Any], file_path: str
) -> Dict[str, Any]:
"""
Process Tika metadata into LlamaIndex format.
Args:
tika_metadata: Raw metadata from Tika
file_path: Path to the document
Returns:
Processed metadata dictionary with essential information only
"""
# Start with basic metadata
metadata = {
"file_path": file_path,
"file_name": Path(file_path).name,
"file_type": Path(file_path).suffix.lower(),
}
# Whitelist of metadata keys to keep
essential_keys = {
# Document properties
"title": "title",
"dc:title": "title",
"dc:creator": "author",
"meta:author": "author",
"meta:word-count": "words",
"meta:character-count": "chars",
"meta:page-count": "pages",
"xmptpg:npages": "pages",
# Dates
"dcterms:created": "created",
"dcterms:modified": "modified",
}
for key, orig_value in tika_metadata.items():
# Skip if not an essential key
normalized_key = essential_keys.get(key.lower())
if not normalized_key:
continue
# Skip empty values
if not orig_value:
continue
# Handle lists by joining with semicolon
processed_value = orig_value
if isinstance(orig_value, list):
processed_value = "; ".join(str(v) for v in orig_value)
# Convert to string and clean up
processed_value = str(processed_value).strip()
if processed_value and ":" in processed_value:
processed_value = processed_value.split(":", 1)[1].strip()
if processed_value:
metadata[normalized_key] = processed_value
return metadata
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
fs: Optional[AbstractFileSystem] = None,
) -> List[Document]:
"""
Load data from legacy Office documents.
Args:
file (Path): Path to the legacy Office document.
extra_info (Optional[Dict]): Optional dictionary of extra metadata to add.
fs (Optional[AbstractFileSystem]): Optional filesystem to use.
Returns:
List[Document]: List of documents parsed from the file.
Raises:
ValueError: If document parsing fails or content is empty.
"""
try:
logger.info(f"Parsing document: {file}")
# Parse the document using Tika
if fs:
with fs.open(file) as f:
parsed = cast(Dict[str, Any], self.parser.from_buffer(f.read()))
else:
parsed = cast(Dict[str, Any], self.parser.from_file(str(file)))
if parsed is None:
raise ValueError(f"Failed to parse document: {file}")
content = str(parsed.get("content", "")).strip()
if not content:
raise ValueError(f"No content found in document: {file}")
# Process metadata
tika_metadata = parsed.get("metadata", {})
if not isinstance(tika_metadata, dict):
tika_metadata = {}
metadata = self._process_metadata(tika_metadata, str(file))
if extra_info:
metadata.update(extra_info)
# Create document with content and metadata
doc = Document(
text=content,
metadata=metadata,
excluded_embed_metadata_keys=self.excluded_embed_metadata_keys,
excluded_llm_metadata_keys=self.excluded_llm_metadata_keys,
)
logger.info(f"Successfully parsed document: {file}")
return [doc]
except Exception as e:
logger.error(f"Error processing document {file}: {e!s}")
raise ValueError(f"Error processing document {file}: {e!s}")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-legacy-office/llama_index/readers/legacy_office/reader.py",
"license": "MIT License",
"lines": 183,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/node_parser/llama-index-node-parser-slide/llama_index/node_parser/slide/base.py | import warnings
from typing import Dict, List, Optional, Callable, Sequence
from llama_index.core.bridge.pydantic import Field, model_validator
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.node_parser.text.utils import split_by_sentence_tokenizer
from llama_index.core.node_parser.node_utils import (
build_nodes_from_splits,
default_id_func,
)
from llama_index.core.async_utils import run_jobs
from llama_index.core.node_parser.text.semantic_splitter import SentenceSplitterCallable
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.utils import get_tqdm_iterable
from llama_index.core.utilities.token_counting import TokenCounter
from llama_index.core.schema import BaseNode, Document
from llama_index.core.llms.llm import LLM
from llama_index.core.llms import ChatMessage
# prompts taken from the paper --> SLIDE: Sliding Localized Information for Document Extraction
CONTEXT_GENERATION_SYSTEM_PROMPT = """You are an assistant which generates short English context to situate the input chunks in the input document.
Failure to adhere to this guideline will get you terminated."""
CONTEXT_GENERATION_USER_PROMPT = """Here is the document: '{window_chunk}'
Here is the chunk we want to situate within the whole document: '{chunk}'
Please give English context to situate this chunk within the overall document for the purposes of improving search retrieval of the chunk.
Answer only with a short context.
Do not provide any additional text."""
class SlideNodeParser(NodeParser):
"""Node parser using the SLIDE based approach using LLMs to improve chunk context."""
chunk_size: int = Field(
default=1200,
description="tokens per base chunk",
)
window_size: int = Field(
default=11,
description="Window size for the sliding window approach. This is the total number chunks to include in the context window, ideall an odd number.",
)
llm_workers: int = Field(
default=1,
description="Number of workers to use for LLM calls. This is only used when using the async version of the parser.",
)
llm: LLM = Field(description="The LLM model to use for generating local context")
token_counter: TokenCounter = Field(description="Token counter for sentences")
sentence_splitter: SentenceSplitterCallable = Field(
default_factory=split_by_sentence_tokenizer,
description="Sentence splitter to use for splitting text into sentences.",
exclude=True,
)
@classmethod
def class_name(cls) -> str:
return "SlideNodeParser"
@classmethod
def from_defaults(
cls,
chunk_size: int = 1200,
window_size: int = 11,
llm_workers: int = 1,
llm: Optional[LLM] = None,
token_counter: Optional[TokenCounter] = None,
sentence_splitter: Optional[Callable[[str], List[str]]] = None,
callback_manager: Optional[CallbackManager] = None,
id_func: Optional[Callable[[int, Document], str]] = None,
) -> "SlideNodeParser":
"""Create instance of the class with default values."""
from llama_index.core import Settings
callback_manager = callback_manager or CallbackManager([])
id_func = id_func or default_id_func
llm = llm or Settings.llm
token_counter = token_counter or TokenCounter()
sentence_splitter = sentence_splitter or split_by_sentence_tokenizer()
return cls(
callback_manager=callback_manager,
id_func=id_func,
chunk_size=chunk_size,
window_size=window_size,
llm_workers=llm_workers,
llm=llm,
token_counter=token_counter,
sentence_splitter=sentence_splitter,
)
@model_validator(mode="after")
def validate_slide_config(self):
# 1) chunk_size ≥ 1
if self.chunk_size < 1:
raise ValueError("`chunk_size` must be greater than or equal to 1.")
# 2) Warn if chunk_size is impractically small
if self.chunk_size < 50:
warnings.warn(
f"chunk_size={self.chunk_size} may be too small for meaningful chunking. "
"This could lead to poor context quality and high LLM call overhead.",
stacklevel=2,
)
# 3) window_size ≥ 1
if self.window_size < 1:
raise ValueError("`window_size` must be greater than or equal to 1.")
# 4) Validate LLM context budget: chunk_size × window_size
context_window = getattr(
getattr(self.llm, "metadata", None), "context_window", None
)
if context_window is not None:
estimated_tokens = self.chunk_size * self.window_size
if estimated_tokens > context_window:
raise ValueError(
f"SLIDE configuration exceeds LLM context window: "
f"{self.chunk_size} × {self.window_size} = {estimated_tokens} tokens, "
f"but the LLM supports only {context_window} tokens."
)
else:
# 5) Warn if context_window not provided
warnings.warn(
"The LLM does not expose `metadata.context_window`. "
"SLIDE cannot validate token usage, which may lead to truncation or generation failures.",
stacklevel=2,
)
return self
def _parse_nodes(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
) -> List[BaseNode]:
"""Parse document into nodes."""
# Warn if someone set llm_workers > 1 but is using sync parsing
if self.llm_workers != 1:
warnings.warn(
"llm_workers has no effect when using synchronous parsing. "
"If you want parallel LLM calls, use `aget_nodes_from_documents(...)` "
"with llm_workers > 1.",
stacklevel=2,
)
all_nodes: List[BaseNode] = []
nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes")
for node in nodes_with_progress:
nodes = self.build_slide_nodes_from_documents([node])
all_nodes.extend(nodes)
return all_nodes
async def _aparse_nodes(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
) -> List[BaseNode]:
"""Asynchronous parse document into nodes."""
# If llm_workers is left at 1, no parallelism will occur.
if self.llm_workers == 1:
warnings.warn(
"To parallelize LLM calls in async parsing, initialize with llm_workers > 1.",
stacklevel=2,
)
all_nodes: List[BaseNode] = []
nodes_with_progress = get_tqdm_iterable(
nodes, show_progress, "Parsing nodes (async)"
)
for node in nodes_with_progress:
nodes = await self.abuild_slide_nodes_from_documents([node], show_progress)
all_nodes.extend(nodes)
return all_nodes
def create_individual_chunks(self, sentences: List[str]) -> List[str]:
"""Greedily add sentences to each chunk until we reach the chunk size limit."""
chunks = []
current_chunk = ""
for sentence in sentences:
potential_chunk = (current_chunk + " " + sentence).strip()
if (
not current_chunk
or self.token_counter.get_string_tokens(potential_chunk)
<= self.chunk_size
):
current_chunk = potential_chunk
else:
chunks.append(current_chunk)
current_chunk = sentence
if current_chunk:
chunks.append(current_chunk)
return chunks
def build_localised_splits(
self,
chunks: List[str],
) -> List[Dict[str, str]]:
"""Generate localized context for each chunk using a sliding window approach."""
half_window = self.window_size // 2
localized_splits = []
for i in range(len(chunks)):
start = max(0, i - half_window)
end = min(len(chunks), i + half_window + 1)
window_chunk = " ".join(chunks[start:end])
# format prompt with current chunk and window chunk
llm_messages = [
ChatMessage(role="system", content=CONTEXT_GENERATION_SYSTEM_PROMPT),
ChatMessage(
role="user",
content=CONTEXT_GENERATION_USER_PROMPT.format(
window_chunk=window_chunk, chunk=chunks[i]
),
),
]
# generate localized context using LLM
localized_context = str(self.llm.chat(messages=llm_messages))
localized_splits.append(
{
"text": chunks[i],
"context": localized_context,
}
)
return localized_splits
async def abuild_localised_splits(
self,
chunks: List[str],
show_progress: bool = False,
) -> List[Dict[str, str]]:
"""Async version: batch all LLM calls for each chunk via run_jobs."""
half_window = self.window_size // 2
# prepare one achat() coroutine per chunk
jobs = []
for i, chunk in enumerate(chunks):
start = max(0, i - half_window)
end = min(len(chunks), i + half_window + 1)
window_chunk = " ".join(chunks[start:end])
llm_messages = [
ChatMessage(role="system", content=CONTEXT_GENERATION_SYSTEM_PROMPT),
ChatMessage(
role="user",
content=CONTEXT_GENERATION_USER_PROMPT.format(
window_chunk=window_chunk, chunk=chunk
),
),
]
jobs.append(self.llm.achat(messages=llm_messages))
# run them up to a maximum of llm_workers at once, get ordered responses
responses = await run_jobs(
jobs=jobs,
workers=self.llm_workers,
show_progress=show_progress,
desc="Generating local contexts",
)
# reassemble into the split format
return [
{"text": chunks[i], "context": str(resp)}
for i, resp in enumerate(responses)
]
def post_process_nodes(
self,
nodes: List[BaseNode],
contexts: List[str],
) -> List[BaseNode]:
"""
Attach slide_context metadata to each node based on the provided contexts.
"""
for node, context in zip(nodes, contexts):
# Preserve any existing metadata, then add our slide context
node.metadata["local_context"] = context
return nodes
def build_slide_nodes_from_documents(
self,
documents: Sequence[Document],
) -> List[BaseNode]:
"""
Build nodes enriched with localized context using a sliding window approach.
This is the primary function of the class.
"""
all_nodes: List[BaseNode] = []
for document in documents:
# Split into sentences and base chunks
doctext = document.get_content()
sentences = self.sentence_splitter(doctext)
chunks = self.create_individual_chunks(sentences)
# build localized splits
splits = self.build_localised_splits(chunks)
texts = [split["text"] for split in splits]
contexts = [split["context"] for split in splits]
# build and annotate nodes
nodes = build_nodes_from_splits(
text_splits=texts, document=document, id_func=self.id_func
)
nodes = self.post_process_nodes(nodes, contexts)
all_nodes.extend(nodes)
return all_nodes
async def abuild_slide_nodes_from_documents(
self,
documents: Sequence[Document],
show_progress: bool = False,
) -> List[BaseNode]:
"""
Asynchronously build nodes enriched with localized context using a sliding window approach.
"""
all_nodes: List[BaseNode] = []
for document in documents:
# Split into sentences and base chunks
doctext = document.get_content()
sentences = self.sentence_splitter(doctext)
chunks = self.create_individual_chunks(sentences)
# get localized splits using an async function
splits = await self.abuild_localised_splits(chunks, show_progress)
texts = [s["text"] for s in splits]
contexts = [s["context"] for s in splits]
# build and annotate nodes
nodes = build_nodes_from_splits(
text_splits=texts, document=document, id_func=self.id_func
)
nodes = self.post_process_nodes(nodes, contexts)
all_nodes.extend(nodes)
return all_nodes
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/node_parser/llama-index-node-parser-slide/llama_index/node_parser/slide/base.py",
"license": "MIT License",
"lines": 298,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/node_parser/llama-index-node-parser-slide/tests/test_node_parser_slide.py | """
Unit tests for SlideNodeParser.
Covers:
- Synchronous parsing (`get_nodes_from_documents`) under various chunk_size, window_size, and llm_workers.
- Asynchronous parsing (`aget_nodes_from_documents`) under the same parameter sets.
- Parallelism of async LLM calls (`achat`) via run_jobs, ensuring overlap when llm_workers > 1.
- Correct per-chunk invocation counts for both sync (`chat`) and async (`achat`).
- Edge‐case behavior for empty documents (no nodes returned, warning emitted).
- Handling of inputs shorter than the window (still produces valid nodes with context).
"""
import asyncio
import pytest
import warnings
from llama_index.core import Document
from llama_index.core.llms import MockLLM
from llama_index.node_parser.slide import SlideNodeParser
from unittest.mock import patch, AsyncMock
@pytest.mark.parametrize(
("chunk_size", "window_size", "llm_workers"),
[
(1, 1, 1),
(10, 3, 2),
(100, 5, 4),
(390, 10, 8),
],
)
def test_sync_parsing_no_errors(chunk_size, window_size, llm_workers):
"""
Integration test to ensure the sync parsing path (get_nodes_from_documents)
completes without errors under various parameters.
"""
# Patch the blocking LLM.chat so it returns instantly
with patch.object(MockLLM, "chat", return_value="ctx"):
llm = MockLLM()
parser = SlideNodeParser.from_defaults(
llm=llm,
chunk_size=chunk_size,
window_size=window_size,
llm_workers=llm_workers,
)
# Three sample documents with multiple sentences
doc1 = Document(text="This is the first document. It has some sentences.")
doc2 = Document(text="This is the second document. Different content.")
doc3 = Document(text="And this is the third one. More text here.")
# Should run without raising, regardless of chunk_size/window_size/llm_workers
nodes = parser.get_nodes_from_documents([doc1, doc2, doc3])
# Basic sanity checks
assert isinstance(nodes, list)
# Every node must have the local_context metadata set
assert all("local_context" in node.metadata for node in nodes)
@pytest.mark.asyncio
@pytest.mark.parametrize(
("chunk_size", "window_size", "llm_workers"),
[(1, 1, 1), (10, 3, 2), (100, 5, 4), (390, 10, 8)],
)
async def test_async_parsing_no_errors(chunk_size, window_size, llm_workers):
"""
Integration test to ensure the async parsing path (_aget_nodes_from_documents)
completes without errors under various parameters.
"""
# Patch the async LLM call so it returns immediately
with patch.object(MockLLM, "achat", new=AsyncMock(return_value="ctx")):
llm = MockLLM()
parser = SlideNodeParser.from_defaults(
llm=llm,
chunk_size=chunk_size,
window_size=window_size,
llm_workers=llm_workers,
)
# Sample documents with multiple sentences
doc1 = Document(text="This is the first document. It has some sentences.")
doc2 = Document(text="This is the second document. Different content.")
doc3 = Document(text="And this is the third one. More text here.")
# Should run without raising, regardless of chunk_size/window_size/llm_workers
nodes = await parser.aget_nodes_from_documents([doc1, doc2, doc3])
# Basic sanity checks
assert isinstance(nodes, list)
# Every node should have the local_context metadata set
assert all("local_context" in node.metadata for node in nodes)
@pytest.mark.asyncio
async def test_parallel_achat_calls():
"""Ensure that with max_workers>1, LLM calls overlap (are run in parallel)."""
call_events = []
# Fake achat that logs when it starts and ends
async def fake_achat(self, messages):
# Record the start of this call
call_events.append(("start", asyncio.get_event_loop().time()))
# Simulate a little work
await asyncio.sleep(0.1)
# Record the end
call_events.append(("end", asyncio.get_event_loop().time()))
return "ctx"
# Patch the class method to use our fake_achat
with patch.object(MockLLM, "achat", new=fake_achat):
llm = MockLLM()
parser = SlideNodeParser.from_defaults(
llm=llm,
chunk_size=2, # ensure each sentence is its own chunk
window_size=1,
llm_workers=2, # allow up to 2 concurrent calls
)
# Two-sentence doc → 2 chunks → 2 achat calls
doc = Document(text="First. Second.")
nodes = await parser.aget_nodes_from_documents([doc])
# We should have exactly two nodes
assert len(nodes) == 2
# Now assert overlap: the second event in the log should be a "start"
# (i.e. the second LLM call started before the first one finished)
assert call_events[1][0] == "start", (
"Expected the second LLM call to start before the first one ended, "
f"but got call_events={call_events}"
)
@pytest.mark.asyncio
async def test_async_aparse_nodes_with_mock_llm():
"""Ensure the async parser path calls achat() once per chunk and attaches contexts."""
# Prepare a simple doc that will split into 3 chunks
text = "Sentence one. Sentence two. Sentence three."
document = Document(text=text)
# Patch MockLLM.achat at the class level
with patch.object(
MockLLM, "achat", new=AsyncMock(return_value="dummy async context")
) as mock_achat:
# Instantiate the LLM and parser as usual
mock_llm = MockLLM()
parser = SlideNodeParser.from_defaults(
llm=mock_llm,
chunk_size=3, # one sentence → one chunk
window_size=1,
llm_workers=2,
)
# Run the async path
nodes = await parser.aget_nodes_from_documents([document])
# Verify achat() was called once per chunk/node
assert mock_achat.call_count == len(nodes), (
f"Expected achat() to be called {len(nodes)} times, got {mock_achat.call_count}"
)
# And that each node has our dummy context
for node in nodes:
assert node.metadata["local_context"] == "dummy async context"
def test_empty_doc():
"""Ensure passing empty docs returns an empty List[TextNode]."""
warnings.warn(
"WARNING: This test may fail if the context length of MockLLM is changed.\n"
"Make sure chunk_size * window_size fits within MockLLM.context_window.",
UserWarning,
)
llm = MockLLM()
node_parser = SlideNodeParser.from_defaults(
chunk_size=1300, # setting non default values to match context length of mock LLM
window_size=3,
llm=llm,
)
nodes = node_parser.get_nodes_from_documents(documents=[Document(text="")])
print(nodes)
assert isinstance(nodes, list)
assert nodes == []
def test_short_text_less_than_window():
"""Ensure parser handles short input without window overflow."""
with patch.object(MockLLM, "chat", return_value="ctx"):
parser = SlideNodeParser.from_defaults(
llm=MockLLM(),
chunk_size=780,
window_size=5,
)
nodes = parser.get_nodes_from_documents([Document(text="One. Two.")])
assert len(nodes) > 0
assert all(node.metadata["local_context"] == "ctx" for node in nodes)
def test_llm_called_expected_times():
"""Ensure LLM.chat() is called once per chunk (class‐level patching)."""
# Prepare a document with 4 sentences
text = "Sentence one. Sentence two. Sentence three. Sentence four."
document = Document(text=text)
mock_llm = MockLLM()
# Patch the chat method on the class (so instance is bound to it cleanly)
with patch.object(MockLLM, "chat", return_value="dummy context") as mock_chat:
# Force each sentence to become its own chunk:
parser = SlideNodeParser.from_defaults(
llm=mock_llm,
chunk_size=3, # small enough that each sentence splits out
window_size=1, # window of just the chunk itself
)
# Run parser
nodes = parser.get_nodes_from_documents([document])
# We expect one LLM.chat call per node returned
assert mock_chat.call_count == len(nodes), (
f"Expected chat() to be called {len(nodes)} times, "
f"but got {mock_chat.call_count}"
)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/node_parser/llama-index-node-parser-slide/tests/test_node_parser_slide.py",
"license": "MIT License",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-faiss/llama_index/vector_stores/faiss/map_store.py | """
Faiss Map Vector Store index.
An index that is built on top of an existing vector store.
"""
import ast
import json
import os
from typing import Any, List, Optional, cast
import numpy as np
import fsspec
from fsspec.implementations.local import LocalFileSystem
from llama_index.core.schema import BaseNode
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.vector_stores.simple import DEFAULT_VECTOR_STORE, NAMESPACE_SEP
from llama_index.vector_stores.faiss.base import (
FaissVectorStore,
DEFAULT_PERSIST_PATH,
DEFAULT_PERSIST_FNAME,
)
from llama_index.core.vector_stores.types import (
DEFAULT_PERSIST_DIR,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryResult,
)
DEFAULT_ID_MAP_NAME = "id_map.json"
class FaissMapVectorStore(FaissVectorStore):
"""
Faiss Map Vector Store.
This wraps the base Faiss vector store and adds handling for
the Faiss IDMap and IDMap2 indexes. This allows for
update/delete functionality through node_id and faiss_id mapping.
Embeddings are stored within a Faiss index.
During query time, the index uses Faiss to query for the top
k embeddings, and returns the corresponding indices.
Args:
faiss_index (faiss.IndexIDMap or faiss.IndexIDMap2): Faiss id map index instance
Examples:
`pip install llama-index-vector-stores-faiss faiss-cpu`
```python
from llama_index.vector_stores.faiss import FaissMapVectorStore
import faiss
# create a faiss index
d = 1536 # dimension
faiss_index = faiss.IndexFlatL2(d)
# wrap it in an IDMap or IDMap2
id_map_index = faiss.IndexIDMap2(faiss_index)
vector_store = FaissMapVectorStore(faiss_index=id_map_index)
```
"""
# _node_id_to_faiss_id_map is used to map the node id to the faiss id
_node_id_to_faiss_id_map = PrivateAttr()
# _faiss_id_to_node_id_map is used to map the faiss id to the node id
_faiss_id_to_node_id_map = PrivateAttr()
def __init__(
self,
faiss_index: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss
except ImportError:
raise ImportError(import_err_msg)
if not isinstance(faiss_index, faiss.IndexIDMap) and not isinstance(
faiss_index, faiss.IndexIDMap2
):
raise ValueError(
"FaissVectorMapStore requires a faiss.IndexIDMap or faiss.IndexIDMap2 index. "
"Please create an IndexIDMap2 index and pass it to the FaissVectorMapStore."
)
super().__init__(faiss_index=faiss_index)
self._node_id_to_faiss_id_map = {}
self._faiss_id_to_node_id_map = {}
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""
Add nodes to index.
NOTE: in the Faiss vector store, we do not store text in Faiss.
Args:
nodes: List[BaseNode]: list of nodes with embeddings
"""
new_ids = []
for node in nodes:
text_embedding = node.get_embedding()
text_embedding_np = np.array(text_embedding, dtype="float32")[np.newaxis, :]
self._node_id_to_faiss_id_map[node.id_] = self._faiss_index.ntotal
self._faiss_id_to_node_id_map[self._faiss_index.ntotal] = node.id_
self._faiss_index.add_with_ids(
text_embedding_np, np.array([self._faiss_index.ntotal], dtype=np.int64)
)
new_ids.append(node.id_)
return new_ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
# only handle delete on node_ids
if ref_doc_id in self._node_id_to_faiss_id_map:
faiss_id = self._node_id_to_faiss_id_map[ref_doc_id]
# remove the faiss id from the faiss index
self._faiss_index.remove_ids(np.array([faiss_id], dtype=np.int64))
# remove the node id from the node id map
if ref_doc_id in self._node_id_to_faiss_id_map:
del self._node_id_to_faiss_id_map[ref_doc_id]
# remove the faiss id from the faiss id map
if faiss_id in self._faiss_id_to_node_id_map:
del self._faiss_id_to_node_id_map[faiss_id]
def delete_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
**delete_kwargs: Any,
) -> None:
"""Delete nodes from vector store."""
if filters is not None:
raise NotImplementedError("Metadata filters not implemented for Faiss yet.")
if node_ids is None:
raise ValueError("node_ids must be provided to delete nodes.")
faiss_ids = []
for node_id in node_ids:
# get the faiss id from the node_id_map
faiss_id = self._node_id_to_faiss_id_map.get(node_id)
if faiss_id is not None:
faiss_ids.append(faiss_id)
if not faiss_ids:
return
self._faiss_index.remove_ids(np.array(faiss_ids, dtype=np.int64))
# cleanup references
for node_id in node_ids:
# get the faiss id from the node_id_map
faiss_id = self._node_id_to_faiss_id_map.get(node_id)
if faiss_id is not None and faiss_id in self._faiss_id_to_node_id_map:
del self._faiss_id_to_node_id_map[faiss_id]
if node_id in self._node_id_to_faiss_id_map:
del self._node_id_to_faiss_id_map[node_id]
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""
Query index for top k most similar nodes.
Args:
query_embedding (List[float]): query embedding
similarity_top_k (int): top k most similar nodes
"""
if query.filters is not None:
raise ValueError("Metadata filters not implemented for Faiss yet.")
query_embedding = cast(List[float], query.query_embedding)
query_embedding_np = np.array(query_embedding, dtype="float32")[np.newaxis, :]
dists, indices = self._faiss_index.search(
query_embedding_np, query.similarity_top_k
)
dists = list(dists[0])
# if empty, then return an empty response
if len(indices) == 0:
return VectorStoreQueryResult(similarities=[], ids=[])
# returned dimension is 1 x k
node_idxs = indices[0]
filtered_dists = []
filtered_node_idxs = []
for dist, idx in zip(dists, node_idxs):
if idx < 0:
continue
filtered_dists.append(dist)
filtered_node_idxs.append(self._faiss_id_to_node_id_map[idx])
return VectorStoreQueryResult(
similarities=filtered_dists, ids=filtered_node_idxs
)
def persist(
self,
persist_path: str = DEFAULT_PERSIST_PATH,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> None:
"""
Save to file.
This method saves the vector store to disk.
Args:
persist_path (str): The save_path of the file.
"""
super().persist(persist_path=persist_path, fs=fs)
dirpath = os.path.dirname(persist_path)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
id_map = {}
id_map["node_id_to_faiss_id_map"] = self._node_id_to_faiss_id_map
id_map["faiss_id_to_node_id_map"] = self._faiss_id_to_node_id_map
# save the id map as JSON for safe deserialization
id_map_path = os.path.join(dirpath, DEFAULT_ID_MAP_NAME)
with open(id_map_path, "w") as f:
json.dump(id_map, f)
@classmethod
def from_persist_dir(
cls,
persist_dir: str = DEFAULT_PERSIST_DIR,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> "FaissMapVectorStore":
persist_path = os.path.join(
persist_dir,
f"{DEFAULT_VECTOR_STORE}{NAMESPACE_SEP}{DEFAULT_PERSIST_FNAME}",
)
# only support local storage for now
if fs and not isinstance(fs, LocalFileSystem):
raise NotImplementedError("FAISS only supports local storage for now.")
return cls.from_persist_path(persist_path=persist_path, fs=None)
@classmethod
def from_persist_path(
cls,
persist_path: str,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> "FaissMapVectorStore":
import faiss
# I don't think FAISS supports fsspec, it requires a path in the SWIG interface
# TODO: copy to a temp file and load into memory from there
if fs and not isinstance(fs, LocalFileSystem):
raise NotImplementedError("FAISS only supports local storage for now.")
if not os.path.exists(persist_path):
raise ValueError(f"No existing {__name__} found at {persist_path}.")
dirpath = os.path.dirname(persist_path)
id_map_path = os.path.join(dirpath, DEFAULT_ID_MAP_NAME)
if not os.path.exists(persist_path):
raise ValueError(f"No existing {__name__} found at {persist_path}.")
faiss_index = faiss.read_index(persist_path)
with open(id_map_path, "r") as f:
raw = f.read()
try:
id_map = json.loads(raw)
except json.JSONDecodeError:
# Fallback for files persisted with the old str() format
id_map = ast.literal_eval(raw)
map_vs = cls(faiss_index=faiss_index)
map_vs._node_id_to_faiss_id_map = {
k: int(v) for k, v in id_map["node_id_to_faiss_id_map"].items()
}
map_vs._faiss_id_to_node_id_map = {
int(k): v for k, v in id_map["faiss_id_to_node_id_map"].items()
}
return map_vs
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-faiss/llama_index/vector_stores/faiss/map_store.py",
"license": "MIT License",
"lines": 248,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-core/llama_index/core/memory/memory_blocks/fact.py | import re
from typing import Any, List, Optional, Union
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.bridge.pydantic import Field, field_validator
from llama_index.core.llms import LLM
from llama_index.core.memory.memory import BaseMemoryBlock
from llama_index.core.prompts import (
BasePromptTemplate,
RichPromptTemplate,
PromptTemplate,
)
from llama_index.core.settings import Settings
DEFAULT_FACT_EXTRACT_PROMPT = RichPromptTemplate("""You are a precise fact extraction system designed to identify key information from conversations.
INSTRUCTIONS:
1. Review the conversation segment provided prior to this message
2. Extract specific, concrete facts the user has disclosed or important information discovered
3. Focus on factual information like preferences, personal details, requirements, constraints, or context
4. Format each fact as a separate <fact> XML tag
5. Do not include opinions, summaries, or interpretations - only extract explicit information
6. Do not duplicate facts that are already in the existing facts list
<existing_facts>
{{ existing_facts }}
</existing_facts>
Return ONLY the extracted facts in this exact format:
<facts>
<fact>Specific fact 1</fact>
<fact>Specific fact 2</fact>
<!-- More facts as needed -->
</facts>
If no new facts are present, return: <facts></facts>""")
DEFAULT_FACT_CONDENSE_PROMPT = RichPromptTemplate("""You are a precise fact condensing system designed to identify key information from conversations.
INSTRUCTIONS:
1. Review the current list of existing facts
2. Condense the facts into a more concise list, less than {{ max_facts }} facts
3. Focus on factual information like preferences, personal details, requirements, constraints, or context
4. Format each fact as a separate <fact> XML tag
5. Do not include opinions, summaries, or interpretations - only extract explicit information
6. Do not duplicate facts that are already in the existing facts list
<existing_facts>
{{ existing_facts }}
</existing_facts>
Return ONLY the condensed facts in this exact format:
<facts>
<fact>Specific fact 1</fact>
<fact>Specific fact 2</fact>
<!-- More facts as needed -->
</facts>
If no new facts are present, return: <facts></facts>""")
def get_default_llm() -> LLM:
return Settings.llm
class FactExtractionMemoryBlock(BaseMemoryBlock[str]):
"""
A memory block that extracts key facts from conversation history using an LLM.
This block identifies and stores discrete facts disclosed during the conversation,
structuring them in XML format for easy parsing and retrieval.
"""
name: str = Field(
default="ExtractedFacts", description="The name of the memory block."
)
llm: LLM = Field(
default_factory=get_default_llm,
description="The LLM to use for fact extraction.",
)
facts: List[str] = Field(
default_factory=list,
description="List of extracted facts from the conversation.",
)
max_facts: int = Field(
default=50, description="The maximum number of facts to store."
)
fact_extraction_prompt_template: BasePromptTemplate = Field(
default=DEFAULT_FACT_EXTRACT_PROMPT,
description="Template for the fact extraction prompt.",
)
fact_condense_prompt_template: BasePromptTemplate = Field(
default=DEFAULT_FACT_CONDENSE_PROMPT,
description="Template for the fact condense prompt.",
)
@field_validator("fact_extraction_prompt_template", mode="before")
@classmethod
def validate_fact_extraction_prompt_template(
cls, v: Union[str, BasePromptTemplate]
) -> BasePromptTemplate:
if isinstance(v, str):
if "{{" in v and "}}" in v:
v = RichPromptTemplate(v)
else:
v = PromptTemplate(v)
return v
async def _aget(
self, messages: Optional[List[ChatMessage]] = None, **block_kwargs: Any
) -> str:
"""Return the current facts as formatted text."""
if not self.facts:
return ""
return "\n".join([f"<fact>{fact}</fact>" for fact in self.facts])
async def _aput(self, messages: List[ChatMessage]) -> None:
"""Extract facts from new messages and add them to the facts list."""
# Skip if no messages
if not messages:
return
# Format existing facts for the prompt
existing_facts_text = ""
if self.facts:
existing_facts_text = "\n".join(
[f"<fact>{fact}</fact>" for fact in self.facts]
)
# Create the prompt
prompt_messages = self.fact_extraction_prompt_template.format_messages(
existing_facts=existing_facts_text,
)
# Get the facts extraction
response = await self.llm.achat(messages=[*messages, *prompt_messages])
# Parse the XML response to extract facts
facts_text = response.message.content or ""
new_facts = self._parse_facts_xml(facts_text)
# Add new facts to the list, avoiding exact-match duplicates
for fact in new_facts:
if fact not in self.facts:
self.facts.append(fact)
# Condense the facts if they exceed the max_facts
if len(self.facts) > self.max_facts:
existing_facts_text = "\n".join(
[f"<fact>{fact}</fact>" for fact in self.facts]
)
prompt_messages = self.fact_condense_prompt_template.format_messages(
existing_facts=existing_facts_text,
max_facts=self.max_facts,
)
response = await self.llm.achat(messages=[*messages, *prompt_messages])
new_facts = self._parse_facts_xml(response.message.content or "")
self.facts = new_facts
def _parse_facts_xml(self, xml_text: str) -> List[str]:
"""Parse facts from XML format."""
facts = []
# Extract content between <fact> tags
pattern = r"<fact>(.*?)</fact>"
matches = re.findall(pattern, xml_text, re.DOTALL)
# Clean up extracted facts
for match in matches:
fact = match.strip()
if fact:
facts.append(fact)
return facts
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/llama_index/core/memory/memory_blocks/fact.py",
"license": "MIT License",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-core/llama_index/core/memory/memory_blocks/static.py | from typing import Any, List, Optional, Union
from llama_index.core.base.llms.types import ChatMessage, ContentBlock, TextBlock
from llama_index.core.bridge.pydantic import Field, field_validator
from llama_index.core.memory.memory import BaseMemoryBlock
class StaticMemoryBlock(BaseMemoryBlock[List[ContentBlock]]):
"""
A memory block that returns static text.
This block is useful for including constant information or instructions
in the context without relying on external processing.
"""
name: str = Field(
default="StaticContent", description="The name of the memory block."
)
static_content: Union[List[ContentBlock]] = Field(
description="Static text or content to be returned by this memory block."
)
@field_validator("static_content", mode="before")
@classmethod
def validate_static_content(
cls, v: Union[str, List[ContentBlock]]
) -> List[ContentBlock]:
if isinstance(v, str):
v = [TextBlock(text=v)]
return v
async def _aget(
self, messages: Optional[List[ChatMessage]] = None, **block_kwargs: Any
) -> List[ContentBlock]:
"""Return the static text, potentially filtered by conditions."""
return self.static_content
async def _aput(self, messages: List[ChatMessage]) -> None:
"""No-op for static blocks as they don't change."""
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/llama_index/core/memory/memory_blocks/static.py",
"license": "MIT License",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-core/llama_index/core/memory/memory_blocks/vector.py | from typing import Any, Dict, List, Optional
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.base.llms.types import ChatMessage, TextBlock
from llama_index.core.bridge.pydantic import Field, field_validator
from llama_index.core.memory.memory import BaseMemoryBlock
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.prompts import (
BasePromptTemplate,
RichPromptTemplate,
PromptTemplate,
)
from llama_index.core.schema import TextNode, NodeWithScore
from llama_index.core.settings import Settings
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilter,
MetadataFilters,
VectorStoreQuery,
)
DEFAULT_RETRIEVED_TEXT_TEMPLATE = RichPromptTemplate("{{ text }}")
def get_default_embed_model() -> BaseEmbedding:
return Settings.embed_model
class VectorMemoryBlock(BaseMemoryBlock[str]):
"""
A memory block that retrieves relevant information from a vector store.
This block stores conversation history in a vector store and retrieves
relevant information based on the most recent messages.
"""
name: str = Field(
default="RetrievedMessages", description="The name of the memory block."
)
vector_store: BasePydanticVectorStore = Field(
description="The vector store to use for retrieval."
)
embed_model: BaseEmbedding = Field(
default_factory=get_default_embed_model,
description="The embedding model to use for encoding queries and documents.",
)
similarity_top_k: int = Field(
default=2, description="Number of top results to return."
)
retrieval_context_window: int = Field(
default=5,
description="Maximum number of messages to include for context when retrieving.",
)
format_template: BasePromptTemplate = Field(
default=DEFAULT_RETRIEVED_TEXT_TEMPLATE,
description="Template for formatting the retrieved information.",
)
node_postprocessors: List[BaseNodePostprocessor] = Field(
default_factory=list,
description="List of node postprocessors to apply to the retrieved nodes containing messages.",
)
query_kwargs: Dict[str, Any] = Field(
default_factory=dict,
description="Additional keyword arguments for the vector store query.",
)
@field_validator("vector_store", mode="before")
def validate_vector_store(cls, v: Any) -> "BasePydanticVectorStore":
if not isinstance(v, BasePydanticVectorStore):
raise ValueError("vector_store must be a BasePydanticVectorStore")
if not v.stores_text:
raise ValueError(
"vector_store must store text to be used as a retrieval memory block"
)
return v
@field_validator("format_template", mode="before")
@classmethod
def validate_format_template(cls, v: Any) -> "BasePromptTemplate":
if isinstance(v, str):
if "{{" in v and "}}" in v:
v = RichPromptTemplate(v)
else:
v = PromptTemplate(v)
return v
def _get_text_from_messages(self, messages: List[ChatMessage]) -> str:
"""Get the text from the messages."""
text = ""
for i, message in enumerate(messages):
for block in message.blocks:
if isinstance(block, TextBlock):
text += block.text
if len(messages) > 1 and i != len(messages) - 1:
text += " "
return text
async def _aget(
self,
messages: Optional[List[ChatMessage]] = None,
session_id: Optional[str] = None,
**block_kwargs: Any,
) -> str:
"""Retrieve relevant information based on recent messages."""
if not messages or len(messages) == 0:
return ""
# Use the last message or a context window of messages for the query
if (
self.retrieval_context_window > 1
and len(messages) >= self.retrieval_context_window
):
context = messages[-self.retrieval_context_window :]
else:
context = messages
query_text = self._get_text_from_messages(context)
if not query_text:
return ""
# Handle filtering by session_id
if session_id is not None:
filter = MetadataFilter(key="session_id", value=session_id)
if "filters" in self.query_kwargs and isinstance(
self.query_kwargs["filters"], MetadataFilters
):
# only add session_id filter if it does not exist in the filters list
session_id_filter_exists = False
for metadata_filter in self.query_kwargs["filters"].filters:
if (
isinstance(metadata_filter, MetadataFilter)
and metadata_filter.key == "session_id"
):
session_id_filter_exists = True
break
if not session_id_filter_exists:
self.query_kwargs["filters"].filters.append(filter)
else:
self.query_kwargs["filters"] = MetadataFilters(filters=[filter])
# Create and execute the query
query_embedding = await self.embed_model.aget_query_embedding(query_text)
query = VectorStoreQuery(
query_str=query_text,
query_embedding=query_embedding,
similarity_top_k=self.similarity_top_k,
**self.query_kwargs,
)
results = await self.vector_store.aquery(query)
nodes_with_scores = [
NodeWithScore(node=node, score=score)
for node, score in zip(results.nodes or [], results.similarities or [])
]
if not nodes_with_scores:
return ""
# Apply postprocessors
for postprocessor in self.node_postprocessors:
nodes_with_scores = await postprocessor.apostprocess_nodes(
nodes_with_scores, query_str=query_text
)
# Format the results
retrieved_text = "\n\n".join([node.get_content() for node in nodes_with_scores])
return self.format_template.format(text=retrieved_text)
async def _aput(self, messages: List[ChatMessage]) -> None:
"""Store messages in the vector store for future retrieval."""
if not messages:
return
# Format messages with role, text content, and additional info
texts = []
session_id = None
for message in messages:
text = self._get_text_from_messages([message])
if not text:
continue
# special case for session_id
if "session_id" in message.additional_kwargs:
session_id = message.additional_kwargs.pop("session_id")
if message.additional_kwargs:
text += f"\nAdditional Info: ({message.additional_kwargs!s})"
text = f"<message role='{message.role.value}'>{text}</message>"
texts.append(text)
if not texts:
return
# Get embeddings
text_node = TextNode(text="\n".join(texts), metadata={"session_id": session_id})
text_node.embedding = await self.embed_model.aget_text_embedding(text_node.text)
# Add to vector store, one node per entire message batch
await self.vector_store.async_add([text_node])
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/llama_index/core/memory/memory_blocks/vector.py",
"license": "MIT License",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-core/tests/memory/blocks/test_fact.py | import pytest
from typing import List
from llama_index.core.memory.memory_blocks.fact import (
FactExtractionMemoryBlock,
DEFAULT_FACT_EXTRACT_PROMPT,
DEFAULT_FACT_CONDENSE_PROMPT,
)
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.base.llms.types import ChatResponse
from llama_index.core.llms import MockLLM
class MyMockLLM(MockLLM):
"""Test-specific subclass of MockLLM with mocked achat method."""
def __init__(self, *args, responses: List[ChatResponse], **kwargs):
super().__init__(*args, **kwargs)
self._responses = responses
self._index = 0
async def achat(self, messages: List[ChatMessage], **kwargs) -> ChatResponse:
response = self._responses[self._index]
self._index += 1
return response
@pytest.fixture
def mock_extraction_llm():
"""Create a mock LLM with extraction responses."""
return MyMockLLM(
responses=[
ChatResponse(
message=ChatMessage(
content="<facts><fact>John lives in New York</fact><fact>John is a software engineer</fact></facts>"
)
),
]
)
@pytest.fixture
def sample_messages():
"""Create sample chat messages."""
return [
ChatMessage(
role=MessageRole.USER, content="My name is John and I live in New York."
),
ChatMessage(role=MessageRole.ASSISTANT, content="Nice to meet you John!"),
ChatMessage(
role=MessageRole.USER,
content="I work as a software engineer and I'm allergic to peanuts.",
),
]
@pytest.mark.asyncio
async def test_initialization():
"""Test initialization of FactExtractionMemoryBlock."""
memory_block = FactExtractionMemoryBlock(llm=MockLLM())
assert memory_block.facts == []
assert memory_block.fact_extraction_prompt_template == DEFAULT_FACT_EXTRACT_PROMPT
assert memory_block.fact_condense_prompt_template == DEFAULT_FACT_CONDENSE_PROMPT
# Test with custom prompt
custom_prompt = "Custom prompt"
memory_block = FactExtractionMemoryBlock(
fact_extraction_prompt_template=custom_prompt
)
assert memory_block.fact_extraction_prompt_template.template == custom_prompt
@pytest.mark.asyncio
async def test_aget_empty_facts():
"""Test aget method when no facts are present."""
memory_block = FactExtractionMemoryBlock(llm=MockLLM())
result = await memory_block.aget()
assert result == ""
@pytest.mark.asyncio
async def test_aget_with_facts():
"""Test aget method with existing facts."""
memory_block = FactExtractionMemoryBlock(llm=MockLLM())
memory_block.facts = ["John lives in New York", "John is a software engineer"]
result = await memory_block.aget()
assert (
result
== "<fact>John lives in New York</fact>\n<fact>John is a software engineer</fact>"
)
@pytest.mark.asyncio
async def test_aput_with_mocked_response(mock_extraction_llm, sample_messages):
"""Test aput method with a mocked LLM response."""
memory_block = FactExtractionMemoryBlock(llm=mock_extraction_llm)
await memory_block.aput(sample_messages)
# Verify the extracted facts
assert len(memory_block.facts) == 2
assert "John lives in New York" in memory_block.facts
assert "John is a software engineer" in memory_block.facts
@pytest.mark.asyncio
async def test_aput_with_empty_messages():
"""Test aput method with empty messages."""
memory_block = FactExtractionMemoryBlock(llm=MockLLM())
await memory_block.aput([])
assert memory_block.facts == []
@pytest.mark.asyncio
async def test_aput_with_duplicate_facts(mock_extraction_llm, sample_messages):
"""Test aput method with duplicate facts."""
memory_block = FactExtractionMemoryBlock(llm=mock_extraction_llm)
memory_block.facts = ["John lives in New York"]
await memory_block.aput(sample_messages)
# Verify only new facts were added (no duplicates)
assert len(memory_block.facts) == 2
assert memory_block.facts == [
"John lives in New York",
"John is a software engineer",
]
@pytest.mark.asyncio
async def test_parse_facts_xml():
"""Test the _parse_facts_xml method."""
memory_block = FactExtractionMemoryBlock(llm=MockLLM())
xml_text = """
<facts>
<fact>John lives in New York</fact>
<fact>John is a software engineer</fact>
<fact>John is allergic to peanuts</fact>
</facts>
"""
facts = memory_block._parse_facts_xml(xml_text)
assert len(facts) == 3
assert facts[0] == "John lives in New York"
assert facts[1] == "John is a software engineer"
assert facts[2] == "John is allergic to peanuts"
@pytest.mark.asyncio
async def test_parse_facts_xml_with_empty_response():
"""Test the _parse_facts_xml method with an empty response."""
memory_block = FactExtractionMemoryBlock(llm=MockLLM())
xml_text = "<facts></facts>"
facts = memory_block._parse_facts_xml(xml_text)
assert facts == []
@pytest.mark.asyncio
async def test_parse_facts_xml_with_malformed_xml():
"""Test the _parse_facts_xml method with malformed XML."""
memory_block = FactExtractionMemoryBlock(llm=MockLLM())
xml_text = """
Some text without proper XML tags
<fact>This should be extracted</fact>
More text
<fact>This should also be extracted</fact>
"""
facts = memory_block._parse_facts_xml(xml_text)
assert len(facts) == 2
assert facts[0] == "This should be extracted"
assert facts[1] == "This should also be extracted"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/memory/blocks/test_fact.py",
"license": "MIT License",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/memory/blocks/test_static.py | import pytest
from llama_index.core.base.llms.types import (
ChatMessage,
MessageRole,
TextBlock,
)
from llama_index.core.memory.memory_blocks.static import StaticMemoryBlock
@pytest.fixture
def sample_messages():
"""Create sample chat messages."""
return [
ChatMessage(role=MessageRole.USER, content="Hello, how are you?"),
ChatMessage(
role=MessageRole.ASSISTANT, content="I'm doing well, thanks for asking!"
),
ChatMessage(role=MessageRole.USER, content="What's the weather like today?"),
]
@pytest.mark.asyncio
async def test_initialization_with_string():
"""Test initialization of StaticMemoryBlock with a string."""
static_text = "This is some static content"
memory_block = StaticMemoryBlock(static_content=static_text)
assert memory_block.name == "StaticContent"
assert len(memory_block.static_content) == 1
assert isinstance(memory_block.static_content[0], TextBlock)
assert memory_block.static_content[0].text == static_text
@pytest.mark.asyncio
async def test_initialization_with_content_blocks():
"""Test initialization of StaticMemoryBlock with a list of ContentBlock."""
content_blocks = [TextBlock(text="First block"), TextBlock(text="Second block")]
memory_block = StaticMemoryBlock(static_content=content_blocks, name="CustomName")
assert memory_block.name == "CustomName"
assert len(memory_block.static_content) == 2
assert memory_block.static_content[0].text == "First block"
assert memory_block.static_content[1].text == "Second block"
@pytest.mark.asyncio
async def test_aget_returns_static_content():
"""Test that aget method returns the static content."""
static_text = "This is static content for testing"
memory_block = StaticMemoryBlock(static_content=static_text)
result = await memory_block.aget()
assert len(result) == 1
assert result[0].text == static_text
@pytest.mark.asyncio
async def test_aget_ignores_messages(sample_messages):
"""Test that aget method returns the same content regardless of messages."""
static_text = "Fixed content that doesn't change"
memory_block = StaticMemoryBlock(static_content=static_text)
result_with_messages = await memory_block.aget(messages=sample_messages)
result_without_messages = await memory_block.aget()
assert result_with_messages == result_without_messages
assert result_with_messages[0].text == static_text
@pytest.mark.asyncio
async def test_aput_does_nothing(sample_messages):
"""Test that aput method is a no-op and doesn't change the static content."""
static_text = "Unchanging content"
memory_block = StaticMemoryBlock(static_content=static_text)
# Get the content before calling aput
content_before = await memory_block.aget()
# Call aput with sample messages
await memory_block.aput(sample_messages)
# Get the content after calling aput
content_after = await memory_block.aget()
# Verify content hasn't changed
assert content_before == content_after
assert content_before[0].text == static_text
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/memory/blocks/test_static.py",
"license": "MIT License",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/memory/blocks/test_vector.py | import pytest
from typing import Any, Dict, List, Sequence
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.embeddings import MockEmbedding
from llama_index.core.memory.memory_blocks.vector import VectorMemoryBlock
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.prompts import RichPromptTemplate
from llama_index.core.schema import BaseNode, TextNode, NodeWithScore
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
VectorStoreQuery,
VectorStoreQueryResult,
)
class MockVectorStore(BasePydanticVectorStore):
"""Mock vector store for testing."""
stores_text: bool = True
is_embedding_query: bool = True
def __init__(self):
super().__init__()
self._nodes = {}
@property
def client(self) -> Any:
return self
@property
def nodes(self) -> Dict[str, BaseNode]:
return self._nodes
def add(self, nodes: Sequence[BaseNode], **kwargs: Any) -> List[str]:
"""Add nodes to vector store."""
ids = []
for node in nodes:
self._nodes[node.id_] = node
ids.append(node.id_)
return ids
async def async_add(self, nodes: Sequence[BaseNode], **kwargs: Any) -> List[str]:
"""Async add nodes to vector store."""
return self.add(nodes, **kwargs)
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete nodes with ref_doc_id."""
for node_id in list(self._nodes.keys()):
if self._nodes[node_id].ref_doc_id == ref_doc_id:
del self._nodes[node_id]
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query vector store."""
# For simplicity, return all nodes
nodes = list(self._nodes.values())
if query.similarity_top_k and len(nodes) > query.similarity_top_k:
nodes = nodes[: query.similarity_top_k]
# Simulate similarity scores
similarities = [0.9 - 0.1 * i for i in range(len(nodes))]
ids = [node.id_ for node in nodes]
return VectorStoreQueryResult(nodes=nodes, similarities=similarities, ids=ids)
async def aquery(
self, query: VectorStoreQuery, **kwargs: Any
) -> VectorStoreQueryResult:
"""Async query vector store."""
return self.query(query, **kwargs)
class MockNodePostprocessor(BaseNodePostprocessor):
"""Mock node postprocessor for testing."""
def _postprocess_nodes(
self, nodes: List[NodeWithScore], query: Any = None
) -> List[NodeWithScore]:
"""Add a prefix to each node's text."""
for node in nodes:
if isinstance(node.node, TextNode):
node.node.text = f"PROCESSED: {node.node.text}"
return nodes
@pytest.fixture
def mock_embedding():
"""Create a mock embedding model."""
return MockEmbedding(embed_dim=10)
@pytest.fixture
def mock_vector_store():
"""Create a mock vector store."""
return MockVectorStore()
@pytest.fixture
def vector_memory_block(
mock_vector_store: MockVectorStore, mock_embedding: MockEmbedding
):
"""Create a vector memory block."""
return VectorMemoryBlock(
vector_store=mock_vector_store,
embed_model=mock_embedding,
similarity_top_k=2,
)
@pytest.mark.asyncio
async def test_vector_memory_block_put(vector_memory_block: VectorMemoryBlock):
"""Test putting messages in the vector memory block."""
# Create messages
messages = [
ChatMessage(role="user", content="Hello, how are you?"),
ChatMessage(role="assistant", content="I'm doing well, thank you for asking!"),
]
# Put messages in memory
await vector_memory_block.aput(messages=messages)
# Check that messages were added to vector store
assert len(vector_memory_block.vector_store.nodes) == 1
# Check node content contains both messages
node = next(iter(vector_memory_block.vector_store.nodes.values()))
assert "<message role='user'>Hello, how are you?</message>" in node.text
assert (
"<message role='assistant'>I'm doing well, thank you for asking!</message>"
in node.text
)
@pytest.mark.asyncio
async def test_vector_memory_block_get(vector_memory_block: VectorMemoryBlock):
"""Test getting messages from the vector memory block."""
# Create and store some messages
history_messages = [
ChatMessage(role="user", content="What's the capital of France?"),
ChatMessage(role="assistant", content="The capital of France is Paris."),
ChatMessage(role="user", content="What about Germany?"),
ChatMessage(role="assistant", content="The capital of Germany is Berlin."),
]
await vector_memory_block.aput(messages=history_messages)
# Create a new query
query_messages = [ChatMessage(role="user", content="Tell me about Paris.")]
# Get relevant information
result = await vector_memory_block.aget(messages=query_messages)
# Check that we got a result
assert result != ""
assert "capital of France is Paris" in result
@pytest.mark.asyncio
async def test_empty_messages(vector_memory_block: VectorMemoryBlock):
"""Test with empty messages."""
# Test empty get
result = await vector_memory_block.aget(messages=[])
assert result == ""
# Test empty put
await vector_memory_block.aput(messages=[])
assert len(vector_memory_block.vector_store.nodes) == 0
@pytest.mark.asyncio
async def test_message_without_text(vector_memory_block: VectorMemoryBlock):
"""Test with a message that has no text blocks."""
# Create a message with no text blocks
message = ChatMessage(role="user", content=None, blocks=[])
# Put the message in memory
await vector_memory_block.aput(messages=[message])
# Check that nothing was added
assert len(vector_memory_block.vector_store.nodes) == 0
@pytest.mark.asyncio
async def test_retrieval_context_window(
mock_vector_store: MockVectorStore, mock_embedding: MockEmbedding
):
"""Test the retrieval_context_window parameter."""
# Create a memory block with a specific context window
memory_block = VectorMemoryBlock(
vector_store=mock_vector_store,
embed_model=mock_embedding,
retrieval_context_window=2,
similarity_top_k=2,
)
# Create and store some messages
history_messages = [
ChatMessage(role="user", content="What's your name?"),
ChatMessage(role="assistant", content="I'm an AI assistant."),
ChatMessage(role="user", content="What's the capital of France?"),
ChatMessage(role="assistant", content="The capital of France is Paris."),
]
await memory_block.aput(messages=history_messages)
# Create a query with multiple messages
query_messages = [
ChatMessage(role="user", content="What about the UK?"),
ChatMessage(role="assistant", content="The capital of the UK is London."),
ChatMessage(role="user", content="And Germany?"),
]
# The retrieval should only use the last 2 messages
result = await memory_block.aget(messages=query_messages)
# Check that we got a result
assert result != ""
# The result should be more related to UK/London than Paris
# In our mock implementation, it will just return all stored nodes
@pytest.mark.asyncio
async def test_node_postprocessors(
mock_vector_store: MockVectorStore, mock_embedding: MockEmbedding
):
"""Test node postprocessors."""
# Create a postprocessor
postprocessor = MockNodePostprocessor()
# Create a memory block with the postprocessor
memory_block = VectorMemoryBlock(
vector_store=mock_vector_store,
embed_model=mock_embedding,
similarity_top_k=2,
node_postprocessors=[postprocessor],
)
# Create and store some messages
history_messages = [
ChatMessage(role="user", content="What's the capital of France?"),
ChatMessage(role="assistant", content="The capital of France is Paris."),
]
await memory_block.aput(messages=history_messages)
# Create a query
query_messages = [ChatMessage(role="user", content="Tell me about Paris.")]
# Get relevant information - this should be processed
result = await memory_block.aget(messages=query_messages)
# Check that the result contains the processed prefix
assert "PROCESSED:" in result
@pytest.mark.asyncio
async def test_format_template(
mock_vector_store: MockVectorStore, mock_embedding: MockEmbedding
):
"""Test custom format template."""
# Create a memory block with a custom format template
custom_template = RichPromptTemplate("Relevant context: {{ text }}")
memory_block = VectorMemoryBlock(
vector_store=mock_vector_store,
embed_model=mock_embedding,
similarity_top_k=2,
format_template=custom_template,
)
# Create and store some messages
history_messages = [
ChatMessage(role="user", content="What's the capital of France?"),
ChatMessage(role="assistant", content="The capital of France is Paris."),
]
await memory_block.aput(messages=history_messages)
# Create a query
query_messages = [ChatMessage(role="user", content="Tell me about Paris.")]
# Get relevant information with custom format
result = await memory_block.aget(messages=query_messages)
# Check that the result contains our custom prefix
assert result.startswith("Relevant context:")
assert "capital of France is Paris" in result
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/memory/blocks/test_vector.py",
"license": "MIT License",
"lines": 223,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-dev/tests/pkg/test_cmd_exec.py | from unittest import mock
from click.testing import CliRunner
from llama_dev.cli import cli
def test_cmd_exec_no_package_no_all_flag():
runner = CliRunner()
result = runner.invoke(cli, ["pkg", "exec", "--cmd", "echo hello"])
assert result.exit_code != 0
assert "Either specify a package name or use the --all flag" in result.output
@mock.patch("llama_dev.pkg.cmd_exec.is_llama_index_package")
def test_cmd_exec_invalid_package(mock_is_llama_index):
mock_is_llama_index.return_value = False
runner = CliRunner()
result = runner.invoke(cli, ["pkg", "exec", "invalid-pkg", "--cmd", "echo hello"])
assert result.exit_code != 0
print(result.output)
assert "not a path to a LlamaIndex package" in result.output
@mock.patch("llama_dev.pkg.cmd_exec.find_all_packages")
@mock.patch("llama_dev.pkg.cmd_exec.subprocess.run")
def test_cmd_exec_all_flag(mock_subprocess, mock_find_all, data_path):
mock_find_all.return_value = [data_path / "fake/pkg1", data_path / "fake/pkg2"]
mock_subprocess.return_value = mock.Mock(
returncode=0, stdout="Command output", stderr=""
)
runner = CliRunner()
result = runner.invoke(cli, ["pkg", "exec", "--all", "--cmd", "echo hello"])
assert result.exit_code == 0
assert mock_subprocess.call_count == 2
assert "Command succeeded" in result.output
@mock.patch("llama_dev.pkg.cmd_exec.is_llama_index_package")
@mock.patch("llama_dev.pkg.cmd_exec.subprocess.run")
def test_cmd_exec_single_package_success(mock_subprocess, mock_is_llama_index):
mock_is_llama_index.return_value = True
mock_subprocess.return_value = mock.Mock(
returncode=0, stdout="Command output", stderr=""
)
runner = CliRunner()
result = runner.invoke(cli, ["pkg", "exec", "valid-pkg", "--cmd", "echo hello"])
assert result.exit_code == 0
assert mock_subprocess.call_count == 1
assert "Command succeeded" in result.output
@mock.patch("llama_dev.pkg.cmd_exec.is_llama_index_package")
@mock.patch("llama_dev.pkg.cmd_exec.subprocess.run")
def test_cmd_exec_failure_without_fail_fast(mock_subprocess, mock_is_llama_index):
mock_is_llama_index.return_value = True
mock_subprocess.return_value = mock.Mock(
returncode=1, stdout="", stderr="Error message"
)
runner = CliRunner()
result = runner.invoke(cli, ["pkg", "exec", "valid-pkg", "--cmd", "echo hello"])
assert result.exit_code == 0 # Command should continue despite failure
assert "Command 'echo hello' failed" in result.output
@mock.patch("llama_dev.pkg.cmd_exec.is_llama_index_package")
@mock.patch("llama_dev.pkg.cmd_exec.subprocess.run")
def test_cmd_exec_failure_with_fail_fast(mock_subprocess, mock_is_llama_index):
mock_is_llama_index.return_value = True
mock_subprocess.return_value = mock.Mock(
returncode=1, stdout="", stderr="Error message"
)
runner = CliRunner()
result = runner.invoke(
cli, ["pkg", "exec", "valid-pkg", "--cmd", "echo hello", "--fail-fast"]
)
assert result.exit_code != 0 # Command should fail at the first error
assert "Command 'echo hello' failed" in result.output
@mock.patch("llama_dev.pkg.cmd_exec.is_llama_index_package")
@mock.patch("llama_dev.pkg.cmd_exec.subprocess.run")
def test_cmd_exec_multiple_packages(mock_subprocess, mock_is_llama_index):
mock_is_llama_index.return_value = True
mock_subprocess.return_value = mock.Mock(
returncode=0, stdout="Command output", stderr=""
)
runner = CliRunner()
result = runner.invoke(cli, ["pkg", "exec", "pkg1", "pkg2", "--cmd", "echo hello"])
assert result.exit_code == 0
assert mock_subprocess.call_count == 2
assert "Command succeeded" in result.output
@mock.patch("llama_dev.pkg.cmd_exec.is_llama_index_package")
@mock.patch("llama_dev.pkg.cmd_exec.subprocess.run")
def test_cmd_exec_silent(mock_subprocess, mock_is_llama_index):
mock_is_llama_index.return_value = True
mock_subprocess.return_value = mock.Mock(
returncode=0, stdout="Command output", stderr=""
)
runner = CliRunner()
result = runner.invoke(
cli, ["pkg", "exec", "valid-pkg", "--silent", "--cmd", "echo hello"]
)
assert result.exit_code == 0
assert mock_subprocess.call_count == 1
assert result.output == ""
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-dev/tests/pkg/test_cmd_exec.py",
"license": "MIT License",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-dev/tests/pkg/test_info.py | from pathlib import Path
from unittest import mock
import pytest
from click.testing import CliRunner
from llama_dev.cli import cli
@pytest.fixture
def mock_projects():
return {
"package1": {"project": {"name": "llama-index-package1", "version": "0.1.0"}},
"package2": {"project": {"name": "llama-index-package2", "version": "0.2.0"}},
}
def test_info_with_package_names(mock_projects, data_path):
runner = CliRunner()
with (
mock.patch("llama_dev.pkg.info.is_llama_index_package", return_value=True),
mock.patch(
"llama_dev.pkg.info.load_pyproject",
side_effect=lambda p: mock_projects[p.name],
),
mock.patch("rich.table.Table.add_row") as mock_add_row,
):
result = runner.invoke(
cli, ["--repo-root", data_path, "pkg", "info", "package1", "package2"]
)
assert result.exit_code == 0
calls = mock_add_row.call_args_list
assert len(calls) == 2
assert {c.args for c in calls} == {
("llama-index-package2", "0.2.0", str(data_path / "package2")),
("llama-index-package1", "0.1.0", str(data_path / "package1")),
}
def test_info_with_all_flag(mock_projects):
runner = CliRunner()
package_paths = [Path("/fake/repo/root/package1"), Path("/fake/repo/root/package2")]
with (
mock.patch("llama_dev.pkg.info.find_all_packages", return_value=package_paths),
mock.patch(
"llama_dev.pkg.info.load_pyproject",
side_effect=lambda p: mock_projects[p.name],
),
mock.patch("rich.table.Table.add_row") as mock_add_row,
):
result = runner.invoke(cli, ["pkg", "info", "--all"])
assert result.exit_code == 0
assert mock_add_row.call_count == 2
mock_add_row.assert_any_call(
"llama-index-package1", "0.1.0", "/fake/repo/root/package1"
)
mock_add_row.assert_any_call(
"llama-index-package2", "0.2.0", "/fake/repo/root/package2"
)
def test_info_with_args_error():
runner = CliRunner()
result = runner.invoke(cli, ["pkg", "info"])
assert result.exit_code != 0
assert "Either specify a package name or use the --all flag" in result.output
def test_info_invalid_package():
runner = CliRunner()
with mock.patch("llama_dev.pkg.info.is_llama_index_package", return_value=False):
result = runner.invoke(cli, ["pkg", "info", "invalid-package"])
assert result.exit_code != 0
assert "is not a path to a LlamaIndex package" in result.output
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-dev/tests/pkg/test_info.py",
"license": "MIT License",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-dev/tests/test/test_test.py | from pathlib import Path
from unittest import mock
import pytest
from click.testing import CliRunner
from llama_dev import test as llama_dev_test
from llama_dev.cli import cli
from llama_dev.test import ResultStatus, _pytest, _run_tests
def mocked_coverage_failed(*args, **kwargs):
return {
"package": Path(__file__).parent.parent / "data" / "test_integration",
"status": ResultStatus.COVERAGE_FAILED,
"stdout": "",
"stderr": "Coverage below threshold",
"time": "0.1s",
}
def mocked_install_failed(*args, **kwargs):
return {
"package": Path(__file__).parent.parent / "data" / "test_integration",
"status": ResultStatus.INSTALL_FAILED,
"stdout": "",
"stderr": "Install failed",
"time": "0.1s",
}
def mocked_skip_failed_unsupported_python_version(*args, **kwargs):
return {
"package": Path(__file__).parent.parent / "data" / "test_integration",
"status": ResultStatus.UNSUPPORTED_PYTHON_VERSION,
"stdout": "",
"stderr": "Not compatible with Python",
"time": "0.1s",
}
def mocked_skip_failed_no_tests(*args, **kwargs):
return {
"package": Path(__file__).parent.parent / "data" / "test_integration",
"status": ResultStatus.NO_TESTS,
"stdout": "",
"stderr": "package has no tests",
"time": "0.1s",
}
def mocked_success(*args, **kwargs):
return {
"package": Path(__file__).parent.parent / "data" / "test_integration",
"status": ResultStatus.TESTS_PASSED,
"stdout": "",
"stderr": "",
"time": "0.1s",
}
@pytest.fixture
def changed_packages():
return {
Path("/fake/package/dependency1"),
Path("/fake/package/dependency2"),
}
@pytest.fixture
def package_data():
return {
"project": {
"dependencies": ["dependency1", "requests"],
"requires-python": ">=3.8",
}
}
def test_test_command_base_ref():
runner = CliRunner()
result = runner.invoke(cli, ["test", "--base-ref"])
assert result.exit_code != 0
assert "Error: Option '--base-ref' requires an argument." in result.output
result = runner.invoke(cli, ["test", "--base-ref="])
assert result.exit_code != 0
assert "Error: Option '--base-ref' cannot be empty." in result.output
def test_test_command_requires_base_ref_or_packages():
runner = CliRunner()
result = runner.invoke(cli, ["test"])
assert result.exit_code != 1
assert (
"Error: Either pass '--base-ref' or provide at least one package name."
in result.output
)
def test_test_command_cov_fail_under_requires_cov():
runner = CliRunner()
result = runner.invoke(
cli, ["test", "--base-ref", "main", "--cov-fail-under", "80"]
)
assert result.exit_code != 0
assert "You have to pass --cov in order to use --cov-fail-under" in result.output
@mock.patch("llama_dev.test.find_all_packages")
@mock.patch("llama_dev.test.get_changed_files")
@mock.patch("llama_dev.test.get_changed_packages")
@mock.patch("llama_dev.test.get_dependants_packages")
@mock.patch("llama_dev.test.concurrent.futures.ProcessPoolExecutor")
def test_workers_parameter(
mock_pool,
mock_get_dependants,
mock_get_changed_packages,
mock_get_changed_files,
mock_find_all_packages,
):
# Setup minimal test data
mock_find_all_packages.return_value = set()
mock_get_changed_files.return_value = set()
mock_get_changed_packages.return_value = set()
mock_get_dependants.return_value = set()
runner = CliRunner()
runner.invoke(cli, ["test", "--base-ref", "main", "--workers", "16"])
mock_pool.assert_called_once_with(max_workers=16)
@mock.patch("llama_dev.test.find_all_packages")
@mock.patch("llama_dev.test.get_changed_files")
@mock.patch("llama_dev.test.get_changed_packages")
@mock.patch("llama_dev.test.get_dependants_packages")
def test_coverage_failures(
mock_get_dependants,
mock_get_changed_packages,
mock_get_changed_files,
mock_find_all_packages,
monkeypatch,
data_path,
):
mock_find_all_packages.return_value = {Path("/fake/repo/package1")}
mock_get_changed_files.return_value = {Path("/fake/repo/package1/file.py")}
mock_get_changed_packages.return_value = {Path("/fake/repo/package1")}
mock_get_dependants.return_value = set()
monkeypatch.setattr(llama_dev_test, "_run_tests", mocked_coverage_failed)
runner = CliRunner()
result = runner.invoke(
cli,
[
"--repo-root",
data_path,
"test",
"--base-ref",
"main",
"--cov",
"--cov-fail-under",
"40",
],
)
# Check console output
assert result.exit_code == 1
assert "❌ test_integration failed" in result.stdout
assert "Error:\nCoverage below threshold" in result.stdout
@mock.patch("llama_dev.test.find_all_packages")
@mock.patch("llama_dev.test.get_changed_files")
@mock.patch("llama_dev.test.get_changed_packages")
@mock.patch("llama_dev.test.get_dependants_packages")
def test_install_failures(
mock_get_dependants,
mock_get_changed_packages,
mock_get_changed_files,
mock_find_all_packages,
monkeypatch,
data_path,
):
mock_find_all_packages.return_value = {Path("/fake/repo/package1")}
mock_get_changed_files.return_value = {Path("/fake/repo/package1/file.py")}
mock_get_changed_packages.return_value = {Path("/fake/repo/package1")}
mock_get_dependants.return_value = set()
monkeypatch.setattr(llama_dev_test, "_run_tests", mocked_install_failed)
runner = CliRunner()
result = runner.invoke(
cli,
["--repo-root", data_path, "test", "--base-ref", "main"],
)
# Check console output
assert result.exit_code == 0
assert "❗ Unable to build package test_integration" in result.stdout
assert "Error:\nInstall failed" in result.stdout
@mock.patch("llama_dev.test.find_all_packages")
@mock.patch("llama_dev.test.get_changed_files")
@mock.patch("llama_dev.test.get_changed_packages")
@mock.patch("llama_dev.test.get_dependants_packages")
def test_skip_failures_no_tests(
mock_get_dependants,
mock_get_changed_packages,
mock_get_changed_files,
mock_find_all_packages,
monkeypatch,
data_path,
):
mock_find_all_packages.return_value = {Path("/fake/repo/package1")}
mock_get_changed_files.return_value = {Path("/fake/repo/package1/file.py")}
mock_get_changed_packages.return_value = {Path("/fake/repo/package1")}
mock_get_dependants.return_value = set()
monkeypatch.setattr(llama_dev_test, "_run_tests", mocked_skip_failed_no_tests)
runner = CliRunner()
result = runner.invoke(
cli,
["--repo-root", data_path, "test", "--base-ref", "main"],
)
# Check console output
assert result.exit_code == 0
assert "1 packages were skipped" in result.stdout
@mock.patch("llama_dev.test.find_all_packages")
@mock.patch("llama_dev.test.get_changed_files")
@mock.patch("llama_dev.test.get_changed_packages")
@mock.patch("llama_dev.test.get_dependants_packages")
def test_skip_failures_unsupported_python(
mock_get_dependants,
mock_get_changed_packages,
mock_get_changed_files,
mock_find_all_packages,
monkeypatch,
data_path,
):
mock_find_all_packages.return_value = {Path("/fake/repo/package1")}
mock_get_changed_files.return_value = {Path("/fake/repo/package1/file.py")}
mock_get_changed_packages.return_value = {Path("/fake/repo/package1")}
mock_get_dependants.return_value = set()
monkeypatch.setattr(
llama_dev_test, "_run_tests", mocked_skip_failed_unsupported_python_version
)
runner = CliRunner()
result = runner.invoke(
cli,
["--repo-root", data_path, "test", "--base-ref", "main"],
)
# Check console output
assert result.exit_code == 0
assert (
"1 packages were skipped due to Python version incompatibility" in result.stdout
)
@mock.patch("llama_dev.test.find_all_packages")
@mock.patch("llama_dev.test.get_changed_files")
@mock.patch("llama_dev.test.get_changed_packages")
@mock.patch("llama_dev.test.get_dependants_packages")
def test_success(
mock_get_dependants,
mock_get_changed_packages,
mock_get_changed_files,
mock_find_all_packages,
monkeypatch,
data_path,
):
mock_find_all_packages.return_value = {Path("/fake/repo/package1")}
mock_get_changed_files.return_value = {Path("/fake/repo/package1/file.py")}
mock_get_changed_packages.return_value = {Path("/fake/repo/package1")}
mock_get_dependants.return_value = set()
monkeypatch.setattr(llama_dev_test, "_run_tests", mocked_success)
runner = CliRunner()
result = runner.invoke(
cli,
["--repo-root", data_path, "test", "--base-ref", "main"],
)
# Check console output
assert result.exit_code == 0
assert "Tests passed for 1 packages." in result.stdout
@mock.patch("llama_dev.test.find_all_packages")
@mock.patch("llama_dev.test.get_changed_files")
@mock.patch("llama_dev.test.get_changed_packages")
@mock.patch("llama_dev.test.get_dependants_packages")
def test_package_parameter(
mock_get_dependants,
mock_get_changed_packages,
mock_get_changed_files,
mock_find_all_packages,
data_path,
):
# Setup minimal test data
mock_find_all_packages.return_value = set()
mock_get_changed_files.return_value = set()
mock_get_changed_packages.return_value = set()
mock_get_dependants.return_value = set()
runner = CliRunner()
runner.invoke(
cli,
[
"--repo-root",
data_path,
"test",
"--base-ref",
"main",
"package_1",
"package_2",
],
)
mock_get_dependants.assert_called_with(
{data_path / "package_1", data_path / "package_2"}, set()
)
#
# Tests for the utility methods, we call them directly not through cli execution
#
@mock.patch("llama_dev.pkg.cmd_exec.subprocess.run")
def test__pytest(mock_subprocess):
mock_subprocess.return_value = mock.Mock(
returncode=0, stdout="Command output", stderr=""
)
_pytest(Path(), {}, cov=False)
assert mock_subprocess.call_args[0][0] == [
"uv",
"run",
"--no-sync",
"--",
"pytest",
"-q",
"--disable-warnings",
"--disable-pytest-warnings",
]
mock_subprocess.reset_mock()
_pytest(Path(), {}, cov=True)
assert mock_subprocess.call_args[0][0] == [
"uv",
"run",
"--no-sync",
"--",
"pytest",
"-q",
"--disable-warnings",
"--disable-pytest-warnings",
"--cov=.",
"--cov-report=xml",
]
def test_incompatible_python_version(changed_packages):
with (
mock.patch(
"llama_dev.test.load_pyproject",
return_value={"project": {"requires-python": ">=3.10"}},
),
mock.patch("llama_dev.test.is_python_version_compatible", return_value=False),
mock.patch("llama_dev.test.package_has_tests", return_value=True),
):
result = _run_tests(Path(), changed_packages, "main", False, 0)
assert result["status"] == ResultStatus.UNSUPPORTED_PYTHON_VERSION
assert "Not compatible with Python" in result["stderr"]
assert "package has no tests" not in result["stderr"]
def test_no_package_tests(changed_packages):
with (
mock.patch(
"llama_dev.test.load_pyproject",
return_value={"project": {"requires-python": ">=3.8"}},
),
mock.patch("llama_dev.test.is_python_version_compatible", return_value=True),
mock.patch("llama_dev.test.package_has_tests", return_value=False),
):
result = _run_tests(Path(), changed_packages, "main", False, 0)
assert result["status"] == ResultStatus.NO_TESTS
assert "package has no tests" in result["stderr"]
assert "Not compatible with Python" not in result["stderr"]
def test_install_dependencies_failure(changed_packages, package_data):
with (
mock.patch("llama_dev.test.load_pyproject", return_value=package_data),
mock.patch("llama_dev.test.is_python_version_compatible", return_value=True),
mock.patch(
"llama_dev.test._uv_sync",
return_value=mock.Mock(
returncode=1, stdout="stdout output", stderr="install error"
),
),
):
result = _run_tests(Path(), changed_packages, "main", False, 0)
assert result["status"] == ResultStatus.INSTALL_FAILED
assert result["stderr"] == "install error"
def test_install_local_packages_failure(changed_packages, package_data):
with (
mock.patch("llama_dev.test.load_pyproject", return_value=package_data),
mock.patch("llama_dev.test.is_python_version_compatible", return_value=True),
mock.patch(
"llama_dev.test._uv_sync",
return_value=mock.Mock(returncode=0),
),
mock.patch(
"llama_dev.test.get_dep_names", return_value=["dependency1", "dependency2"]
),
mock.patch(
"llama_dev.test._uv_install_local",
return_value=mock.Mock(
returncode=1, stdout="stdout", stderr="local install error"
),
),
):
result = _run_tests(Path(), changed_packages, "main", False, 0)
assert result["status"] == ResultStatus.INSTALL_FAILED
assert result["stderr"] == "local install error"
def test_pytest_failure(changed_packages, package_data):
with (
mock.patch("llama_dev.test.load_pyproject", return_value=package_data),
mock.patch("llama_dev.test.is_python_version_compatible", return_value=True),
mock.patch(
"llama_dev.test._uv_sync",
return_value=mock.Mock(returncode=0),
),
mock.patch("llama_dev.test.get_dep_names", return_value=["dependency1"]),
mock.patch(
"llama_dev.test._uv_install_local",
return_value=mock.Mock(returncode=0),
),
mock.patch(
"llama_dev.test._pytest",
return_value=mock.Mock(
returncode=1, stdout="test output", stderr="test failures"
),
),
):
result = _run_tests(Path(), changed_packages, "main", False, 0)
assert result["status"] == ResultStatus.TESTS_FAILED
assert result["stdout"] == "test output"
assert result["stderr"] == "test failures"
def test_coverage_failure(changed_packages, package_data):
with (
mock.patch("llama_dev.test.load_pyproject", return_value=package_data),
mock.patch("llama_dev.test.is_python_version_compatible", return_value=True),
mock.patch(
"llama_dev.test._uv_sync",
return_value=mock.Mock(returncode=0),
),
mock.patch("llama_dev.test.get_dep_names", return_value=[]),
mock.patch(
"llama_dev.test._pytest",
return_value=mock.Mock(returncode=0, stdout="tests passed"),
),
mock.patch(
"llama_dev.test._diff_cover",
return_value=mock.Mock(
returncode=1,
stdout="coverage output",
stderr="coverage below threshold",
),
),
):
result = _run_tests(Path(), changed_packages, "main", True, 80)
assert result["status"] == ResultStatus.COVERAGE_FAILED
assert result["stderr"] == "coverage below threshold"
def test_successful_run(changed_packages, package_data):
with (
mock.patch("llama_dev.test.load_pyproject", return_value=package_data),
mock.patch("llama_dev.test.is_python_version_compatible", return_value=True),
mock.patch(
"llama_dev.test._uv_sync",
return_value=mock.Mock(returncode=0),
),
mock.patch("llama_dev.test.get_dep_names", return_value=[]),
mock.patch(
"llama_dev.test._pytest",
return_value=mock.Mock(returncode=0, stdout="all tests passed", stderr=""),
),
):
result = _run_tests(Path(), changed_packages, "main", False, 0)
assert result["status"] == ResultStatus.TESTS_PASSED
assert result["stdout"] == "all tests passed"
assert "time" in result
def test_successful_run_with_coverage(package_data, changed_packages):
"""Test a successful run with coverage checking."""
with (
mock.patch("llama_dev.test.load_pyproject", return_value=package_data),
mock.patch("llama_dev.test.is_python_version_compatible", return_value=True),
mock.patch(
"llama_dev.test._uv_sync",
return_value=mock.Mock(returncode=0),
),
mock.patch("llama_dev.test.get_dep_names", return_value=[]),
mock.patch(
"llama_dev.test._pytest",
return_value=mock.Mock(returncode=0, stdout="all tests passed", stderr=""),
),
mock.patch(
"llama_dev.test._diff_cover",
return_value=mock.Mock(returncode=0, stdout="coverage ok", stderr=""),
),
):
result = _run_tests(Path(), changed_packages, "main", True, 80)
assert result["status"] == ResultStatus.TESTS_PASSED
assert result["stdout"] == "coverage ok"
assert "time" in result
def test__trim():
from llama_dev.test import MAX_CONSOLE_PRINT_LINES, _trim
# Test with a short message (less than MAX_CONSOLE_PRINT_LINES)
short_msg = "Line 1\nLine 2\nLine 3"
assert _trim(False, short_msg) == short_msg
assert _trim(True, short_msg) == short_msg
# Test with a long message (more than MAX_CONSOLE_PRINT_LINES)
long_msg = "\n".join([f"Line {i}" for i in range(1, MAX_CONSOLE_PRINT_LINES + 10)])
# In non-debug mode, the message should be truncated
trimmed = _trim(False, long_msg)
trimmed_lines = trimmed.split("\n")
# Should have MAX_CONSOLE_PRINT_LINES lines plus the additional "truncated" message line
assert len(trimmed_lines) == MAX_CONSOLE_PRINT_LINES + 1
# The first MAX_CONSOLE_PRINT_LINES lines should be from the original message
for i in range(MAX_CONSOLE_PRINT_LINES):
assert trimmed_lines[i] == f"Line {i + 1}"
# The last line should be the truncation message
assert (
"<-- llama-dev: output truncated, pass '--debug' to see the full log -->"
in trimmed_lines[-1]
)
# In debug mode, the message should not be truncated
debug_trimmed = _trim(True, long_msg)
assert debug_trimmed == long_msg
assert (
len(debug_trimmed.split("\n")) == MAX_CONSOLE_PRINT_LINES + 9
) # Original number of lines
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-dev/tests/test/test_test.py",
"license": "MIT License",
"lines": 487,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/amazon_bestsellers.py | from typing import Any
from llama_index.readers.oxylabs.base import OxylabsBaseReader
from oxylabs.sources.response import Response
class OxylabsAmazonBestsellersReader(OxylabsBaseReader):
"""
Get data from Amazon Best Sellers pages.
https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/amazon/best-sellers
"""
top_level_header: str = "Bestsellers"
def __init__(self, username: str, password: str, **data) -> None:
super().__init__(username=username, password=password, **data)
@classmethod
def class_name(cls) -> str:
return "OxylabsAmazonBestsellersReader"
def get_response(self, payload: dict[str, Any]) -> Response:
return self.oxylabs_api.amazon.scrape_bestsellers(**payload)
async def aget_response(self, payload: dict[str, Any]) -> Response:
return await self.async_oxylabs_api.amazon.scrape_bestsellers(**payload)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/amazon_bestsellers.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/amazon_pricing.py | from typing import Any
from llama_index.readers.oxylabs.base import OxylabsBaseReader
from oxylabs.sources.response import Response
class OxylabsAmazonPricingReader(OxylabsBaseReader):
"""
Get data about Amazon product offer listings.
https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/amazon/pricing
"""
top_level_header: str = "Product pricing data"
def __init__(self, username: str, password: str, **data) -> None:
super().__init__(username=username, password=password, **data)
@classmethod
def class_name(cls) -> str:
return "OxylabsAmazonPricingReader"
def get_response(self, payload: dict[str, Any]) -> Response:
return self.oxylabs_api.amazon.scrape_pricing(**payload)
async def aget_response(self, payload: dict[str, Any]) -> Response:
return await self.async_oxylabs_api.amazon.scrape_pricing(**payload)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/amazon_pricing.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/amazon_product.py | from typing import Any
from llama_index.readers.oxylabs.base import OxylabsBaseReader
from oxylabs.sources.response import Response
class OxylabsAmazonProductReader(OxylabsBaseReader):
"""
Get data about Amazon product.
https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/amazon/product
"""
top_level_header: str = "Products"
def __init__(self, username: str, password: str, **data) -> None:
super().__init__(username=username, password=password, **data)
@classmethod
def class_name(cls) -> str:
return "OxylabsAmazonProductReader"
def get_response(self, payload: dict[str, Any]) -> Response:
return self.oxylabs_api.amazon.scrape_product(**payload)
async def aget_response(self, payload: dict[str, Any]) -> Response:
return await self.async_oxylabs_api.amazon.scrape_product(**payload)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/amazon_product.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/amazon_reviews.py | from typing import Any
from llama_index.readers.oxylabs.base import OxylabsBaseReader
from oxylabs.sources.response import Response
class OxylabsAmazonReviewsReader(OxylabsBaseReader):
"""
Get data about Amazon product reviews.
https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/amazon/reviews
"""
top_level_header: str = "Reviews"
def __init__(self, username: str, password: str, **data) -> None:
super().__init__(username=username, password=password, **data)
@classmethod
def class_name(cls) -> str:
return "OxylabsAmazonReviewsReader"
def get_response(self, payload: dict[str, Any]) -> Response:
return self.oxylabs_api.amazon.scrape_reviews(**payload)
async def aget_response(self, payload: dict[str, Any]) -> Response:
return await self.async_oxylabs_api.amazon.scrape_reviews(**payload)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/amazon_reviews.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/amazon_search.py | from typing import Any
from llama_index.readers.oxylabs.base import OxylabsBaseReader
from oxylabs.sources.response import Response
class OxylabsAmazonSearchReader(OxylabsBaseReader):
"""
Get data from the Amazon Search page.
https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/amazon/search
"""
top_level_header: str = "Search Results"
def __init__(self, username: str, password: str, **data) -> None:
super().__init__(username=username, password=password, **data)
@classmethod
def class_name(cls) -> str:
return "OxylabsAmazonSearchReader"
def get_response(self, payload: dict[str, Any]) -> Response:
return self.oxylabs_api.amazon.scrape_search(**payload)
async def aget_response(self, payload: dict[str, Any]) -> Response:
return await self.async_oxylabs_api.amazon.scrape_search(**payload)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/amazon_search.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/amazon_sellers.py | from typing import Any
from llama_index.readers.oxylabs.base import OxylabsBaseReader
from oxylabs.sources.response import Response
class OxylabsAmazonSellersReader(OxylabsBaseReader):
"""
Get data about Amazon merchants.
https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/amazon/sellers
"""
top_level_header: str = "Sellers"
def __init__(self, username: str, password: str, **data) -> None:
super().__init__(username=username, password=password, **data)
@classmethod
def class_name(cls) -> str:
return "OxylabsAmazonSellersReader"
def get_response(self, payload: dict[str, Any]) -> Response:
return self.oxylabs_api.amazon.scrape_sellers(**payload)
async def aget_response(self, payload: dict[str, Any]) -> Response:
return await self.async_oxylabs_api.amazon.scrape_sellers(**payload)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/amazon_sellers.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/base.py | import abc
from platform import architecture, python_version
from typing import Any, Optional
from importlib.metadata import version
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from llama_index.readers.oxylabs.utils import json_to_markdown
from oxylabs import RealtimeClient, AsyncClient
from oxylabs.sources.response import Response
class OxylabsBaseReader(BasePydanticReader, abc.ABC):
"""
Oxylabs Scraper base class.
https://developers.oxylabs.io/scraper-apis/web-scraper-api
"""
top_level_header: Optional[str] = None
timeout_s: int = 100
oxylabs_scraper_url: str = "https://realtime.oxyserps-dev.fun/v1/queries"
oxylabs_api: RealtimeClient
async_oxylabs_api: AsyncClient
def __init__(self, username: str, password: str, **data) -> None:
bits, _ = architecture()
sdk_type = (
f"oxylabs-llama-index-oxy-sdk-python/"
f"{version('llama-index-readers-oxylabs')} "
f"({python_version()}; {bits})"
)
data["oxylabs_api"] = RealtimeClient(username, password, sdk_type=sdk_type)
data["async_oxylabs_api"] = AsyncClient(username, password, sdk_type=sdk_type)
super().__init__(**data)
def _get_document_from_response(
self, response: list[dict] | list[list[dict]]
) -> Document:
processed_content = json_to_markdown(response, 0, self.top_level_header)
return Document(text=processed_content)
def load_data(self, payload: dict[str, Any]) -> list[Document]:
"""
Load data from Oxylabs API into the list of Documents.
Args:
payload (dict): Oxylabs API parameters as described
[here](https://developers.oxylabs.io/scraper-apis/
web-scraper-api/targets/generic-target#additional).
Returns:
List[Document]: List of documents.
"""
response = self.get_response(payload)
validated_responses = self._validate_response(response)
return [self._get_document_from_response(validated_responses)]
async def aload_data(self, payload: dict[str, Any]) -> list[Document]:
response = await self.aget_response(payload)
validated_responses = self._validate_response(response)
return [self._get_document_from_response(validated_responses)]
def get_response(self, payload: dict[str, Any]) -> Response:
raise NotImplementedError
async def aget_response(self, payload: dict[str, Any]) -> Response:
raise NotImplementedError
@staticmethod
def _validate_response(
response: Any,
) -> list[dict[Any, Any]] | list[list[dict[Any, Any]]]:
"""
Validate Oxylabs response format and unpack data.
"""
validated_results = []
try:
result_pages = response.raw["results"]
if not isinstance(result_pages, list) or not result_pages:
raise ValueError("No results returned!")
for result_page in result_pages:
result_page = dict(result_page)
content = result_page["content"]
if isinstance(content, list):
validated_results.append(content)
continue
if not isinstance(content, dict):
raise ValueError(
"Result `content` format error,"
" try setting parameter `parse` to True"
)
if "results" in content:
result = content["results"]
if isinstance(result, list):
validated_results.append(result)
elif isinstance(result, dict):
validated_results.append(result)
else:
raise ValueError("Response format Error!")
else:
validated_results.append(content)
return validated_results
except (KeyError, IndexError, TypeError, ValueError) as exc:
raise RuntimeError(f"Response Validation Error: {exc!s}") from exc
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/base.py",
"license": "MIT License",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/google_ads.py | from typing import Any
from oxylabs.sources.response import Response
from llama_index.readers.oxylabs.google_base import OxylabsGoogleBaseReader
class OxylabsGoogleAdsReader(OxylabsGoogleBaseReader):
"""
Get Google Search results data with paid ads.
https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/google/ads
"""
def __init__(self, username: str, password: str, **data) -> None:
super().__init__(username=username, password=password, **data)
@classmethod
def class_name(cls) -> str:
return "OxylabsGoogleAdsReader"
def get_response(self, payload: dict[str, Any]) -> Response:
return self.oxylabs_api.google.scrape_ads(**payload)
async def aget_response(self, payload: dict[str, Any]) -> Response:
return await self.async_oxylabs_api.google.scrape_ads(**payload)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/google_ads.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/google_base.py | from dataclasses import dataclass
from typing import Optional, Any
from llama_index.core import Document
from oxylabs.sources.response import Response
from llama_index.readers.oxylabs.base import OxylabsBaseReader
RESULT_CATEGORIES = [
"knowledge_graph",
"combined_search_result",
"product_information",
"local_information",
"search_information",
]
@dataclass
class ResponseElement:
tag: str
display_tag: str
path_: str
python_type: str
parent: Optional["ResponseElement"]
class OxylabsGoogleBaseReader(OxylabsBaseReader):
parsing_recursion_depth: int = 5
image_binary_content_attributes: list[str] = ["image_data", "data"]
excluded_result_attributes: list[str] = ["pos_overall"]
image_binary_content_array_attribute: str = "images"
binary_content_replacement: str = "Redacted base64 image string..."
include_binary_image_data: bool = False
def __init__(self, username: str, password: str, **data) -> None:
super().__init__(username=username, password=password, **data)
def _get_document_from_response(
self, response: list[dict] | list[list[dict]]
) -> Document:
processed_content = self._process_responses(response)
return Document(text=processed_content)
def get_response(self, payload: dict) -> Response:
raise NotImplementedError(
"Not implemented in the base class! Use one the child classes instead!"
)
async def aget_response(self, payload: dict) -> Response:
raise NotImplementedError(
"Not implemented in the base class! Use one the child classes instead!"
)
@staticmethod
def validate_response_categories(result_categories: list) -> list:
validated_categories = []
for result_category in result_categories:
if result_category in RESULT_CATEGORIES:
validated_categories.append(result_category)
return validated_categories
def _process_responses(self, res: list[dict], **kwargs: Any) -> str:
result_ = "No good search result found"
result_category_processing_map = {
"knowledge_graph": self._create_knowledge_graph_snippets,
"combined_search_result": self._create_combined_search_result_snippets,
"product_information": self._create_product_information_snippets,
"local_information": self._create_local_information_snippets,
"search_information": self._create_search_information_snippets,
}
snippets: list[str] = []
validated_categories = self.validate_response_categories(
kwargs.get("result_categories", [])
)
result_categories_ = validated_categories or []
for validated_response in res:
if result_categories_:
for result_category in result_categories_:
result_category_processing_map[result_category](
validated_response, snippets
)
else:
for result_category in result_category_processing_map:
result_category_processing_map[result_category](
validated_response, snippets
)
if snippets:
result_ = "\n\n".join(snippets)
return result_
def _process_tags(
self, snippets_: list, tags_: list, results: dict, group_name: str = ""
) -> None:
check_tags = [tag_[0] in results for tag_ in tags_]
if any(check_tags):
for tag in tags_:
tag_content = results.get(tag[0], {}) or {}
if tag_content:
collected_snippets = self._recursive_snippet_collector(
tag_content,
max_depth=self.parsing_recursion_depth,
current_depth=0,
parent_=ResponseElement(
path_=f"{group_name}-{tag[0]}",
tag=tag[0],
display_tag=tag[1],
python_type=str(type(tag_content)),
parent=None,
),
)
if collected_snippets:
snippets_.append(collected_snippets)
def _recursive_snippet_collector(
self,
target_structure: Any,
max_depth: int,
current_depth: int,
parent_: ResponseElement,
) -> str:
target_snippets: list[str] = []
padding_multiplier = current_depth + 1
recursion_padding = " " * padding_multiplier
if current_depth >= max_depth:
return "\n".join(target_snippets)
if isinstance(target_structure, (str, float, int)):
self._recursion_process_simple_types(
parent_, recursion_padding, target_snippets, target_structure
)
elif isinstance(target_structure, dict):
self.recursion_process_dict(
current_depth,
max_depth,
parent_,
recursion_padding,
target_snippets,
target_structure,
)
elif isinstance(target_structure, (list, tuple)):
self.recursion_process_array(
current_depth,
max_depth,
parent_,
recursion_padding,
target_snippets,
target_structure,
)
return "\n".join(target_snippets)
def recursion_process_array(
self,
current_depth: int,
max_depth: int,
parent_: ResponseElement,
recursion_padding: str,
target_snippets: list,
target_structure: Any,
) -> None:
if target_structure:
target_snippets.append(
f"{recursion_padding}{parent_.display_tag.upper()} ITEMS: "
)
for nr_, element_ in enumerate(target_structure):
target_snippets.append(
self._recursive_snippet_collector(
element_,
max_depth=max_depth,
current_depth=current_depth + 1,
parent_=ResponseElement(
path_=f"{parent_.path_.upper()}-ITEM-{nr_ + 1}",
tag=parent_.tag.upper(),
display_tag=f"{parent_.tag.upper()}-ITEM-{nr_ + 1}",
python_type=str(type(target_structure)),
parent=parent_,
),
)
)
def recursion_process_dict(
self,
current_depth: int,
max_depth: int,
parent_: ResponseElement,
recursion_padding: str,
target_snippets: list,
target_structure: Any,
) -> None:
if not target_structure:
return
target_snippets.append(f"{recursion_padding}{parent_.display_tag.upper()}: ")
for key_, value_ in target_structure.items():
if isinstance(value_, dict) and value_:
target_snippets.append(f"{recursion_padding}{key_.upper()}: ")
target_snippets.append(
self._recursive_snippet_collector(
value_,
max_depth=max_depth,
current_depth=current_depth + 1,
parent_=ResponseElement(
path_=f"{parent_.path_.upper()}-{key_.upper()}",
tag=key_.upper(),
display_tag=key_.upper(),
python_type=str(type(value_)),
parent=parent_,
),
)
)
elif isinstance(value_, (list, tuple)) and value_:
target_snippets.append(f"{recursion_padding}{key_.upper()} ITEMS: ")
for nr_, _element in enumerate(value_):
target_snippets.append(
self._recursive_snippet_collector(
_element,
max_depth=max_depth,
current_depth=current_depth + 1,
parent_=ResponseElement(
path_=f"{parent_.path_.upper()}"
f"-{key_.upper()}-ITEM-{nr_ + 1}",
tag=key_.upper(),
display_tag=f"{key_.upper()}-ITEM-{nr_ + 1}",
python_type=str(type(value_)),
parent=parent_,
),
)
)
elif isinstance(value_, (str, float, int)) and value_:
if (
key_ in self.image_binary_content_attributes
and not self.include_binary_image_data
):
value_ = self.binary_content_replacement
if key_ not in self.excluded_result_attributes:
target_snippets.append(
f"{recursion_padding}{key_.upper()}: {value_!s}"
)
def _recursion_process_simple_types(
self,
parent_: ResponseElement,
recursion_padding: str,
target_snippets: list,
target_structure: Any,
) -> None:
if not target_structure:
return
if parent_.python_type == str(type([])):
if (
self.image_binary_content_array_attribute.upper()
in parent_.path_.split("-")[-3:]
or parent_.tag.lower() in self.image_binary_content_attributes
) and not self.include_binary_image_data:
target_structure = self.binary_content_replacement
target_snippets.append(
f"{recursion_padding}{parent_.display_tag}: {target_structure!s}"
)
elif parent_.python_type == str(type({})):
if (
parent_.tag.lower() in self.image_binary_content_attributes
and not self.include_binary_image_data
):
target_structure = self.binary_content_replacement
if parent_.tag.lower() not in self.excluded_result_attributes:
target_snippets.append(
f"{recursion_padding}{parent_.display_tag}: {target_structure!s}"
)
def _create_knowledge_graph_snippets(
self, results: dict, knowledge_graph_snippets: list
) -> None:
knowledge_graph_tags = [
("knowledge", "Knowledge Graph"),
("recipes", "Recipes"),
("item_carousel", "Item Carousel"),
("apps", "Apps"),
]
self._process_tags(
knowledge_graph_snippets, knowledge_graph_tags, results, "Knowledge"
)
def _create_combined_search_result_snippets(
self, results: dict, combined_search_result_snippets: list
) -> None:
combined_search_result_tags = [
("organic", "Organic Results"),
("organic_videos", "Organic Videos"),
("paid", "Paid Results"),
("featured_snipped", "Feature Snipped"),
("top_stories", "Top Stories"),
("finance", "Finance"),
("sports_games", "Sports Games"),
("twitter", "Twitter"),
("discussions_and_forums", "Discussions and Forums"),
("images", "Images"),
("videos", "Videos"),
("video_box", "Video box"),
]
self._process_tags(
combined_search_result_snippets,
combined_search_result_tags,
results,
"Combined Search Results",
)
def _create_product_information_snippets(
self, results: dict, product_information_snippets: list
) -> None:
product_information_tags = [
("popular_products", "Popular Products"),
("pla", "Product Listing Ads (PLA)"),
]
self._process_tags(
product_information_snippets,
product_information_tags,
results,
"Product Information",
)
def _create_local_information_snippets(
self, results: dict, local_information_snippets: list
) -> None:
local_information_tags = [
("top_sights", "Top Sights"),
("flights", "Flights"),
("hotels", "Hotels"),
("local_pack", "Local Pack"),
("local_service_ads", "Local Service Ads"),
("jobs", "Jobs"),
]
self._process_tags(
local_information_snippets,
local_information_tags,
results,
"Local Information",
)
def _create_search_information_snippets(
self, results: dict, search_information_snippets: list
) -> None:
search_information_tags = [
("search_information", "Search Information"),
("related_searches", "Related Searches"),
("related_searches_categorized", "Related Searches Categorized"),
("related_questions", "Related Questions"),
]
self._process_tags(
search_information_snippets,
search_information_tags,
results,
"Search Information",
)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/google_base.py",
"license": "MIT License",
"lines": 327,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/google_search.py | from typing import Any
from oxylabs.sources.response import Response
from llama_index.readers.oxylabs.google_base import OxylabsGoogleBaseReader
class OxylabsGoogleSearchReader(OxylabsGoogleBaseReader):
"""
Get Google Search results data.
https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/google/search/search
"""
def __init__(self, username: str, password: str, **data) -> None:
super().__init__(username=username, password=password, **data)
@classmethod
def class_name(cls) -> str:
return "OxylabsGoogleSearchReader"
def get_response(self, payload: dict[str, Any]) -> Response:
return self.oxylabs_api.google.scrape_search(**payload)
async def aget_response(self, payload: dict[str, Any]) -> Response:
return await self.async_oxylabs_api.google.scrape_search(**payload)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/google_search.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/utils.py | from typing import Any, Optional
def json_to_markdown(data: Any, level: int = 0, header: Optional[str] = None) -> str:
"""
Recursively converts a Python object (from JSON) into a Markdown string.
Args:
data: The Python object to convert.
level: The current nesting level (used for indentation and heading levels).
header: Section header.
Returns:
A string containing the Markdown representation of the data.
"""
markdown_parts = []
indent = " " * level
if isinstance(data, dict):
for key, value in data.items():
heading_level = min(level + 1, 6)
markdown_parts.append(f"{indent}{'#' * heading_level} {key}\n")
markdown_parts.append(json_to_markdown(value, level + 1))
markdown_parts.append("\n")
elif isinstance(data, list):
if not data:
markdown_parts.append(f"{indent}- *Empty List*\n")
else:
if header:
markdown_parts.append(f"# {header}\n")
for index, item in enumerate(data):
if isinstance(item, (dict, list)):
markdown_parts.append(f"{indent}- Item {index + 1}:\n")
markdown_parts.append(json_to_markdown(item, level + 1))
else:
markdown_parts.append(f"{indent}- {item!s}\n")
elif isinstance(data, str):
if "\n" in data:
# nl var to enable the usage of this symbol inside f-string expressions
nl = "\n"
markdown_parts.append(f"{indent}> {data.replace(nl, nl + indent + '> ')}\n")
else:
markdown_parts.append(f"{indent}{data}\n")
elif isinstance(data, (int, float, bool)) or data is None:
markdown_parts.append(f"{indent}{data!s}\n")
else:
markdown_parts.append(f"{indent}{data!s}\n")
return "".join(markdown_parts).rstrip("\n") + "\n"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/utils.py",
"license": "MIT License",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/youtube_transcripts.py | from typing import Any
from llama_index.readers.oxylabs.base import OxylabsBaseReader
from oxylabs.sources.response import Response
class OxylabsYoutubeTranscriptReader(OxylabsBaseReader):
"""
Get YouTube video transcripts.
https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/youtube/youtube-transcript
"""
top_level_header: str = "YouTube video transcripts"
def __init__(self, username: str, password: str, **data) -> None:
super().__init__(username=username, password=password, **data)
@classmethod
def class_name(cls) -> str:
return "OxylabsYoutubeTranscriptReader"
def get_response(self, payload: dict[str, Any]) -> Response:
return self.oxylabs_api.youtube_transcript.scrape_transcript(**payload)
async def aget_response(self, payload: dict[str, Any]) -> Response:
return await self.async_oxylabs_api.youtube_transcript.scrape_transcript(
**payload
)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/youtube_transcripts.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-oxylabs/tests/test_readers_oxylabs.py | import os
from pathlib import Path
from unittest.mock import MagicMock, AsyncMock
import pytest
from llama_index.core.readers.base import BaseReader
from llama_index.readers.oxylabs.base import OxylabsBaseReader
from llama_index.readers.oxylabs import (
OxylabsAmazonBestsellersReader,
OxylabsAmazonPricingReader,
OxylabsAmazonProductReader,
OxylabsAmazonReviewsReader,
OxylabsAmazonSellersReader,
OxylabsAmazonSearchReader,
OxylabsGoogleAdsReader,
OxylabsGoogleSearchReader,
OxylabsYoutubeTranscriptReader,
)
TEST_ROOT = Path(__file__).parent.resolve()
def test_class():
names_of_base_classes = [b.__name__ for b in OxylabsBaseReader.__mro__]
assert BaseReader.__name__ in names_of_base_classes
def get_response() -> MagicMock:
mock = MagicMock()
mock.raw = {
"results": [{"content": {"key1": "value1", "key2": "value2"}}],
"job": {"job_id": 42424242},
}
return mock
READER_TESTS_PARAMS = [
pytest.param(
OxylabsAmazonBestsellersReader,
{
"source": "amazon_bestsellers",
"domain": "com",
"query": "120225786011",
"render": "html",
"start_page": 1,
"parse": True,
},
id="amazon_bestsellers_successful_response",
),
pytest.param(
OxylabsAmazonPricingReader,
{
"query": "B087TXHLVQ",
"parse": True,
},
id="amazon_pricing_successful_response",
),
pytest.param(
OxylabsAmazonProductReader,
{
"query": "B087TXHLVQ",
"parse": True,
},
id="amazon_product_successful_response",
),
pytest.param(
OxylabsAmazonReviewsReader,
{
"query": "B087TXHLVQ",
"parse": True,
},
id="amazon_reviews_successful_response",
),
pytest.param(
OxylabsAmazonSellersReader,
{
"query": "A2U55XLSPNCN01",
"parse": True,
},
id="amazon_sellers_successful_response",
),
pytest.param(
OxylabsAmazonSearchReader,
{
"query": "headsets",
"parse": True,
},
id="amazon_search_successful_response",
),
pytest.param(
OxylabsYoutubeTranscriptReader,
{
"query": "SLoqvcnwwN4",
"context": [
{"key": "language_code", "value": "en"},
{"key": "transcript_origin", "value": "uploader_provided"},
],
},
id="youtube_transcript_response",
),
]
@pytest.mark.parametrize(
("reader_class", "payload"),
READER_TESTS_PARAMS,
)
@pytest.mark.unit
def test_sync_oxylabs_readers(
reader_class: type[OxylabsBaseReader],
payload: dict,
):
get_response_mock = MagicMock()
get_response_mock.return_value = get_response()
reader_class.get_response = get_response_mock
reader = reader_class(
username="OXYLABS_USERNAME",
password="OXYLABS_PASSWORD",
)
docs = reader.load_data(payload)
assert (
docs[0].text == f"# {reader.top_level_header}\n"
f"- Item 1:\n ## key1\n value1\n\n ## key2\n value2\n"
)
assert get_response_mock.call_args[0][0] == payload
@pytest.mark.parametrize(
("reader_class", "payload"),
READER_TESTS_PARAMS,
)
@pytest.mark.asyncio
@pytest.mark.unit
async def test_async_oxylabs_readers(
reader_class: type[OxylabsBaseReader],
payload: dict,
):
get_response_mock = AsyncMock()
get_response_mock.return_value = get_response()
reader_class.aget_response = get_response_mock
reader = reader_class(
username="OXYLABS_USERNAME",
password="OXYLABS_PASSWORD",
)
docs = await reader.aload_data(payload)
assert (
docs[0].text == f"# {reader.top_level_header}\n"
f"- Item 1:\n ## key1\n value1\n\n ## key2\n value2\n"
)
assert get_response_mock.call_args[0][0] == payload
GOOGLE_READER_TESTS_PARAMS = [
pytest.param(
OxylabsGoogleSearchReader,
"google_search",
{
"query": "iPhone 16",
"parse": True,
},
id="google_search_successful_response",
),
pytest.param(
OxylabsGoogleAdsReader,
"google_ads",
{
"query": "iPhone 16",
"parse": True,
},
id="google_ads_successful_response",
),
]
@pytest.mark.skipif(
not (os.environ.get("OXYLABS_USERNAME") and os.environ.get("OXYLABS_PASSWORD")),
reason="No Oxylabs creds",
)
@pytest.mark.parametrize(
("reader_class", "name", "payload"),
GOOGLE_READER_TESTS_PARAMS,
)
@pytest.mark.integration
def test_sync_google_oxylabs_readers(
reader_class: type[OxylabsBaseReader],
name: str,
payload: dict,
):
reader = reader_class(
username=os.environ.get("OXYLABS_USERNAME"),
password=os.environ.get("OXYLABS_PASSWORD"),
)
docs = reader.load_data(payload)
assert len(docs) == 1
text = docs[0].text
assert len(text) > 1000
assert "ORGANIC RESULTS ITEMS" in text
assert "SEARCH INFORMATION" in text
assert "RELATED SEARCHES ITEMS" in text
@pytest.mark.skipif(
not (os.environ.get("OXYLABS_USERNAME") and os.environ.get("OXYLABS_PASSWORD")),
reason="No Oxylabs creds",
)
@pytest.mark.parametrize(
("reader_class", "name", "payload"),
GOOGLE_READER_TESTS_PARAMS,
)
@pytest.mark.asyncio
@pytest.mark.integration
async def test_async_google_oxylabs_readers(
reader_class: type[OxylabsBaseReader],
name: str,
payload: dict,
):
reader = reader_class(
username=os.environ.get("OXYLABS_USERNAME"),
password=os.environ.get("OXYLABS_PASSWORD"),
)
docs = await reader.aload_data(payload)
assert len(docs) == 1
text = docs[0].text
assert len(text) > 1000
assert "ORGANIC RESULTS ITEMS" in text
assert "SEARCH INFORMATION" in text
assert "RELATED SEARCHES ITEMS" in text
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-oxylabs/tests/test_readers_oxylabs.py",
"license": "MIT License",
"lines": 209,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/oxylabs_web/base.py | """Oxylabs Web Reader."""
import asyncio
from typing import Any, Dict, List, Optional, TYPE_CHECKING
from platform import architecture, python_version
from importlib.metadata import version
from llama_index.core.bridge.pydantic import Field
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from markdownify import markdownify
from llama_index.readers.web.oxylabs_web.utils import strip_html, json_to_markdown
if TYPE_CHECKING:
from oxylabs.internal.api import AsyncAPI, RealtimeAPI
def get_default_config() -> dict[str, Any]:
from oxylabs.utils.utils import prepare_config
return prepare_config(async_integration=True)
class OxylabsWebReader(BasePydanticReader):
"""
Scrape any website with Oxylabs Web Scraper API and get results in Markdown format.
[See the API documentation](https://developers.oxylabs.io/scraper-apis/web-scraper-api/other-websites)
Args:
username: Oxylabs API username.
password: Oxylabs API password.
Example:
.. code-block:: python
from llama_index.readers.web.oxylabs_web.base import OxylabsWebReader
reader = OxylabsWebReader(
username=os.environ["OXYLABS_USERNAME"], password=os.environ["OXYLABS_PASSWORD"]
)
docs = reader.load_data(
[
"https://sandbox.oxylabs.io/products/1",
"https://sandbox.oxylabs.io/products/2"
],
{
"parse": True,
}
)
print(docs[0].text)
"""
timeout_s: int = 100
oxylabs_scraper_url: str = "https://realtime.oxylabs.io/v1/queries"
api: "RealtimeAPI"
async_api: "AsyncAPI"
default_config: dict[str, Any] = Field(default_factory=get_default_config)
def __init__(self, username: str, password: str, **kwargs) -> None:
from oxylabs.internal.api import AsyncAPI, APICredentials, RealtimeAPI
credentials = APICredentials(username=username, password=password)
bits, _ = architecture()
sdk_type = (
f"oxylabs-llama-index-web-sdk-python/"
f"{version('llama-index-readers-web')} "
f"({python_version()}; {bits})"
)
api = RealtimeAPI(credentials, sdk_type=sdk_type)
async_api = AsyncAPI(credentials, sdk_type=sdk_type)
super().__init__(api=api, async_api=async_api, **kwargs)
@classmethod
def class_name(cls) -> str:
return "OxylabsWebReader"
def _get_document_from_response(self, response: dict[str, Any]) -> Document:
content = response["results"][0]["content"]
if isinstance(content, (dict, list)):
text = json_to_markdown(content)
else:
striped_html = strip_html(str(content))
text = markdownify(striped_html)
return Document(
metadata={"oxylabs_job": response["job"]},
text=text,
)
async def aload_data(
self,
urls: list[str],
additional_params: Optional[Dict[str, Any]] = None,
) -> List[Document]:
"""
Asynchronously load data from urls.
Args:
urls: List of URLs to load.
additional_params: Dictionary of scraper parameters as described
[here](https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/generic-target#additional)
"""
if additional_params is None:
additional_params = {}
responses = await asyncio.gather(
*[
self.async_api.get_response(
{**additional_params, "url": url},
self.default_config,
)
for url in urls
]
)
return [
self._get_document_from_response(response)
for response in responses
if response
]
def load_data(
self,
urls: list[str],
additional_params: Optional[Dict[str, Any]] = None,
) -> List[Document]:
"""
Load data from urls.
Args:
urls: List of URLs to load.
additional_params: Dictionary of scraper parameters as described
[here](https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/generic-target#additional)
"""
if additional_params is None:
additional_params = {}
responses = [
self.api.get_response(
{**additional_params, "url": url},
self.default_config,
)
for url in urls
]
return [
self._get_document_from_response(response)
for response in responses
if response
]
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/oxylabs_web/base.py",
"license": "MIT License",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/oxylabs_web/utils.py | import re
from typing import Any, Optional
from lxml.html import defs, fromstring, tostring
from lxml.html.clean import Cleaner
def clean_html(html: str) -> str:
"""Clean an HTML string."""
cleaner = Cleaner(
scripts=True,
javascript=True,
style=True,
remove_tags=[],
kill_tags=["nav", "svg", "footer", "noscript", "script", "form"],
safe_attrs=[*list(defs.safe_attrs), "idx"],
comments=True,
inline_style=True,
links=True,
meta=False,
page_structure=False,
embedded=True,
frames=False,
forms=False,
annoying_tags=False,
)
return cleaner.clean_html(html) # type: ignore[no-any-return]
def strip_html(html: str) -> str:
"""
Simplify an HTML string.
Will remove unwanted elements, attributes, and redundant content
Args:
html (str): The input HTML string.
Returns:
str: The cleaned and simplified HTML string.
"""
cleaned_html = clean_html(html)
html_tree = fromstring(cleaned_html)
for element in html_tree.iter():
if "style" in element.attrib:
del element.attrib["style"]
if (
(
not element.attrib
or (len(element.attrib) == 1 and "idx" in element.attrib)
)
and not element.getchildren() # type: ignore[attr-defined]
and (not element.text or not element.text.strip())
and (not element.tail or not element.tail.strip())
):
parent = element.getparent()
if parent is not None:
parent.remove(element)
xpath_query = (
".//*[contains(@class, 'footer') or contains(@id, 'footer') or "
"contains(@class, 'hidden') or contains(@id, 'hidden')]"
)
elements_to_remove = html_tree.xpath(xpath_query)
for element in elements_to_remove: # type: ignore[assignment, union-attr]
parent = element.getparent()
if parent is not None:
parent.remove(element)
stripped_html = tostring(html_tree, encoding="unicode")
stripped_html = re.sub(r"\s{2,}", " ", stripped_html)
return re.sub(r"\n{2,}", "", stripped_html)
def json_to_markdown(data: Any, level: int = 0, header: Optional[str] = None) -> str:
"""
Recursively converts a Python object (from JSON) into a Markdown string.
Args:
data: The Python object to convert.
level: The current nesting level (used for indentation and heading levels).
header: Section header.
Returns:
A string containing the Markdown representation of the data.
"""
markdown_parts = []
indent = " " * level
if isinstance(data, dict):
for key, value in data.items():
heading_level = min(level + 1, 6)
markdown_parts.append(f"{indent}{'#' * heading_level} {key}\n")
markdown_parts.append(json_to_markdown(value, level + 1))
markdown_parts.append("\n")
elif isinstance(data, list):
if not data:
markdown_parts.append(f"{indent}- *Empty List*\n")
else:
if header:
markdown_parts.append(f"{indent}- *{header}*\n")
for index, item in enumerate(data):
if isinstance(item, (dict, list)):
markdown_parts.append(f"{indent}- Item {index + 1}:\n")
markdown_parts.append(json_to_markdown(item, level + 1))
else:
markdown_parts.append(f"{indent}- {item!s}\n")
elif isinstance(data, str):
if "\n" in data:
cleaned_data = data.replace("\n", "\n" + indent + "> ")
markdown_parts.append(f"{indent}> {cleaned_data}\n")
else:
markdown_parts.append(f"{indent}{data}\n")
elif isinstance(data, (int, float, bool)) or data is None:
markdown_parts.append(f"{indent}{data!s}\n")
else:
markdown_parts.append(f"{indent}{data!s}\n")
return "".join(markdown_parts).rstrip("\n") + "\n"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/oxylabs_web/utils.py",
"license": "MIT License",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-web/tests/test_readers_oxylabs.py | from unittest.mock import MagicMock, AsyncMock
import pytest
import sys
from llama_index.readers.web.oxylabs_web.base import OxylabsWebReader
READER_TEST_PARAM = pytest.param(
[
"https://sandbox.oxylabs.io/products/1",
"https://sandbox.oxylabs.io/products/2",
],
{
"parse": True,
},
{
"results": [{"content": {"key1": "value1", "key2": "value2"}}],
"job": {"job_id": 42424242},
},
"# key1\n value1\n\n# key2\n value2\n",
id="response_success",
)
skip_if_py39_or_lower = sys.version_info < (3, 10)
@pytest.mark.skipif(skip_if_py39_or_lower, reason="Pytest does not support Python 3.9")
@pytest.mark.parametrize(
("urls", "additional_params", "return_value", "expected_output"),
[READER_TEST_PARAM],
)
def test_sync_oxylabs_reader(
urls: list[str],
additional_params: dict,
return_value: dict,
expected_output: str,
):
reader = OxylabsWebReader(
username="OXYLABS_USERNAME",
password="OXYLABS_PASSWORD",
)
get_response_mock = MagicMock()
get_response_mock.return_value = return_value
reader.api.get_response = get_response_mock
docs = reader.load_data(urls, additional_params)
for doc in docs:
assert doc.text == expected_output
@pytest.mark.skipif(skip_if_py39_or_lower, reason="Pytest does not support Python 3.9")
@pytest.mark.parametrize(
("urls", "additional_params", "return_value", "expected_output"),
[READER_TEST_PARAM],
)
@pytest.mark.asyncio
async def test_async_oxylabs_reader(
urls: list[str],
additional_params: dict,
return_value: dict,
expected_output: str,
):
reader = OxylabsWebReader(
username="OXYLABS_USERNAME",
password="OXYLABS_PASSWORD",
)
get_response_mock = AsyncMock()
get_response_mock.return_value = return_value
reader.async_api.get_response = get_response_mock
docs = await reader.aload_data(urls, additional_params)
for doc in docs:
assert doc.text == expected_output
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-web/tests/test_readers_oxylabs.py",
"license": "MIT License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-dev/llama_dev/cli.py | from pathlib import Path
import click
from rich.console import Console
from rich.theme import Theme
from .pkg import pkg
from .release import release
from .test import test
LLAMA_DEV_THEME = Theme(
{
"repr.path": "",
"repr.filename": "",
"repr.str": "",
"traceback.note": "cyan",
"info": "dim cyan",
"warning": "magenta",
"error": "bold red",
}
)
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
@click.version_option()
@click.option(
"--repo-root",
default=".",
help="Path to the llama_index repository, defaults to '.'",
)
@click.option("--debug", is_flag=True, help="Enable verbose output.")
@click.pass_context
def cli(ctx, repo_root: str, debug: bool):
"""The official CLI for development, testing, and automation in the LlamaIndex monorepo."""
ctx.obj = {
"console": Console(theme=LLAMA_DEV_THEME, soft_wrap=True),
"repo_root": Path(repo_root).resolve(),
"debug": debug,
}
cli.add_command(pkg)
cli.add_command(test)
cli.add_command(release)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-dev/llama_dev/cli.py",
"license": "MIT License",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-dev/llama_dev/pkg/cmd_exec.py | import os
import subprocess
from pathlib import Path
import click
from llama_dev.utils import find_all_packages, is_llama_index_package
@click.command(short_help="Exec a command inside a package folder")
@click.option(
"--fail-fast",
is_flag=True,
default=False,
help="Exit the command at the first failure",
)
@click.option(
"--all",
is_flag=True,
help="Get info for all the packages in the monorepo",
)
@click.argument("package_names", required=False, nargs=-1)
@click.option(
"--cmd",
required=True,
help="The command to execute (use quotes around the full command)",
)
@click.option(
"--silent",
is_flag=True,
default=False,
help="Only print errors",
)
@click.pass_obj
def cmd_exec(
obj: dict, all: bool, package_names: tuple, cmd: str, fail_fast: bool, silent: bool
):
if not all and not package_names:
raise click.UsageError("Either specify a package name or use the --all flag")
console = obj["console"]
packages: set[Path] = set()
# Do not use the virtual environment calling llama-dev, if any
env = os.environ.copy()
if "VIRTUAL_ENV" in env:
del env["VIRTUAL_ENV"]
if all:
packages = set(find_all_packages(obj["repo_root"]))
else:
for package_name in package_names:
package_path = obj["repo_root"] / package_name
if not is_llama_index_package(package_path):
raise click.UsageError(
f"{package_name} is not a path to a LlamaIndex package"
)
packages.add(package_path)
with console.status(f"[bold green]Running '{cmd}'...") as status:
for package in packages:
result = subprocess.run(
cmd.split(" "),
cwd=package,
text=True,
capture_output=True,
env=env,
)
if result.returncode != 0:
msg = f"Command '{cmd}' failed in {package.relative_to(obj['repo_root'])}: {result.stderr}"
if fail_fast:
raise click.ClickException(msg)
else:
console.print(msg, style="bold red")
else:
if not silent:
console.print(result.stdout)
console.log(
f"Command succeeded in {package.relative_to(obj['repo_root'])}"
)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-dev/llama_dev/pkg/cmd_exec.py",
"license": "MIT License",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-dev/llama_dev/pkg/info.py | import json
import click
from rich.table import Table
from llama_dev.utils import find_all_packages, is_llama_index_package, load_pyproject
@click.command(short_help="Get package details")
@click.argument("package_names", required=False, nargs=-1)
@click.option(
"--all",
is_flag=True,
help="Get info for all the packages in the monorepo",
)
@click.option(
"--json",
"use_json",
is_flag=True,
default=False,
help="Use JSON as the output format",
)
@click.pass_obj
def info(obj: dict, all: bool, use_json: bool, package_names: tuple):
if not all and not package_names:
raise click.UsageError("Either specify a package name or use the --all flag")
packages = set()
if all:
packages = find_all_packages(obj["repo_root"])
else:
for package_name in package_names:
package_path = obj["repo_root"] / package_name
if not is_llama_index_package(package_path):
raise click.UsageError(
f"{package_name} is not a path to a LlamaIndex package"
)
packages.add(package_path)
if use_json:
data = {}
for package in packages:
package_data = load_pyproject(package)
data["name"] = package_data["project"]["name"]
data["version"] = package_data["project"]["version"]
data["path"] = str(package)
obj["console"].print(json.dumps(data))
else:
table = Table(box=None)
table.add_column("Name")
table.add_column("Version")
table.add_column("Path")
for package in packages:
package_data = load_pyproject(package)
table.add_row(
package_data["project"]["name"],
package_data["project"]["version"],
str(package),
)
obj["console"].print(table)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-dev/llama_dev/pkg/info.py",
"license": "MIT License",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-dev/llama_dev/utils.py | import re
import subprocess
import sys
from enum import Enum
from pathlib import Path
import tomli
from packaging import specifiers, version
from packaging.version import Version
DEP_NAME_REGEX = re.compile(r"([^<>=\[\];\s]+)")
class BumpType(str, Enum):
MAJOR = "major"
MINOR = "minor"
PATCH = "patch"
def bump_version(current_version: str, bump_type: BumpType) -> str:
"""Bump a version string according to semver rules."""
v = Version(current_version)
# Parse the version components
release = v.release
major = release[0] if len(release) > 0 else 0
minor = release[1] if len(release) > 1 else 0
micro = release[2] if len(release) > 2 else 0
version_str = ""
if bump_type == BumpType.MAJOR:
version_str = f"{major + 1}.0.0"
elif bump_type == BumpType.MINOR:
version_str = f"{major}.{minor + 1}.0"
elif bump_type == BumpType.PATCH:
version_str = f"{major}.{minor}.{micro + 1}"
return version_str
def update_pyproject_version(package_path: Path, new_version: str) -> None:
"""Update the version in a pyproject.toml file."""
pyproject_path = package_path / "pyproject.toml"
# Read the file content
with open(pyproject_path, "r") as f:
content = f.read()
pattern = r'^version = "[^"]+"'
new_content = re.sub(
pattern, f'version = "{new_version}"', content, flags=re.MULTILINE
)
# Write the updated content back
with open(pyproject_path, "w") as f:
f.write(new_content)
def package_has_tests(package_path: Path) -> bool:
"""Returns whether a package folder contains a 'tests' subfolder."""
tests_folder = package_path / "tests"
return package_path.is_dir() and tests_folder.exists() and tests_folder.is_dir()
def is_llama_index_package(package_path: Path) -> bool:
"""Returns whether a folder contains a 'pyproject.toml' file."""
pyproject = package_path / "pyproject.toml"
return package_path.is_dir() and pyproject.exists() and pyproject.is_file()
def load_pyproject(package_path: Path) -> dict:
"""Thin wrapper around tomli.load()."""
pyproject_path = package_path / "pyproject.toml"
with open(pyproject_path, "rb") as f:
return tomli.load(f)
def find_integrations(root_path: Path, recursive=False) -> list[Path]:
"""Find all integrations packages in the repo."""
package_roots: list[Path] = []
integrations_root = root_path
if not recursive:
integrations_root = integrations_root / "llama-index-integrations"
for category_path in integrations_root.iterdir():
if not category_path.is_dir():
continue
if category_path.name == "storage":
# The "storage" category has sub-folders
package_roots += find_integrations(category_path, recursive=True)
continue
for package_name in category_path.iterdir():
if is_llama_index_package(package_name):
package_roots.append(package_name)
return package_roots
def find_packs(root_path: Path) -> list[Path]:
"""Find all llama-index-packs packages in the repo."""
package_roots: list[Path] = []
packs_root = root_path / "llama-index-packs"
for package_name in packs_root.iterdir():
if is_llama_index_package(package_name):
package_roots.append(package_name)
return package_roots
def find_utils(root_path: Path) -> list[Path]:
"""Find all llama-index-utils packages in the repo."""
package_roots: list[Path] = []
utils_root = root_path / "llama-index-utils"
for package_name in utils_root.iterdir():
if is_llama_index_package(package_name):
package_roots.append(package_name)
return package_roots
def find_all_packages(root_path: Path) -> list[Path]:
"""Returns a list of all the package folders in the monorepo."""
return [
root_path / "llama-index-core",
*find_integrations(root_path),
*find_packs(root_path),
*find_utils(root_path),
root_path / "llama-index-instrumentation",
]
def get_changed_files(repo_root: Path, base_ref: str = "main") -> list[Path]:
"""Use git to get the list of files changed compared to the base branch."""
try:
cmd = ["git", "diff", "--name-only", f"{base_ref}...HEAD"]
result = subprocess.run(cmd, cwd=repo_root, text=True, capture_output=True)
if result.returncode != 0:
raise RuntimeError(f"Git command failed: {result.stderr}")
return [repo_root / Path(f) for f in result.stdout.splitlines() if f.strip()]
except Exception as e:
print(f"Exception occurred: {e!s}")
raise
def get_changed_packages(
changed_files: list[Path], all_packages: list[Path]
) -> set[Path]:
"""Get the list of package folders containing the path in 'changed_files'."""
changed_packages: set[Path] = set()
for file_path in changed_files:
# Find the package containing this file
for pkg_dir in all_packages:
if file_path.absolute().is_relative_to(pkg_dir.absolute()):
changed_packages.add(pkg_dir)
break
return changed_packages
def get_dep_names(pyproject_data: dict) -> set[str]:
"""Load dependencies from pyproject.toml."""
dependencies: set[str] = set()
for dep in pyproject_data["project"]["dependencies"]:
matches = DEP_NAME_REGEX.findall(dep)
if not matches:
continue
dependencies.add(matches[0])
return dependencies
def is_python_version_compatible(pyproject_data: dict) -> bool:
"""Check if the package is compatible with the current Python version using packaging."""
# Get current Python version
current_version = version.Version(
f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
)
# Get Python version requirements if they exist
requires_python = pyproject_data.get("project", {}).get("requires-python")
if requires_python is None:
# If no Python version is specified, assume it's compatible
return True
try:
# Parse the version specifier
spec = specifiers.SpecifierSet(requires_python)
# Check if the current version satisfies the specifier
return spec.contains(str(current_version))
except Exception as e:
# If there's any error in parsing, log it and assume compatibility
print(
f"Warning: Could not parse Python version specifier '{requires_python}': {e}"
)
return True
def get_dependants_packages(
changed_packages: set[Path], all_packages: list[Path]
) -> set[Path]:
"""Get packages containing the files in the changeset."""
changed_packages_names: set[str] = set()
for pkg_path in changed_packages:
pyproject_data = load_pyproject(pkg_path)
changed_packages_names.add(pyproject_data["project"]["name"])
dependants_packages: set[Path] = set()
for pkg_path in all_packages:
pyproject_data = load_pyproject(pkg_path)
for dep_name in get_dep_names(pyproject_data):
if dep_name in changed_packages_names:
dependants_packages.add(pkg_path)
return dependants_packages
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-dev/llama_dev/utils.py",
"license": "MIT License",
"lines": 166,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-dev/tests/test_utils.py | from pathlib import Path
from unittest import mock
import pytest
from llama_dev.utils import (
find_all_packages,
find_integrations,
find_packs,
find_utils,
get_changed_files,
get_changed_packages,
get_dep_names,
get_dependants_packages,
is_python_version_compatible,
load_pyproject,
package_has_tests,
)
def test_find_integrations(data_path):
assert {p.name for p in find_integrations(data_path)} == {"pkg1", "pkg2"}
def test_find_packs(data_path):
assert {p.name for p in find_packs(data_path)} == {"pack1", "pack2"}
def test_find_utils(data_path):
assert {p.name for p in find_utils(data_path)} == {"util"}
def test_package_has_tests(data_path):
assert not package_has_tests(data_path / "llama-index-packs" / "pack2")
def test_load_pyproject(data_path):
pkg_data = load_pyproject(
data_path / "llama-index-integrations" / "vector_stores" / "pkg1"
)
assert pkg_data["project"]["name"] == "pkg1"
def test_find_all_packages(data_path):
assert len(find_all_packages(data_path)) == 7
@mock.patch("subprocess.run")
def test_get_changed_files(mock_run, tmp_path):
# Mock the subprocess.run result
mock_process = mock.MagicMock()
mock_process.returncode = 0
mock_process.stdout = "file1.py\nfile2.py\n\nfile3.py"
mock_run.return_value = mock_process
result = get_changed_files(tmp_path, "my-branch")
assert result == [
tmp_path / "file1.py",
tmp_path / "file2.py",
tmp_path / "file3.py",
]
mock_run.assert_called_once_with(
["git", "diff", "--name-only", "my-branch...HEAD"],
cwd=tmp_path,
text=True,
capture_output=True,
)
@mock.patch("subprocess.run")
def test_get_changed_files_error(mock_run, tmp_path):
# Mock the subprocess.run failure
mock_process = mock.MagicMock()
mock_process.returncode = 1
mock_process.stderr = "Error in git command"
mock_run.return_value = mock_process
with pytest.raises(RuntimeError):
get_changed_files(tmp_path)
def test_get_changed_packages():
changed_files = [
Path("./llama-index-core/file1.py"),
Path("./llama-index-integrations/vector_stores/pkg1/file2.py"),
Path("./some/other/path/file3.py"),
]
all_packages = [
Path("./llama-index-core"),
Path("./llama-index-integrations/vector_stores/pkg1"),
Path("./llama-index-packs/pkg2"),
]
result = get_changed_packages(changed_files, all_packages)
assert result == {
Path("llama-index-core"),
Path("llama-index-integrations/vector_stores/pkg1"),
}
def test_get_dep_names():
pyproject_data = {
"project": {
"dependencies": [
"numpy>=1.20.0",
"pandas==1.5.0",
"scipy<1.9.0",
"matplotlib",
"requests>=2.25.0,<3.0.0",
"beautifulsoup4>=4.9.3,!=4.10.0",
"urllib3>=1.26.0,<2.0.0,!=1.26.5",
"sqlalchemy[postgresql]>=1.4.0",
"django[bcrypt]",
"colorama; platform_system=='Windows'",
"importlib-metadata; python_version<'3.8'",
" tensorflow >= 2.0.0 ", # extra spaces
"pillow ==9.0.0", # double spaces
"package @ git+https://github.com/user/repo.git",
"local-package @ file:///path/to/package",
"===", # Invalid dependency string with only separators
"", # Empty string
" ", # Just a space
]
}
}
assert get_dep_names(pyproject_data) == {
"numpy",
"pandas",
"scipy",
"matplotlib",
"requests",
"beautifulsoup4",
"urllib3",
"sqlalchemy",
"django",
"colorama",
"importlib-metadata",
"tensorflow",
"pillow",
"package",
"local-package",
}
# Test with empty dependencies
pyproject_data = {"project": {"dependencies": []}}
dependencies = get_dep_names(pyproject_data)
assert dependencies == set()
def test_is_python_version_compatible(mock_current_version):
# Test with missing 'project' section in pyproject data."""
pyproject_data = {}
assert is_python_version_compatible(pyproject_data) is True
# Test when no Python version requirement is specified
pyproject_data = {"project": {}}
assert is_python_version_compatible(pyproject_data) is True
# Test when the current Python version exactly matches the requirement
mock_current_version(3, 8, 0)
pyproject_data = {"project": {"requires-python": "==3.8.0"}}
assert is_python_version_compatible(pyproject_data) is True
# Test when the current Python version is within a specified range
mock_current_version(3, 9, 5)
pyproject_data = {"project": {"requires-python": ">=3.8,<3.11"}}
assert is_python_version_compatible(pyproject_data) is True
# Test when the current Python version is incompatible with requirements
mock_current_version(3, 7, 0)
pyproject_data = {"project": {"requires-python": ">=3.8"}}
assert is_python_version_compatible(pyproject_data) is False
# Test with a complex version specifier
mock_current_version(3, 10, 2)
pyproject_data = {"project": {"requires-python": ">=3.8,!=3.9.0,<3.11"}}
assert is_python_version_compatible(pyproject_data) is True
# Test with an invalid version specifier
pyproject_data = {"project": {"requires-python": "invalid-specifier"}}
assert is_python_version_compatible(pyproject_data) is True
@mock.patch("llama_dev.utils.load_pyproject")
def test_get_dependants_packages(mock_load_pyproject):
# Setup test data
changed_packages = {
Path("llama-index-core"),
Path("llama-index-integrations/vector_stores/pkg1"),
}
all_packages = [
Path("llama-index-core"),
Path("llama-index-integrations/vector_stores/pkg1"),
Path("llama-index-packs/pkg2"),
Path("llama-index-integrations/llm/pkg3"),
]
# Setup mock package names and dependencies
pkg_data = {
"llama-index-core": {
"project": {"name": "llama-index-core", "dependencies": ["bar<2.0"]}
},
"llama-index-integrations/vector_stores/pkg1": {
"project": {
"name": "llama-index-integrations-vector-stores-pkg1",
"dependencies": ["foo==1.0.0"],
}
},
"llama-index-packs/pkg2": {
"project": {
"name": "pkg2",
"dependencies": ["llama-index-core==0.8.0", "numpy<1.20.0"],
}
},
"llama-index-integrations/llm/pkg3": {
"project": {
"name": "pkg3",
"dependencies": [
"llama-index-integrations-vector-stores-pkg1>0.1.0",
"pandas>=1.3.0",
],
}
},
}
# Mock tomli.load to return appropriate project data
def mock_pkg_data(file_path):
for pkg_path, data in pkg_data.items():
if pkg_path in str(file_path):
return data
return {"name": "", "deps": []}
mock_load_pyproject.side_effect = mock_pkg_data
result = get_dependants_packages(changed_packages, all_packages)
assert result == {
Path("llama-index-packs/pkg2"),
Path("llama-index-integrations/llm/pkg3"),
}
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-dev/tests/test_utils.py",
"license": "MIT License",
"lines": 199,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/llama_index/core/memory/memory.py | import asyncio
import uuid
from abc import abstractmethod
from enum import Enum
from sqlalchemy.ext.asyncio import AsyncEngine
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Tuple,
Union,
TypeVar,
Generic,
cast,
)
from llama_index.core.async_utils import asyncio_run
from llama_index.core.base.llms.types import (
ChatMessage,
ContentBlock,
TextBlock,
AudioBlock,
ImageBlock,
VideoBlock,
DocumentBlock,
CachePoint,
CitableBlock,
CitationBlock,
ThinkingBlock,
ToolCallBlock,
)
from llama_index.core.bridge.pydantic import (
BaseModel,
Field,
model_validator,
ConfigDict,
)
from llama_index.core.memory.types import BaseMemory
from llama_index.core.prompts import RichPromptTemplate
from llama_index.core.storage.chat_store.sql import SQLAlchemyChatStore, MessageStatus
from llama_index.core.utils import get_tokenizer
# Define type variable for memory block content
T = TypeVar("T", str, List[ContentBlock], List[ChatMessage])
DEFAULT_TOKEN_LIMIT = 30000
DEFAULT_FLUSH_SIZE = int(DEFAULT_TOKEN_LIMIT * 0.1)
DEFAULT_MEMORY_BLOCKS_TEMPLATE = RichPromptTemplate(
"""
<memory>
{% for (block_name, block_content) in memory_blocks %}
<{{ block_name }}>
{% for block in block_content %}
{% if block.block_type == "text" %}
{{ block.text }}
{% elif block.block_type == "image" %}
{% if block.url %}
{{ (block.url | string) | image }}
{% elif block.path %}
{{ (block.path | string) | image }}
{% endif %}
{% elif block.block_type == "audio" %}
{% if block.url %}
{{ (block.url | string) | audio }}
{% elif block.path %}
{{ (block.path | string) | audio }}
{% endif %}
{% endif %}
{% endfor %}
</{{ block_name }}>
{% endfor %}
</memory>
"""
)
class InsertMethod(Enum):
SYSTEM = "system"
USER = "user"
def generate_chat_store_key() -> str:
"""Generate a unique chat store key."""
return str(uuid.uuid4())
def get_default_chat_store() -> SQLAlchemyChatStore:
"""Get the default chat store."""
return SQLAlchemyChatStore(table_name="llama_index_memory")
class BaseMemoryBlock(BaseModel, Generic[T]):
"""
A base class for memory blocks.
Subclasses must implement the `aget` and `aput` methods.
Optionally, subclasses can implement the `atruncate` method, which is used to reduce the size of the memory block.
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
name: str = Field(description="The name/identifier of the memory block.")
description: Optional[str] = Field(
default=None, description="A description of the memory block."
)
priority: int = Field(
default=0,
description="Priority of this memory block (0 = never truncate, 1 = highest priority, etc.).",
)
accept_short_term_memory: bool = Field(
default=True,
description="Whether to accept puts from messages ejected from the short-term memory.",
)
@abstractmethod
async def _aget(
self, messages: Optional[List[ChatMessage]] = None, **block_kwargs: Any
) -> T:
"""Pull the memory block (async)."""
async def aget(
self, messages: Optional[List[ChatMessage]] = None, **block_kwargs: Any
) -> T:
"""
Pull the memory block (async).
Returns:
T: The memory block content. One of:
- str: A simple text string to be inserted into the template.
- List[ContentBlock]: A list of content blocks to be inserted into the template.
- List[ChatMessage]: A list of chat messages to be directly appended to the chat history.
"""
return await self._aget(messages, **block_kwargs)
@abstractmethod
async def _aput(self, messages: List[ChatMessage]) -> None:
"""Push to the memory block (async)."""
async def aput(
self,
messages: List[ChatMessage],
from_short_term_memory: bool = False,
session_id: Optional[str] = None,
) -> None:
"""Push to the memory block (async)."""
if from_short_term_memory and not self.accept_short_term_memory:
return
if session_id is not None:
for message in messages:
message.additional_kwargs["session_id"] = session_id
await self._aput(messages)
async def atruncate(self, content: T, tokens_to_truncate: int) -> Optional[T]:
"""
Truncate the memory block content to the given token limit.
By default, truncation will remove the entire block content.
Args:
content:
The content of type T, depending on what the memory block returns.
tokens_to_truncate:
The number of tokens requested to truncate the content by.
Blocks may or may not truncate to the exact number of tokens requested, but it
can be used as a hint for the block to truncate.
Returns:
The truncated content of type T, or None if the content is completely truncated.
"""
return None
class Memory(BaseMemory):
"""
A memory module that waterfalls into memory blocks.
Works by orchestrating around
- a FIFO queue of messages
- a list of memory blocks
- various parameters (pressure size, token limit, etc.)
When the FIFO queue reaches the token limit, the oldest messages within the pressure size are ejected from the FIFO queue.
The messages are then processed by each memory block.
When pulling messages from this memory, the memory blocks are processed in order, and the messages are injected into the system message or the latest user message.
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
token_limit: int = Field(
default=DEFAULT_TOKEN_LIMIT,
description="The overall token limit of the memory.",
)
token_flush_size: int = Field(
default=DEFAULT_FLUSH_SIZE,
description="The token size to use for flushing the FIFO queue.",
)
chat_history_token_ratio: float = Field(
default=0.7,
description="Minimum percentage ratio of total token limit reserved for chat history.",
)
memory_blocks: List[BaseMemoryBlock] = Field(
default_factory=list,
description="The list of memory blocks to use.",
)
memory_blocks_template: RichPromptTemplate = Field(
default=DEFAULT_MEMORY_BLOCKS_TEMPLATE,
description="The template to use for formatting the memory blocks.",
)
insert_method: InsertMethod = Field(
default=InsertMethod.SYSTEM,
description="Whether to inject memory blocks into a system message or into the latest user message.",
)
image_token_size_estimate: int = Field(
default=256,
description="The token size estimate for images.",
)
audio_token_size_estimate: int = Field(
default=256,
description="The token size estimate for audio.",
)
video_token_size_estimate: int = Field(
default=256,
description="The token size estimate for video.",
)
tokenizer_fn: Callable[[str], List] = Field(
default_factory=get_tokenizer,
exclude=True,
description="The tokenizer function to use for token counting.",
)
sql_store: SQLAlchemyChatStore = Field(
default_factory=get_default_chat_store,
exclude=True,
description="The chat store to use for storing messages.",
)
session_id: str = Field(
default_factory=generate_chat_store_key,
description="The key to use for storing messages in the chat store.",
)
@classmethod
def class_name(cls) -> str:
return "Memory"
@model_validator(mode="before")
@classmethod
def validate_memory(cls, values: dict) -> dict:
# Validate token limit
token_limit = values.get("token_limit", -1)
if token_limit < 1:
raise ValueError("Token limit must be set and greater than 0.")
tokenizer_fn = values.get("tokenizer_fn")
if tokenizer_fn is None:
values["tokenizer_fn"] = get_tokenizer()
if values.get("token_flush_size", -1) < 1:
values["token_flush_size"] = int(token_limit * 0.1)
elif values.get("token_flush_size", -1) > token_limit:
values["token_flush_size"] = int(token_limit * 0.1)
# validate all blocks have unique names
block_names = [block.name for block in values.get("memory_blocks", [])]
if len(block_names) != len(set(block_names)):
raise ValueError("All memory blocks must have unique names.")
return values
@classmethod
def from_defaults( # type: ignore[override]
cls,
session_id: Optional[str] = None,
chat_history: Optional[List[ChatMessage]] = None,
token_limit: int = DEFAULT_TOKEN_LIMIT,
memory_blocks: Optional[List[BaseMemoryBlock[Any]]] = None,
tokenizer_fn: Optional[Callable[[str], List]] = None,
chat_history_token_ratio: float = 0.7,
token_flush_size: int = DEFAULT_FLUSH_SIZE,
memory_blocks_template: RichPromptTemplate = DEFAULT_MEMORY_BLOCKS_TEMPLATE,
insert_method: InsertMethod = InsertMethod.SYSTEM,
image_token_size_estimate: int = 256,
audio_token_size_estimate: int = 256,
video_token_size_estimate: int = 256,
# SQLAlchemyChatStore parameters
table_name: str = "llama_index_memory",
async_database_uri: Optional[str] = None,
async_engine: Optional[AsyncEngine] = None,
db_schema: Optional[str] = None,
) -> "Memory":
"""Initialize Memory."""
session_id = session_id or generate_chat_store_key()
# If not using the SQLAlchemyChatStore, provide an error
sql_store = SQLAlchemyChatStore(
table_name=table_name,
async_database_uri=async_database_uri,
async_engine=async_engine,
db_schema=db_schema,
)
if chat_history is not None:
asyncio_run(sql_store.set_messages(session_id, chat_history))
if token_flush_size > token_limit:
token_flush_size = int(token_limit * 0.7)
return cls(
token_limit=token_limit,
tokenizer_fn=tokenizer_fn or get_tokenizer(),
sql_store=sql_store,
session_id=session_id,
memory_blocks=memory_blocks or [],
chat_history_token_ratio=chat_history_token_ratio,
token_flush_size=token_flush_size,
memory_blocks_template=memory_blocks_template,
insert_method=insert_method,
image_token_size_estimate=image_token_size_estimate,
audio_token_size_estimate=audio_token_size_estimate,
video_token_size_estimate=video_token_size_estimate,
)
def _estimate_token_count(
self,
message_or_blocks: Union[
str, ChatMessage, List[ChatMessage], List[ContentBlock]
],
) -> int:
"""Estimate token count for a message."""
token_count = 0
# Normalize the input to a list of ContentBlocks
if isinstance(message_or_blocks, ChatMessage):
blocks: List[
Union[
TextBlock,
ImageBlock,
VideoBlock,
AudioBlock,
DocumentBlock,
CitableBlock,
CitationBlock,
ThinkingBlock,
]
] = []
for block in message_or_blocks.blocks:
if not isinstance(block, (CachePoint, ToolCallBlock)):
blocks.append(block)
# Estimate the token count for the additional kwargs
if message_or_blocks.additional_kwargs:
token_count += len(
self.tokenizer_fn(str(message_or_blocks.additional_kwargs))
)
elif isinstance(message_or_blocks, List):
# Type narrow the list
messages: List[ChatMessage] = []
if all(isinstance(item, ChatMessage) for item in message_or_blocks):
messages = cast(List[ChatMessage], message_or_blocks)
blocks = []
for msg in messages:
for block in msg.blocks:
if not isinstance(block, (CachePoint, ToolCallBlock)):
blocks.append(block)
# Estimate the token count for the additional kwargs
token_count += sum(
len(self.tokenizer_fn(str(msg.additional_kwargs)))
for msg in messages
if msg.additional_kwargs
)
elif all(
isinstance(
item,
(
TextBlock,
ImageBlock,
AudioBlock,
VideoBlock,
DocumentBlock,
CachePoint,
),
)
for item in message_or_blocks
):
blocks = []
for item in message_or_blocks:
if not isinstance(item, CachePoint):
blocks.append(
cast(
Union[
TextBlock,
ImageBlock,
AudioBlock,
VideoBlock,
DocumentBlock,
],
item,
)
)
else:
raise ValueError(f"Invalid message type: {type(message_or_blocks)}")
elif isinstance(message_or_blocks, str):
blocks = [TextBlock(text=message_or_blocks)]
else:
raise ValueError(f"Invalid message type: {type(message_or_blocks)}")
# Estimate the token count for each block
for block in blocks:
if isinstance(block, TextBlock):
token_count += len(self.tokenizer_fn(block.text))
elif isinstance(block, ImageBlock):
token_count += self.image_token_size_estimate
elif isinstance(block, VideoBlock):
token_count += self.video_token_size_estimate
elif isinstance(block, AudioBlock):
token_count += self.audio_token_size_estimate
return token_count
async def _get_memory_blocks_content(
self,
chat_history: List[ChatMessage],
input: Optional[Union[str, ChatMessage]] = None,
**block_kwargs: Any,
) -> Dict[str, Any]:
"""Get content from memory blocks in priority order."""
content_per_memory_block: Dict[str, Any] = {}
block_input = chat_history
if isinstance(input, str):
block_input = [*chat_history, ChatMessage(role="user", content=input)]
# Process memory blocks in priority order
for memory_block in sorted(self.memory_blocks, key=lambda x: -x.priority):
content = await memory_block.aget(
block_input, session_id=self.session_id, **block_kwargs
)
# Handle different return types from memory blocks
if content and isinstance(content, list):
# Memory block returned content blocks
content_per_memory_block[memory_block.name] = content
elif content and isinstance(content, str):
# Memory block returned a string
content_per_memory_block[memory_block.name] = content
elif not content:
continue
else:
raise ValueError(
f"Invalid content type received from memory block {memory_block.name}: {type(content)}"
)
return content_per_memory_block
async def _truncate_memory_blocks(
self,
content_per_memory_block: Dict[str, Any],
memory_blocks_tokens: int,
chat_history_tokens: int,
) -> Dict[str, Any]:
"""Truncate memory blocks if total token count exceeds limit."""
if memory_blocks_tokens + chat_history_tokens <= self.token_limit:
return content_per_memory_block
tokens_to_truncate = (
memory_blocks_tokens + chat_history_tokens - self.token_limit
)
truncated_content = content_per_memory_block.copy()
# Truncate memory blocks based on priority
for memory_block in sorted(
self.memory_blocks, key=lambda x: x.priority
): # Lower priority first
# Skip memory blocks with priority 0, they should never be truncated
if memory_block.priority == 0:
continue
if tokens_to_truncate <= 0:
break
# Truncate content and measure tokens saved
content = truncated_content.get(memory_block.name, [])
truncated_block_content = await memory_block.atruncate(
content, tokens_to_truncate
)
# Calculate tokens saved
original_tokens = self._estimate_token_count(content)
if truncated_block_content is None:
new_tokens = 0
else:
new_tokens = self._estimate_token_count(truncated_block_content)
tokens_saved = original_tokens - new_tokens
tokens_to_truncate -= tokens_saved
# Update the content blocks
if truncated_block_content is None:
truncated_content[memory_block.name] = []
else:
truncated_content[memory_block.name] = truncated_block_content
# handle case where we still have tokens to truncate
# just remove the blocks starting from the least priority
for memory_block in sorted(self.memory_blocks, key=lambda x: x.priority):
if memory_block.priority == 0:
continue
if tokens_to_truncate <= 0:
break
# Truncate content and measure tokens saved
content = truncated_content.pop(memory_block.name)
tokens_to_truncate -= self._estimate_token_count(content)
return truncated_content
async def _format_memory_blocks(
self, content_per_memory_block: Dict[str, Any]
) -> Tuple[List[Tuple[str, List[ContentBlock]]], List[ChatMessage]]:
"""Format memory blocks content into template data and chat messages."""
memory_blocks_data: List[Tuple[str, List[ContentBlock]]] = []
chat_message_data: List[ChatMessage] = []
for block in self.memory_blocks:
if block.name in content_per_memory_block:
content = content_per_memory_block[block.name]
# Skip empty memory blocks
if not content:
continue
if (
isinstance(content, list)
and content
and isinstance(content[0], ChatMessage)
):
chat_message_data.extend(content)
elif isinstance(content, str):
memory_blocks_data.append((block.name, [TextBlock(text=content)]))
else:
memory_blocks_data.append((block.name, content))
return memory_blocks_data, chat_message_data
def _insert_memory_content(
self,
chat_history: List[ChatMessage],
memory_content: List[ContentBlock],
chat_message_data: List[ChatMessage],
) -> List[ChatMessage]:
"""Insert memory content into chat history based on insert method."""
result = chat_history.copy()
# Process chat messages
if chat_message_data:
result = [*chat_message_data, *result]
# Process template-based memory blocks
if memory_content:
if self.insert_method == InsertMethod.SYSTEM:
# Find system message or create a new one
system_idx = next(
(i for i, msg in enumerate(result) if msg.role == "system"), None
)
if system_idx is not None:
# Update existing system message
result[system_idx].blocks = [
*memory_content,
*result[system_idx].blocks,
]
else:
# Create new system message at the beginning
result.insert(0, ChatMessage(role="system", blocks=memory_content))
elif self.insert_method == InsertMethod.USER:
# Find the latest user message
session_idx = next(
(i for i, msg in enumerate(reversed(result)) if msg.role == "user"),
None,
)
if session_idx is not None:
# Get actual index (since we enumerated in reverse)
actual_idx = len(result) - 1 - session_idx
# Update existing user message
result[actual_idx].blocks = [
*memory_content,
*result[actual_idx].blocks,
]
else:
result.append(ChatMessage(role="user", blocks=memory_content))
return result
async def aget(
self, input: Optional[Union[str, ChatMessage]] = None, **block_kwargs: Any
) -> List[ChatMessage]: # type: ignore[override]
"""Get messages with memory blocks included (async)."""
# Get chat history efficiently
chat_history = await self.sql_store.get_messages(
self.session_id, status=MessageStatus.ACTIVE
)
chat_history_tokens = sum(
self._estimate_token_count(message) for message in chat_history
)
# Get memory blocks content
content_per_memory_block = await self._get_memory_blocks_content(
chat_history, input=input, **block_kwargs
)
# Calculate memory blocks tokens
memory_blocks_tokens = sum(
self._estimate_token_count(content)
for content in content_per_memory_block.values()
)
# Handle truncation if needed
truncated_content = await self._truncate_memory_blocks(
content_per_memory_block, memory_blocks_tokens, chat_history_tokens
)
# Format template-based memory blocks
memory_blocks_data, chat_message_data = await self._format_memory_blocks(
truncated_content
)
# Create messages from template content
memory_content = []
if memory_blocks_data:
memory_block_messages = self.memory_blocks_template.format_messages(
memory_blocks=memory_blocks_data
)
memory_content = (
memory_block_messages[0].blocks if memory_block_messages else []
)
# Insert memory content into chat history
return self._insert_memory_content(
chat_history, memory_content, chat_message_data
)
async def _manage_queue(self) -> None:
"""
Manage the FIFO queue.
This function manages the memory queue using a waterfall approach:
1. If the queue exceeds the token limit, it removes oldest messages first
2. Removed messages are archived and passed to memory blocks
3. It ensures conversation integrity by keeping related messages together
4. It maintains at least one complete conversation turn
"""
# Calculate if we need to waterfall
current_queue = await self.sql_store.get_messages(
self.session_id, status=MessageStatus.ACTIVE
)
# If current queue is empty, return
if not current_queue:
return
tokens_in_current_queue = sum(
self._estimate_token_count(message) for message in current_queue
)
# If we're over the token limit, initiate waterfall
token_limit = self.token_limit * self.chat_history_token_ratio
if tokens_in_current_queue > token_limit:
# Process from oldest to newest, but efficiently with pop() operations
reversed_queue = current_queue[::-1] # newest first, oldest last
# Calculate approximate number of messages to remove
tokens_to_remove = tokens_in_current_queue - token_limit
while tokens_to_remove > 0:
# If only one message left, keep it regardless of token count
if len(reversed_queue) <= 1:
break
# Collect messages to flush (up to flush size)
messages_to_flush = []
flushed_tokens = 0
# Remove oldest messages (from end of reversed list) until reaching flush size
while (
flushed_tokens < self.token_flush_size
and reversed_queue
and len(reversed_queue) > 1
):
message = reversed_queue.pop()
messages_to_flush.append(message)
flushed_tokens += self._estimate_token_count(message)
# Ensure we keep at least one message
if not reversed_queue and messages_to_flush:
reversed_queue.append(messages_to_flush.pop())
# We need to maintain conversation integrity
# Messages should be removed in complete conversation turns
chronological_view = reversed_queue[::-1] # View in chronological order
# Find the correct conversation boundary
# We want the first message in our remaining queue to be a user message
# and the last message to be from assistant or tool
if chronological_view:
# Keep removing messages until first remaining message is from user
# This ensures we start with a user message
while (
chronological_view
and chronological_view[0].role != "user"
and len(reversed_queue) > 1
):
if reversed_queue:
messages_to_flush.append(reversed_queue.pop())
chronological_view = reversed_queue[::-1]
else:
break
# If we end up with an empty queue or only a non-user message,
# keep at least one full conversation turn
if (
not reversed_queue
or (
len(reversed_queue) == 1
and reversed_queue[0].role != "user"
)
) and messages_to_flush:
# If reversed_queue has a non-user message, move it to messages_to_flush
if reversed_queue and reversed_queue[0].role != "user":
messages_to_flush.append(reversed_queue.pop(0))
# Find the most recent complete conversation turn in messages_to_flush
# A complete turn is: user message + all subsequent assistant/tool responses
# This correctly handles tool calling: user → assistant → tool → assistant
# Search from end to find the last user message
turn_start_idx = -1
for i in range(len(messages_to_flush) - 1, -1, -1):
if messages_to_flush[i].role == "user":
turn_start_idx = i
break
# If we found a user message, keep everything from that user to the end
if turn_start_idx >= 0:
turn_messages = messages_to_flush[turn_start_idx:]
# Keep only messages before the turn for flushing
messages_to_flush = messages_to_flush[:turn_start_idx]
# Add the complete turn back to the queue
reversed_queue = turn_messages[::-1] + reversed_queue
# else: No user message found - queue may remain empty (defensive)
# Archive the flushed messages
if messages_to_flush:
await self.sql_store.archive_oldest_messages(
self.session_id, n=len(messages_to_flush)
)
# Waterfall the flushed messages to memory blocks
await asyncio.gather(
*[
block.aput(
messages_to_flush,
from_short_term_memory=True,
session_id=self.session_id,
)
for block in self.memory_blocks
]
)
# Recalculate remaining tokens
chronological_view = reversed_queue[::-1]
tokens_in_current_queue = sum(
self._estimate_token_count(message)
for message in chronological_view
)
tokens_to_remove = tokens_in_current_queue - token_limit
# Exit if we've flushed everything possible but still over limit
if not messages_to_flush:
break
async def aput(self, message: ChatMessage) -> None:
"""Add a message to the chat store and process waterfall logic if needed."""
# Add the message to the chat store
await self.sql_store.add_message(
self.session_id, message, status=MessageStatus.ACTIVE
)
# Ensure the active queue is managed
await self._manage_queue()
async def aput_messages(self, messages: List[ChatMessage]) -> None:
"""Add a list of messages to the chat store and process waterfall logic if needed."""
# Add the messages to the chat store
await self.sql_store.add_messages(
self.session_id, messages, status=MessageStatus.ACTIVE
)
# Ensure the active queue is managed
await self._manage_queue()
async def aset(self, messages: List[ChatMessage]) -> None:
"""Set the chat history."""
await self.sql_store.set_messages(
self.session_id, messages, status=MessageStatus.ACTIVE
)
async def aget_all(
self, status: Optional[MessageStatus] = None
) -> List[ChatMessage]:
"""Get all messages."""
return await self.sql_store.get_messages(self.session_id, status=status)
async def areset(self, status: Optional[MessageStatus] = None) -> None:
"""Reset the memory."""
await self.sql_store.delete_messages(self.session_id, status=status)
# ---- Sync method wrappers ----
def get(
self, input: Optional[Union[str, ChatMessage]] = None, **block_kwargs: Any
) -> List[ChatMessage]: # type: ignore[override]
"""Get messages with memory blocks included."""
return asyncio_run(self.aget(input=input, **block_kwargs))
def get_all(self, status: Optional[MessageStatus] = None) -> List[ChatMessage]:
"""Get all messages."""
return asyncio_run(self.aget_all(status=status))
def put(self, message: ChatMessage) -> None:
"""Add a message to the chat store and process waterfall logic if needed."""
return asyncio_run(self.aput(message))
def put_messages(self, messages: List[ChatMessage]) -> None:
"""Add a list of messages to the chat store and process waterfall logic if needed."""
return asyncio_run(self.aput_messages(messages))
def set(self, messages: List[ChatMessage]) -> None:
"""Set the chat history."""
return asyncio_run(self.aset(messages))
def reset(self) -> None:
"""Reset the memory."""
return asyncio_run(self.areset())
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/llama_index/core/memory/memory.py",
"license": "MIT License",
"lines": 731,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-core/llama_index/core/storage/chat_store/base_db.py | from abc import abstractmethod
from typing import List, Optional
from enum import Enum
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.bridge.pydantic import BaseModel
class MessageStatus(str, Enum):
"""Status of a message in the chat store."""
# Message is in the active FIFO queue
ACTIVE = "active"
# Message has been processed and is archived, removed from the active queue
ARCHIVED = "archived"
class AsyncDBChatStore(BaseModel):
"""
Base class for DB-based chat stores.
Meant to implement a FIFO queue to manage short-term memory and
general conversation history.
"""
@abstractmethod
async def get_messages(
self,
key: str,
status: Optional[MessageStatus] = MessageStatus.ACTIVE,
limit: Optional[int] = None,
offset: Optional[int] = None,
) -> List[ChatMessage]:
"""
Get all messages for a key with the specified status (async).
Returns a list of messages.
"""
@abstractmethod
async def count_messages(
self,
key: str,
status: Optional[MessageStatus] = MessageStatus.ACTIVE,
) -> int:
"""Count messages for a key with the specified status (async)."""
@abstractmethod
async def add_message(
self,
key: str,
message: ChatMessage,
status: MessageStatus = MessageStatus.ACTIVE,
) -> None:
"""Add a message for a key with the specified status (async)."""
@abstractmethod
async def add_messages(
self,
key: str,
messages: List[ChatMessage],
status: MessageStatus = MessageStatus.ACTIVE,
) -> None:
"""Add a list of messages in batch for the specified key and status (async)."""
@abstractmethod
async def set_messages(
self,
key: str,
messages: List[ChatMessage],
status: MessageStatus = MessageStatus.ACTIVE,
) -> None:
"""Set all messages for a key (replacing existing ones) with the specified status (async)."""
@abstractmethod
async def delete_message(self, key: str, idx: int) -> Optional[ChatMessage]:
"""Delete a specific message by ID and return it (async)."""
@abstractmethod
async def delete_messages(
self, key: str, status: Optional[MessageStatus] = None
) -> None:
"""Delete all messages for a key with the specified status (async)."""
@abstractmethod
async def delete_oldest_messages(self, key: str, n: int) -> List[ChatMessage]:
"""Delete the oldest n messages for a key and return them (async)."""
@abstractmethod
async def archive_oldest_messages(self, key: str, n: int) -> List[ChatMessage]:
"""Archive the oldest n messages for a key and return them (async)."""
@abstractmethod
async def get_keys(self) -> List[str]:
"""Get all unique keys in the store (async)."""
@classmethod
def class_name(cls) -> str:
"""Return the class name."""
return "AsyncDBChatStore"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/llama_index/core/storage/chat_store/base_db.py",
"license": "MIT License",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-core/llama_index/core/storage/chat_store/sql.py | import time
from typing import Any, Dict, List, Optional, Tuple
from sqlalchemy import (
JSON,
Column,
BigInteger,
Integer,
MetaData,
String,
Table,
delete,
select,
insert,
update,
text,
)
from sqlalchemy.ext.asyncio import (
AsyncEngine,
AsyncSession,
create_async_engine,
)
from sqlalchemy.orm import declarative_base, sessionmaker
from llama_index.core.async_utils import asyncio_run
from llama_index.core.bridge.pydantic import Field, PrivateAttr, model_serializer
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.storage.chat_store.base_db import AsyncDBChatStore, MessageStatus
DEFAULT_ASYNC_DATABASE_URI = "sqlite+aiosqlite:///:memory:"
Base = declarative_base()
class SQLAlchemyChatStore(AsyncDBChatStore):
"""
Base class for SQLAlchemy-based chat stores.
This class provides a foundation for creating chat stores that use SQLAlchemy
to interact with SQL databases. It handles common operations like managing
sessions, creating tables, and CRUD operations on chat messages.
Enhanced with status tracking for better FIFO queue management for short-term memory.
This class is meant to replace all other chat store classes.
"""
table_name: str = Field(description="Name of the table to store messages")
async_database_uri: str = Field(
default=DEFAULT_ASYNC_DATABASE_URI,
description="SQLAlchemy async connection URI",
)
db_schema: Optional[str] = Field(
default=None,
description="Database schema name (for PostgreSQL and other databases that support schemas)",
)
_async_engine: Optional[AsyncEngine] = PrivateAttr(default=None)
_async_session_factory: Optional[sessionmaker] = PrivateAttr(default=None)
_metadata: MetaData = PrivateAttr(default_factory=MetaData)
_table: Optional[Table] = PrivateAttr(default=None)
_db_data: Optional[List[Dict[str, Any]]] = PrivateAttr(default=None)
def __init__(
self,
table_name: str,
async_database_uri: Optional[str] = DEFAULT_ASYNC_DATABASE_URI,
async_engine: Optional[AsyncEngine] = None,
db_data: Optional[List[Dict[str, Any]]] = None,
db_schema: Optional[str] = None,
):
"""Initialize the SQLAlchemy chat store."""
super().__init__(
table_name=table_name,
async_database_uri=async_database_uri or DEFAULT_ASYNC_DATABASE_URI,
db_schema=db_schema,
)
self._async_engine = async_engine
self._db_data = db_data
@staticmethod
def _is_in_memory_uri(uri: Optional[str]) -> bool:
"""Check if the URI points to an in-memory SQLite database."""
# Handles both :memory: and empty path which also means in-memory for sqlite
return uri == "sqlite+aiosqlite:///:memory:" or uri == "sqlite+aiosqlite://"
def _is_sqlite_database(self) -> bool:
"""Check if the database is SQLite (which doesn't support schemas)."""
if self._async_engine is not None:
return str(self._async_engine.url).startswith("sqlite")
return self.async_database_uri.startswith("sqlite")
async def _initialize(self) -> Tuple[sessionmaker, Table]:
"""Initialize the chat store. Used to avoid HTTP connections in constructor."""
if self._async_session_factory is not None and self._table is not None:
return self._async_session_factory, self._table
async_engine, async_session_factory = await self._setup_connections()
table = await self._setup_tables(async_engine)
# Restore data from in-memory database if provided
if self._db_data:
async with async_session_factory() as session:
await session.execute(insert(table).values(self._db_data))
await session.commit()
# clear the data after it's inserted
self._db_data = None
return async_session_factory, table
async def _setup_connections(
self,
) -> Tuple[AsyncEngine, sessionmaker]:
"""Set up database connections and session factories."""
# Create async engine and session factory if async URI is provided
if self._async_session_factory is not None and self._async_engine is not None:
return self._async_engine, self._async_session_factory
elif self.async_database_uri or self._async_engine:
self._async_engine = self._async_engine or create_async_engine(
self.async_database_uri
)
if self.async_database_uri is None:
self.async_database_uri = self._async_engine.url
self._async_session_factory = sessionmaker( # type: ignore
bind=self._async_engine, expire_on_commit=False, class_=AsyncSession
)
return self._async_engine, self._async_session_factory # type: ignore
else:
raise ValueError(
"No async database URI or engine provided, cannot initialize DB sessionmaker"
)
async def _setup_tables(self, async_engine: AsyncEngine) -> Table:
"""Set up database tables."""
# Create metadata with schema
if self.db_schema is not None and not self._is_sqlite_database():
# Only set schema for databases that support it
self._metadata = MetaData(schema=self.db_schema)
# Create schema if it doesn't exist (PostgreSQL, SQL Server, etc.)
async with async_engine.begin() as conn:
await conn.execute(
text(f'CREATE SCHEMA IF NOT EXISTS "{self.db_schema}"')
)
# Create messages table with status column
self._table = Table(
f"{self.table_name}",
self._metadata,
Column("id", Integer, primary_key=True, autoincrement=True),
Column("key", String, nullable=False, index=True),
Column("timestamp", BigInteger, nullable=False, index=True),
Column("role", String, nullable=False),
Column(
"status",
String,
nullable=False,
default=MessageStatus.ACTIVE.value,
index=True,
),
Column("data", JSON, nullable=False),
)
# Create tables in the database
async with async_engine.begin() as conn:
await conn.run_sync(self._metadata.create_all)
return self._table
async def get_messages(
self,
key: str,
status: Optional[MessageStatus] = MessageStatus.ACTIVE,
limit: Optional[int] = None,
offset: Optional[int] = None,
) -> List[ChatMessage]:
"""
Get all messages for a key with the specified status (async).
Returns a list of messages.
"""
session_factory, table = await self._initialize()
query = select(table).where(table.c.key == key)
if limit is not None:
query = query.limit(limit)
if offset is not None:
query = query.offset(offset)
if status is not None:
query = query.where(table.c.status == status.value)
async with session_factory() as session:
result = await session.execute(
query.order_by(table.c.timestamp, table.c.id)
)
rows = result.fetchall()
return [ChatMessage.model_validate(row.data) for row in rows]
async def count_messages(
self,
key: str,
status: Optional[MessageStatus] = MessageStatus.ACTIVE,
) -> int:
"""Count messages for a key with the specified status (async)."""
session_factory, table = await self._initialize()
query = select(table.c.id).where(table.c.key == key)
if status is not None:
query = query.where(table.c.status == status.value)
async with session_factory() as session:
result = await session.execute(query)
rows = result.fetchall()
return len(rows)
async def add_message(
self,
key: str,
message: ChatMessage,
status: MessageStatus = MessageStatus.ACTIVE,
) -> None:
"""Add a message for a key with the specified status (async)."""
session_factory, table = await self._initialize()
async with session_factory() as session:
await session.execute(
insert(table).values(
key=key,
timestamp=time.time_ns(),
role=message.role,
status=status.value,
data=message.model_dump(mode="json"),
)
)
await session.commit()
async def add_messages(
self,
key: str,
messages: List[ChatMessage],
status: MessageStatus = MessageStatus.ACTIVE,
) -> None:
"""Add a list of messages in batch for the specified key and status (async)."""
session_factory, table = await self._initialize()
async with session_factory() as session:
await session.execute(
insert(table).values(
[
{
"key": key,
"timestamp": time.time_ns() + i,
"role": message.role,
"status": status.value,
"data": message.model_dump(mode="json"),
}
for i, message in enumerate(messages)
]
)
)
await session.commit()
async def set_messages(
self,
key: str,
messages: List[ChatMessage],
status: MessageStatus = MessageStatus.ACTIVE,
) -> None:
"""Set all messages for a key (replacing existing ones) with the specified status (async)."""
session_factory, table = await self._initialize()
# First delete all existing messages
await self.delete_messages(key)
# Then add new messages
current_time = time.time_ns()
async with session_factory() as session:
for i, message in enumerate(messages):
await session.execute(
insert(table).values(
key=key,
# Preserve order with incremental timestamps
timestamp=current_time + i,
role=message.role,
status=status.value,
data=message.model_dump(mode="json"),
)
)
await session.commit()
async def delete_message(self, key: str, idx: int) -> Optional[ChatMessage]:
"""Delete a specific message by ID and return it (async)."""
session_factory, table = await self._initialize()
async with session_factory() as session:
# First get the message
result = await session.execute(
select(table).where(table.c.key == key, table.c.id == idx)
)
row = result.fetchone()
if not row:
return None
# Store the message we're about to delete
message = ChatMessage.model_validate(row.data)
# Delete the message
await session.execute(delete(table).where(table.c.id == idx))
await session.commit()
return message
async def delete_messages(
self, key: str, status: Optional[MessageStatus] = None
) -> None:
"""Delete all messages for a key with the specified status (async)."""
session_factory, table = await self._initialize()
query = delete(table).where(table.c.key == key)
if status is not None:
query = query.where(table.c.status == status.value)
async with session_factory() as session:
await session.execute(query)
await session.commit()
async def delete_oldest_messages(self, key: str, n: int) -> List[ChatMessage]:
"""Delete the oldest n messages for a key and return them (async)."""
session_factory, table = await self._initialize()
oldest_messages = []
async with session_factory() as session:
# First get the oldest n messages
result = await session.execute(
select(table)
.where(
table.c.key == key,
table.c.status == MessageStatus.ACTIVE.value,
)
.order_by(table.c.timestamp, table.c.id)
.limit(n)
)
rows = result.fetchall()
if not rows:
return []
# Store the messages we're about to delete
oldest_messages = [ChatMessage.model_validate(row.data) for row in rows]
# Get the IDs to delete
ids_to_delete = [row.id for row in rows]
# Delete the messages
await session.execute(delete(table).where(table.c.id.in_(ids_to_delete)))
await session.commit()
return oldest_messages
async def archive_oldest_messages(self, key: str, n: int) -> List[ChatMessage]:
"""Archive the oldest n messages for a key and return them (async)."""
session_factory, table = await self._initialize()
async with session_factory() as session:
# First get the oldest n messages
result = await session.execute(
select(table)
.where(
table.c.key == key,
table.c.status == MessageStatus.ACTIVE.value,
)
.order_by(table.c.timestamp, table.c.id)
.limit(n)
)
rows = result.fetchall()
if not rows:
return []
# Store the messages we're about to archive
archived_messages = [ChatMessage.model_validate(row.data) for row in rows]
# Get the IDs to archive
ids_to_archive = [row.id for row in rows]
# Update message status to archived
await session.execute(
update(table)
.where(table.c.id.in_(ids_to_archive))
.values(status=MessageStatus.ARCHIVED.value)
)
await session.commit()
return archived_messages
async def get_keys(self) -> List[str]:
"""Get all unique keys in the store (async)."""
session_factory, table = await self._initialize()
async with session_factory() as session:
result = await session.execute(select(table.c.key).distinct())
return [row[0] for row in result.fetchall()]
async def _dump_db_data(self) -> List[Dict[str, Any]]:
"""Dump the data from the database."""
session_factory, table = await self._initialize()
async with session_factory() as session:
result = await session.execute(select(table))
rows = result.fetchall()
return [
{
"key": row.key,
"timestamp": row.timestamp,
"role": row.role,
"status": row.status,
"data": row.data,
}
for row in rows
]
@model_serializer()
def dump_store(self) -> dict:
"""
Dump the store's configuration and data (if in-memory).
Returns:
A dictionary containing the store's configuration and potentially its data.
"""
dump_data = {
"table_name": self.table_name,
"async_database_uri": self.async_database_uri,
"db_schema": self.db_schema,
}
if self._is_in_memory_uri(self.async_database_uri):
# switch to sync sqlite
dump_data["db_data"] = asyncio_run(self._dump_db_data())
return dump_data
@classmethod
def class_name(cls) -> str:
"""Return the class name."""
return "SQLAlchemyChatStore"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/llama_index/core/storage/chat_store/sql.py",
"license": "MIT License",
"lines": 376,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-core/tests/memory/test_memory_base.py | import pytest
from llama_index.core.base.llms.types import (
ChatMessage,
ImageBlock,
AudioBlock,
VideoBlock,
)
from llama_index.core.memory.memory import Memory
from llama_index.core.storage.chat_store.sql import MessageStatus
@pytest.fixture()
def memory():
"""Create a basic memory instance for testing."""
return Memory(
token_limit=1000,
token_flush_size=700,
chat_history_token_ratio=0.9,
session_id="test_user",
)
@pytest.mark.asyncio
async def test_initialization(memory):
"""Test that memory initializes correctly."""
assert memory.token_limit == 1000
assert memory.token_flush_size == 700
assert memory.session_id == "test_user"
@pytest.mark.asyncio
async def test_estimate_token_count_text(memory):
"""Test token counting for text."""
message = ChatMessage(role="user", content="Test message")
count = memory._estimate_token_count(message)
assert count == len(memory.tokenizer_fn("Test message"))
@pytest.mark.asyncio
async def test_estimate_token_count_image(memory):
"""Test token counting for images."""
block = ImageBlock(url="http://example.com/image.jpg")
message = ChatMessage(role="user", blocks=[block])
count = memory._estimate_token_count(message)
assert count == memory.image_token_size_estimate
@pytest.mark.asyncio
async def test_estimate_token_count_video(memory):
"""Test token counting for images."""
block = VideoBlock(url="http://example.com/video.mp4")
message = ChatMessage(role="user", blocks=[block])
count = memory._estimate_token_count(message)
assert count == memory.video_token_size_estimate
@pytest.mark.asyncio
async def test_estimate_token_count_audio(memory):
"""Test token counting for audio."""
block = AudioBlock(url="http://example.com/audio.mp3")
message = ChatMessage(role="user", blocks=[block])
count = memory._estimate_token_count(message)
assert count == memory.audio_token_size_estimate
@pytest.mark.asyncio
async def test_manage_queue_under_limit(memory):
"""Test queue management when under token limit."""
# Set up a case where we're under the token limit
chat_messages = [ChatMessage(role="user", content="Short message")]
await memory.aput_messages(chat_messages)
cur_messages = await memory.aget()
assert len(cur_messages) == 1
assert cur_messages[0].content == "Short message"
@pytest.mark.asyncio
async def test_manage_queue_over_limit(memory):
"""Test queue management when over token limit."""
# Set up a case where we're over the token limit
chat_messages = [
ChatMessage(role="user", content="x " * 500),
ChatMessage(role="assistant", content="y " * 500),
ChatMessage(role="user", content="z " * 500),
]
# This will exceed the token limit and flush 700 tokens (two messages)
await memory.aput_messages(chat_messages)
cur_messages = await memory.aget()
assert len(cur_messages) == 1
assert "z " in cur_messages[0].content
@pytest.mark.asyncio
async def test_aput(memory):
"""Test adding a message."""
message = ChatMessage(role="user", content="New message")
await memory.aput(message)
# Should add the message to the store
messages = await memory.aget()
assert len(messages) == 1
assert messages[0].content == "New message"
@pytest.mark.asyncio
async def test_aput_messages(memory):
"""Test adding multiple messages."""
messages = [
ChatMessage(role="user", content="Message 1"),
ChatMessage(role="assistant", content="Response 1"),
]
await memory.aput_messages(messages)
# Should add the messages to the store
messages = await memory.aget()
assert len(messages) == 2
assert messages[0].content == "Message 1"
assert messages[1].content == "Response 1"
@pytest.mark.asyncio
async def test_aset(memory):
"""Test setting the chat history."""
messages = [
ChatMessage(role="user", content="Message 1"),
ChatMessage(role="assistant", content="Response 1"),
]
await memory.aset(messages)
# Should set the messages in the store
messages = await memory.aget()
assert len(messages) == 2
assert messages[0].content == "Message 1"
assert messages[1].content == "Response 1"
@pytest.mark.asyncio
async def test_aget_all(memory):
"""Test getting all messages."""
await memory.aput_messages(
[
ChatMessage(role="user", content="Message 1"),
ChatMessage(role="assistant", content="Response 1"),
]
)
messages = await memory.aget_all(status=MessageStatus.ACTIVE)
# Should get all messages from the store
assert len(messages) == 2
assert messages[0].content == "Message 1"
assert messages[1].content == "Response 1"
@pytest.mark.asyncio
async def test_areset(memory):
"""Test resetting the memory."""
await memory.aput(ChatMessage(role="user", content="New message"))
await memory.areset(status=MessageStatus.ACTIVE)
# Should delete messages from the store
messages = await memory.aget()
assert len(messages) == 0
@pytest.mark.asyncio
async def test_manage_queue_first_message_must_be_user():
"""
Test that after flushing, the first message in queue is always a user message.
This tests the edge case where token limits are set low enough that
flushing could leave only an assistant message, which would break
providers like Amazon Bedrock that require user message first.
"""
# Create memory with very low limits to trigger the edge case
# token_limit * chat_history_token_ratio = 100 * 0.5 = 50 tokens for chat history
memory = Memory(
token_limit=100,
token_flush_size=50,
chat_history_token_ratio=0.5,
session_id="test_first_message_user",
)
# Create messages where user message is large and assistant message is small
# This simulates a tool call scenario where the tool returns a lot of content
# After flush, only the small assistant message might remain
chat_messages = [
ChatMessage(
role="user", content="x " * 100
), # Large user message (~100 tokens)
ChatMessage(
role="assistant", content="ok"
), # Small assistant message (~1 token)
]
await memory.aput_messages(chat_messages)
cur_messages = await memory.aget()
# The queue should not be empty
assert len(cur_messages) > 0, "Queue should not be empty after flush"
# The first message MUST be a user message
assert cur_messages[0].role == "user", (
f"First message must be 'user', but got '{cur_messages[0].role}'. "
"This would break providers like Amazon Bedrock."
)
@pytest.mark.asyncio
async def test_manage_queue_preserves_conversation_turn():
"""Test that flushing preserves at least one complete conversation turn."""
memory = Memory(
token_limit=200,
token_flush_size=100,
chat_history_token_ratio=0.5,
session_id="test_preserve_turn",
)
# Multiple conversation turns
chat_messages = [
ChatMessage(role="user", content="a " * 50),
ChatMessage(role="assistant", content="b " * 50),
ChatMessage(role="user", content="c " * 50),
ChatMessage(role="assistant", content="d " * 50),
]
await memory.aput_messages(chat_messages)
cur_messages = await memory.aget()
# Should have at least one complete turn (user + assistant)
assert len(cur_messages) >= 2, (
"Should preserve at least one complete conversation turn"
)
# First message must be user
assert cur_messages[0].role == "user"
# Verify alternating pattern
for i in range(len(cur_messages) - 1):
if cur_messages[i].role == "user":
assert cur_messages[i + 1].role in ("assistant", "tool")
@pytest.mark.asyncio
async def test_manage_queue_with_tool_messages():
"""
Test that flushing correctly handles tool calling scenarios.
In tool calling, the message sequence is:
user → assistant (tool_call) → tool → assistant
The recovery logic should keep the complete turn together.
"""
memory = Memory(
token_limit=150,
token_flush_size=80,
chat_history_token_ratio=0.5,
session_id="test_tool_calling",
)
# Simulate a tool calling scenario
chat_messages = [
ChatMessage(role="user", content="a " * 40), # ~40 tokens
ChatMessage(role="assistant", content="b " * 20), # ~20 tokens (with tool_call)
ChatMessage(role="tool", content="c " * 20), # ~20 tokens
ChatMessage(role="assistant", content="d " * 20), # ~20 tokens (final response)
]
await memory.aput_messages(chat_messages)
cur_messages = await memory.aget()
# Should preserve at least the user message
assert len(cur_messages) > 0, "Queue should not be empty"
assert cur_messages[0].role == "user", "First message must be user"
# If we have tool messages, they should be preceded by assistant
for i, msg in enumerate(cur_messages):
if msg.role == "tool":
assert i > 0, "Tool message should not be first"
# Tool messages should come after an assistant message
assert cur_messages[i - 1].role in ("assistant", "tool"), (
"Tool message should follow assistant or another tool"
)
@pytest.mark.asyncio
async def test_manage_queue_only_tool_message_remaining():
"""
Test edge case where only a tool message would remain after flush.
This can happen with very low token limits. The recovery should
find the preceding user message and keep the complete turn.
"""
memory = Memory(
token_limit=80,
token_flush_size=40,
chat_history_token_ratio=0.5, # Effective limit: 40 tokens
session_id="test_only_tool",
)
# Large user message, small tool response
chat_messages = [
ChatMessage(role="user", content="x " * 50), # ~50 tokens
ChatMessage(role="assistant", content="call"), # ~1 token
ChatMessage(role="tool", content="result"), # ~1 token
]
await memory.aput_messages(chat_messages)
cur_messages = await memory.aget()
# The queue should either:
# 1. Have a complete turn starting with user, OR
# 2. Be empty (if no recovery possible)
if len(cur_messages) > 0:
assert cur_messages[0].role == "user", (
f"First message must be 'user', got '{cur_messages[0].role}'"
)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/memory/test_memory_base.py",
"license": "MIT License",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/memory/test_memory_blocks_base.py | import pytest
from typing import List, Any, Optional, Union
from llama_index.core.base.llms.types import ChatMessage, TextBlock, ContentBlock
from llama_index.core.memory.memory import Memory, BaseMemoryBlock, InsertMethod
class TextMemoryBlock(BaseMemoryBlock[str]):
"""Memory block that returns text content."""
async def _aget(self, messages: List[ChatMessage], **kwargs: Any) -> str:
return "Simple text content from TextMemoryBlock"
async def _aput(self, messages: List[ChatMessage]) -> None:
# Just a no-op for testing
pass
class ContentBlocksMemoryBlock(BaseMemoryBlock[List[ContentBlock]]):
"""Memory block that returns content blocks."""
async def _aget(
self, messages: List[ChatMessage], **kwargs: Any
) -> List[ContentBlock]:
return [
TextBlock(text="Text block 1"),
TextBlock(text="Text block 2"),
]
async def _aput(self, messages: List[ChatMessage]) -> None:
# Just a no-op for testing
pass
async def atruncate(
self, content: List[ContentBlock], tokens_to_truncate: int
) -> Optional[List[ContentBlock]]:
# Simple truncation - remove last block
if not content:
return None
return content[:-1]
class ChatMessagesMemoryBlock(BaseMemoryBlock[List[ChatMessage]]):
"""Memory block that returns chat messages."""
async def _aget(
self, messages: List[ChatMessage], **kwargs: Any
) -> List[ChatMessage]:
return [
ChatMessage(role="user", content="Historical user message"),
ChatMessage(role="assistant", content="Historical assistant response"),
]
async def _aput(self, messages: List[ChatMessage]) -> None:
# Just a no-op for testing
pass
class ComplexMemoryBlock(BaseMemoryBlock[Union[str, List[ContentBlock]]]):
"""Memory block that can return different types based on input."""
mode: str = "text" # Can be "text" or "blocks"
async def _aget(
self, messages: List[ChatMessage], **kwargs: Any
) -> Union[str, List[ContentBlock]]:
if self.mode == "text":
return "Text content from ComplexMemoryBlock"
else:
return [
TextBlock(text="Complex block 1"),
TextBlock(text="Complex block 2"),
]
async def _aput(self, messages: List[ChatMessage]) -> None:
# Just a no-op for testing
pass
class ParameterizedMemoryBlock(BaseMemoryBlock[str]):
"""Memory block that uses parameters passed to aget."""
async def _aget(self, messages: List[ChatMessage], **kwargs: Any) -> str:
# Use parameters passed to aget
parameter = kwargs.get("test_parameter", "default")
return f"Parameter value: {parameter}"
async def _aput(self, messages: List[ChatMessage]) -> None:
# Just a no-op for testing
pass
@pytest.fixture()
def memory_with_blocks():
"""Set up memory with different types of memory blocks."""
return Memory(
token_limit=1000,
token_flush_size=700,
chat_history_token_ratio=0.9,
session_id="test_blocks",
memory_blocks=[
TextMemoryBlock(name="text_block", priority=1),
ContentBlocksMemoryBlock(name="content_blocks", priority=2),
ChatMessagesMemoryBlock(name="chat_messages", priority=3),
ComplexMemoryBlock(name="complex_block", priority=4),
ParameterizedMemoryBlock(name="param_block", priority=5),
],
)
@pytest.mark.asyncio
async def test_text_memory_block(memory_with_blocks):
"""Test text memory block integration."""
# Get the memory block content
content = await memory_with_blocks._get_memory_blocks_content([])
# Verify text block content
assert "text_block" in content
assert content["text_block"] == "Simple text content from TextMemoryBlock"
# Format content for insertion
formatted_blocks, _ = await memory_with_blocks._format_memory_blocks(
{"text_block": content["text_block"]}
)
# Check formatting
assert len(formatted_blocks) == 1
block_name, content = formatted_blocks[0]
assert block_name == "text_block"
assert len(content) == 1
assert content[0].text == "Simple text content from TextMemoryBlock"
@pytest.mark.asyncio
async def test_content_blocks_memory_block(memory_with_blocks):
"""Test content blocks memory block integration."""
# Get the memory block content
content = await memory_with_blocks._get_memory_blocks_content([])
# Verify content blocks block content
assert "content_blocks" in content
assert len(content["content_blocks"]) == 2
assert content["content_blocks"][0].text == "Text block 1"
assert content["content_blocks"][1].text == "Text block 2"
# Format content for insertion
formatted_blocks, _ = await memory_with_blocks._format_memory_blocks(
{"content_blocks": content["content_blocks"]}
)
# Check formatting
assert len(formatted_blocks) == 1
block_name, content = formatted_blocks[0]
assert block_name == "content_blocks"
assert len(content) == 2
assert content[0].text == "Text block 1"
assert content[1].text == "Text block 2"
@pytest.mark.asyncio
async def test_chat_messages_memory_block(memory_with_blocks):
"""Test chat messages memory block integration."""
# Get the memory block content
content = await memory_with_blocks._get_memory_blocks_content([])
# Verify chat messages block content
assert "chat_messages" in content
assert len(content["chat_messages"]) == 2
assert content["chat_messages"][0].role == "user"
assert content["chat_messages"][0].content == "Historical user message"
assert content["chat_messages"][1].role == "assistant"
assert content["chat_messages"][1].content == "Historical assistant response"
# Format content for insertion
formatted_blocks, chat_messages = await memory_with_blocks._format_memory_blocks(
{"chat_messages": content["chat_messages"]}
)
# Chat messages should be returned directly
assert len(chat_messages) == 2
assert chat_messages[0].role == "user"
assert chat_messages[0].content == "Historical user message"
assert chat_messages[1].role == "assistant"
assert chat_messages[1].content == "Historical assistant response"
@pytest.mark.asyncio
async def test_complex_memory_block_text_mode(memory_with_blocks):
"""Test complex memory block in text mode."""
# Set complex block to text mode
for block in memory_with_blocks.memory_blocks:
if isinstance(block, ComplexMemoryBlock):
block.mode = "text"
break
# Get the memory block content
content = await memory_with_blocks._get_memory_blocks_content([])
# Verify complex block content in text mode
assert "complex_block" in content
assert content["complex_block"] == "Text content from ComplexMemoryBlock"
@pytest.mark.asyncio
async def test_complex_memory_block_blocks_mode(memory_with_blocks):
"""Test complex memory block in blocks mode."""
# Set complex block to blocks mode
for block in memory_with_blocks.memory_blocks:
if isinstance(block, ComplexMemoryBlock):
block.mode = "blocks"
break
# Get the memory block content
content = await memory_with_blocks._get_memory_blocks_content([])
# Verify complex block content in blocks mode
assert "complex_block" in content
assert len(content["complex_block"]) == 2
assert content["complex_block"][0].text == "Complex block 1"
assert content["complex_block"][1].text == "Complex block 2"
@pytest.mark.asyncio
async def test_parameterized_memory_block(memory_with_blocks):
"""Test memory block that accepts parameters."""
# Get memory block content with parameter
content = await memory_with_blocks._get_memory_blocks_content(
[], test_parameter="custom_value"
)
# Verify parameter was passed through
assert "param_block" in content
assert content["param_block"] == "Parameter value: custom_value"
# Try with default parameter
content = await memory_with_blocks._get_memory_blocks_content([])
assert content["param_block"] == "Parameter value: default"
@pytest.mark.asyncio
async def test_truncation_of_content_blocks(memory_with_blocks):
"""Test truncation of content blocks."""
# Get memory blocks content
content = await memory_with_blocks._get_memory_blocks_content([])
content_blocks = content["content_blocks"]
# Get the memory block for truncation
content_block = next(
(
block
for block in memory_with_blocks.memory_blocks
if isinstance(block, ContentBlocksMemoryBlock)
),
None,
)
assert content_block is not None
# Test truncation
truncated = await content_block.atruncate(content_blocks, 100)
assert len(truncated) == 1 # Should have truncated to one block
assert truncated[0].text == "Text block 1"
@pytest.mark.asyncio
async def test_memory_with_all_block_types(memory_with_blocks):
"""Test getting all memory block types together."""
# Set up block content
chat_history = [ChatMessage(role="user", content="Current message")]
# Get final messages with memory blocks included
messages = await memory_with_blocks.aget()
# Should have properly inserted all memory content
assert len(messages) > 0
# Should have a system message with memory blocks
system_messages = [msg for msg in messages if msg.role == "system"]
assert len(system_messages) > 0
# The system message should contain content from our blocks
system_content = system_messages[0].blocks[0].text
assert "Simple text content from TextMemoryBlock" in system_content
assert "Text block 1" in system_content
assert "Text block 2" in system_content
# Should also include direct chat messages from ChatMessagesMemoryBlock
user_historical = [
msg for msg in messages if msg.content == "Historical user message"
]
assert len(user_historical) > 0
@pytest.mark.asyncio
async def test_insert_method_setting():
"""Test that insert_method is respected for blocks."""
# Create blocks with different insert methods
system_block = TextMemoryBlock(
name="system_block",
priority=1,
)
user_block = TextMemoryBlock(
name="user_block",
priority=2,
)
# Create memory with user insert method
memory = Memory(
token_limit=1000,
token_flush_size=700,
chat_history_token_ratio=0.9,
session_id="test_insert_methods",
insert_method=InsertMethod.USER,
memory_blocks=[system_block, user_block],
)
# Insert a user message
await memory.aput(ChatMessage(role="user", content="Test message!"))
# Get messages
messages = await memory.aget()
# Should have both system and user messages with appropriate content
system_msgs = [msg for msg in messages if msg.role == "system"]
user_msgs = [msg for msg in messages if msg.role == "user"]
assert len(user_msgs) > 0
assert len(system_msgs) == 0
# Create memory with system insert method
memory = Memory(
token_limit=1000,
token_flush_size=700,
chat_history_token_ratio=0.9,
session_id="test_insert_methods",
insert_method=InsertMethod.SYSTEM,
memory_blocks=[system_block, user_block],
)
# Get messages
messages = await memory.aget()
# Should have both system and user messages with appropriate content
system_msgs = [msg for msg in messages if msg.role == "system"]
user_msgs = [msg for msg in messages if msg.role == "user"]
assert len(user_msgs) == 0
assert len(system_msgs) > 0
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/memory/test_memory_blocks_base.py",
"license": "MIT License",
"lines": 273,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/storage/chat_store/test_sql.py | import json
import pytest
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.storage.chat_store.sql import (
SQLAlchemyChatStore,
MessageStatus,
)
@pytest.fixture()
def chat_store() -> SQLAlchemyChatStore:
"""Create a SQLAlchemyChatStore for testing."""
return SQLAlchemyChatStore(
table_name="test_messages",
async_database_uri="sqlite+aiosqlite:///:memory:",
)
@pytest.mark.asyncio
async def test_add_get_messages(chat_store: SQLAlchemyChatStore):
"""Test adding and retrieving messages."""
# Add messages
await chat_store.add_message("user1", ChatMessage(role="user", content="hello"))
await chat_store.add_message(
"user1", ChatMessage(role="assistant", content="world")
)
# Test getting messages
messages = await chat_store.get_messages("user1")
assert len(messages) == 2
assert messages[0].role == "user"
assert messages[0].content == "hello"
assert messages[1].role == "assistant"
assert messages[1].content == "world"
# Test with non-existent key
empty_messages = await chat_store.get_messages("nonexistent")
assert len(empty_messages) == 0
@pytest.mark.asyncio
async def test_add_messages_batch(chat_store: SQLAlchemyChatStore):
"""Test adding messages in batch."""
batch_messages = [
ChatMessage(role="user", content="hello"),
ChatMessage(role="assistant", content="world"),
ChatMessage(role="user", content="how are you?"),
]
await chat_store.add_messages("batch_user", batch_messages)
messages = await chat_store.get_messages("batch_user")
assert len(messages) == 3
assert [m.content for m in messages] == ["hello", "world", "how are you?"]
@pytest.mark.asyncio
async def test_count_messages(chat_store: SQLAlchemyChatStore):
"""Test counting messages."""
batch_messages = [
ChatMessage(role="user", content="message1"),
ChatMessage(role="assistant", content="message2"),
ChatMessage(role="user", content="message3"),
]
await chat_store.add_messages("count_user", batch_messages)
count = await chat_store.count_messages("count_user")
assert count == 3
# Test count with non-existent key
empty_count = await chat_store.count_messages("nonexistent")
assert empty_count == 0
@pytest.mark.asyncio
async def test_set_messages(chat_store: SQLAlchemyChatStore):
"""Test setting messages (replacing existing ones)."""
# Add initial messages
await chat_store.add_message(
"replace_user", ChatMessage(role="user", content="initial")
)
# Replace with new set
new_messages = [
ChatMessage(role="user", content="replaced1"),
ChatMessage(role="assistant", content="replaced2"),
]
await chat_store.set_messages("replace_user", new_messages)
# Verify replacement
messages = await chat_store.get_messages("replace_user")
assert len(messages) == 2
assert [m.content for m in messages] == ["replaced1", "replaced2"]
@pytest.mark.asyncio
async def test_delete_message(chat_store: SQLAlchemyChatStore):
"""Test deleting a specific message."""
batch_messages = [
ChatMessage(role="user", content="message1"),
ChatMessage(role="assistant", content="message2"),
ChatMessage(role="user", content="message3"),
]
await chat_store.add_messages("delete_user", batch_messages)
# Get messages to find their IDs
async with chat_store._async_session_factory() as session:
result = await session.execute(
chat_store._table.select().where(chat_store._table.c.key == "delete_user")
)
rows = result.fetchall()
# Delete the middle message
middle_id = rows[1].id
deleted_message = await chat_store.delete_message("delete_user", middle_id)
# Verify deletion
assert deleted_message.content == "message2"
remaining_messages = await chat_store.get_messages("delete_user")
assert len(remaining_messages) == 2
assert [m.content for m in remaining_messages] == ["message1", "message3"]
@pytest.mark.asyncio
async def test_delete_messages(chat_store: SQLAlchemyChatStore):
"""Test deleting all messages for a key."""
# Add messages for multiple users
await chat_store.add_message(
"delete_all_user1", ChatMessage(role="user", content="user1_message")
)
await chat_store.add_message(
"delete_all_user2", ChatMessage(role="user", content="user2_message")
)
# Delete messages for user1
await chat_store.delete_messages("delete_all_user1")
# Verify deletion
user1_messages = await chat_store.get_messages("delete_all_user1")
user2_messages = await chat_store.get_messages("delete_all_user2")
assert len(user1_messages) == 0
assert len(user2_messages) == 1
@pytest.mark.asyncio
async def test_delete_oldest_messages(chat_store: SQLAlchemyChatStore):
"""Test deleting oldest messages."""
batch_messages = [
ChatMessage(role="user", content="oldest"),
ChatMessage(role="assistant", content="middle"),
ChatMessage(role="user", content="newest"),
]
await chat_store.add_messages("oldest_test", batch_messages)
# Delete oldest message
deleted = await chat_store.delete_oldest_messages("oldest_test", 1)
# Verify deleted message
assert len(deleted) == 1
assert deleted[0].content == "oldest"
# Verify remaining messages
remaining = await chat_store.get_messages("oldest_test")
assert len(remaining) == 2
assert [m.content for m in remaining] == ["middle", "newest"]
@pytest.mark.asyncio
async def test_archive_oldest_messages(chat_store: SQLAlchemyChatStore):
"""Test archiving oldest messages."""
batch_messages = [
ChatMessage(role="user", content="oldest"),
ChatMessage(role="assistant", content="middle"),
ChatMessage(role="user", content="newest"),
]
await chat_store.add_messages("archive_test", batch_messages)
# Archive oldest message
archived = await chat_store.archive_oldest_messages("archive_test", 1)
# Verify archived message
assert len(archived) == 1
assert archived[0].content == "oldest"
# Verify active messages
active = await chat_store.get_messages("archive_test", status=MessageStatus.ACTIVE)
assert len(active) == 2
assert [m.content for m in active] == ["middle", "newest"]
# Verify archived messages
archived_msgs = await chat_store.get_messages(
"archive_test", status=MessageStatus.ARCHIVED
)
assert len(archived_msgs) == 1
assert archived_msgs[0].content == "oldest"
@pytest.mark.asyncio
async def test_get_messages_with_limit_offset(chat_store: SQLAlchemyChatStore):
"""Test getting messages with limit and offset."""
batch_messages = [
ChatMessage(role="user", content="message1"),
ChatMessage(role="assistant", content="message2"),
ChatMessage(role="user", content="message3"),
ChatMessage(role="assistant", content="message4"),
ChatMessage(role="user", content="message5"),
]
await chat_store.add_messages("pagination_test", batch_messages)
# Test with limit
limited = await chat_store.get_messages("pagination_test", limit=2)
assert len(limited) == 2
assert [m.content for m in limited] == ["message1", "message2"]
# Test with offset
offset = await chat_store.get_messages("pagination_test", offset=2)
assert len(offset) == 3
assert [m.content for m in offset] == ["message3", "message4", "message5"]
# Test with both limit and offset
paginated = await chat_store.get_messages("pagination_test", limit=2, offset=1)
assert len(paginated) == 2
assert [m.content for m in paginated] == ["message2", "message3"]
@pytest.mark.asyncio
async def test_get_keys(chat_store: SQLAlchemyChatStore):
"""Test getting all unique keys."""
# Add messages for multiple users
await chat_store.add_message(
"keys_user1", ChatMessage(role="user", content="user1_message")
)
await chat_store.add_message(
"keys_user2", ChatMessage(role="user", content="user2_message")
)
await chat_store.add_message(
"keys_user3", ChatMessage(role="user", content="user3_message")
)
# Get all keys
keys = await chat_store.get_keys()
# Verify keys (note: other tests may add more keys)
expected_keys = {"keys_user1", "keys_user2", "keys_user3"}
assert expected_keys.issubset(set(keys))
@pytest.mark.asyncio
async def test_dump_load_store(chat_store: SQLAlchemyChatStore):
"""Test dumping and loading the store."""
# Add some messages
await chat_store.add_message(
"dump_user1", ChatMessage(role="user", content="message1")
)
await chat_store.add_message(
"dump_user2", ChatMessage(role="user", content="message2")
)
# Dump the store
store_dict = chat_store.model_dump()
# ensure it's valid json
_ = json.dumps(store_dict)
# Load the store
loaded_store = SQLAlchemyChatStore.model_validate(store_dict)
# verify the loaded store is equivalent to the original store
assert loaded_store.table_name == chat_store.table_name
assert loaded_store.async_database_uri == chat_store.async_database_uri
# verify the messages are the same
messages = await loaded_store.get_messages("dump_user1")
assert len(messages) == 1
assert messages[0].content == "message1"
messages = await loaded_store.get_messages("dump_user2")
assert len(messages) == 1
assert messages[0].content == "message2"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/storage/chat_store/test_sql.py",
"license": "MIT License",
"lines": 224,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/storage/chat_store/llama-index-storage-chat-store-gel/llama_index/storage/chat_store/gel/base.py | from typing import Optional
import logging
import textwrap
from jinja2 import Template
from llama_index.core.llms import ChatMessage
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.storage.chat_store.base import BaseChatStore
_logger = logging.getLogger(__name__)
IMPORT_ERROR_MESSAGE = """
Error: Gel Python package is not installed.
Please install it using 'pip install gel'.
"""
NO_PROJECT_MESSAGE = """
Error: it appears that the Gel project has not been initialized.
If that's the case, please run 'gel project init' to get started.
"""
MISSING_RECORD_TYPE_TEMPLATE = """
Error: Record type {{record_type}} is missing from the Gel schema.
In order to use the LlamaIndex integration, ensure you put the following in dbschema/default.gel:
module default {
type {{record_type}} {
required key: str {
constraint exclusive;
}
value: array<json>;
}
}
Remember that you also need to run a migration:
$ gel migration create
$ gel migrate
"""
try:
import gel
except ImportError as e:
_logger.error(IMPORT_ERROR_MESSAGE)
raise
def format_query(text: str) -> str:
return textwrap.dedent(text.strip())
SET_MESSAGES_QUERY = format_query(
"""
insert Record {
key := <str>$key,
value := <array<json>>$value
} unless conflict on .key else (
update Record set {
value := <array<json>>$value
}
)
"""
)
GET_MESSAGES_QUERY = format_query(
"""
with
record := (select Record filter .key = <str>$key),
select record.value;
"""
)
ADD_MESSAGE_QUERY = format_query(
"""
insert Record {
key := <str>$key,
value := <array<json>>$value
} unless conflict on .key else (
update Record set {
value := .value ++ <array<json>>$value
}
)
"""
)
DELETE_MESSAGES_QUERY = format_query(
"""
delete Record filter .key = <str>$key
"""
)
DELETE_MESSAGE_QUERY = format_query(
"""
with
idx := <int64>$idx,
value := (select Record filter .key = <str>$key).value,
idx_item := value[idx],
new_value := value[:idx] ++ value[idx+1:],
updated_record := (
update Record
filter .key = <str>$key
set {
value := new_value
}
)
select idx_item;
"""
)
DELETE_LAST_MESSAGE_QUERY = format_query(
"""
with
value := (select Record filter .key = <str>$key).value,
last_item := value[len(value) - 1],
new_value := value[:len(value) - 1],
updated_record := (
update Record
filter .key = <str>$key
set {
value := new_value
}
)
select last_item;
"""
)
GET_KEYS_QUERY = format_query(
"""
select Record.key;
"""
)
class GelChatStore(BaseChatStore):
"""
Chat store implementation using Gel database.
Stores and retrieves chat messages using Gel as the backend storage.
"""
record_type: str
_sync_client: Optional[gel.Client] = PrivateAttr()
_async_client: Optional[gel.AsyncIOClient] = PrivateAttr()
def __init__(
self,
record_type: str = "Record",
):
"""
Initialize GelChatStore.
Args:
record_type: The name of the record type in Gel schema.
"""
super().__init__(record_type=record_type)
self._sync_client = None
self._async_client = None
def get_sync_client(self):
"""Get or initialize a synchronous Gel client."""
if self._async_client is not None:
raise RuntimeError(
"GelChatStore has already been used in async mode. "
"If you were intentionally trying to use different IO modes at the same time, "
"please create a new instance instead."
)
if self._sync_client is None:
self._sync_client = gel.create_client()
try:
self._sync_client.ensure_connected()
except gel.errors.ClientConnectionError as e:
_logger.error(NO_PROJECT_MESSAGE)
raise
try:
self._sync_client.query(f"select {self.record_type};")
except gel.errors.InvalidReferenceError as e:
_logger.error(
Template(MISSING_RECORD_TYPE_TEMPLATE).render(
record_type=self.record_type
)
)
raise
return self._sync_client
async def get_async_client(self):
"""Get or initialize an asynchronous Gel client."""
if self._sync_client is not None:
raise RuntimeError(
"GelChatStore has already been used in sync mode. "
"If you were intentionally trying to use different IO modes at the same time, "
"please create a new instance instead."
)
if self._async_client is None:
self._async_client = gel.create_async_client()
try:
await self._async_client.ensure_connected()
except gel.errors.ClientConnectionError as e:
_logger.error(NO_PROJECT_MESSAGE)
raise
try:
await self._async_client.query(f"select {self.record_type};")
except gel.errors.InvalidReferenceError as e:
_logger.error(
Template(MISSING_RECORD_TYPE_TEMPLATE).render(
record_type=self.record_type
)
)
raise
return self._async_client
def set_messages(self, key: str, messages: list[ChatMessage]) -> None:
"""Set messages for a key."""
client = self.get_sync_client()
client.query(
SET_MESSAGES_QUERY,
key=key,
value=[message.model_dump_json() for message in messages],
)
async def aset_messages(self, key: str, messages: list[ChatMessage]) -> None:
"""Async version of Get messages for a key."""
client = await self.get_async_client()
await client.query(
SET_MESSAGES_QUERY,
key=key,
value=[message.model_dump_json() for message in messages],
)
def get_messages(self, key: str) -> list[ChatMessage]:
"""Get messages for a key."""
client = self.get_sync_client()
result = client.query_single(GET_MESSAGES_QUERY, key=key) or []
return [ChatMessage.model_validate_json(message) for message in result]
async def aget_messages(self, key: str) -> list[ChatMessage]:
"""Async version of Get messages for a key."""
client = await self.get_async_client()
result = await client.query_single(GET_MESSAGES_QUERY, key=key) or []
return [ChatMessage.model_validate_json(message) for message in result]
def add_message(self, key: str, message: ChatMessage) -> None:
"""Add a message for a key."""
client = self.get_sync_client()
client.query(ADD_MESSAGE_QUERY, key=key, value=[message.model_dump_json()])
async def async_add_message(self, key: str, message: ChatMessage) -> None:
"""Async version of Add a message for a key."""
client = await self.get_async_client()
await client.query(
ADD_MESSAGE_QUERY, key=key, value=[message.model_dump_json()]
)
def delete_messages(self, key: str) -> Optional[list[ChatMessage]]:
"""Delete messages for a key."""
client = self.get_sync_client()
client.query(DELETE_MESSAGES_QUERY, key=key)
async def adelete_messages(self, key: str) -> Optional[list[ChatMessage]]:
"""Async version of Delete messages for a key."""
client = await self.get_async_client()
await client.query(DELETE_MESSAGES_QUERY, key=key)
def delete_message(self, key: str, idx: int) -> Optional[ChatMessage]:
"""Delete specific message for a key."""
client = self.get_sync_client()
result = client.query_single(DELETE_MESSAGE_QUERY, key=key, idx=idx)
return ChatMessage.model_validate_json(result) if result else None
async def adelete_message(self, key: str, idx: int) -> Optional[ChatMessage]:
"""Async version of Delete specific message for a key."""
client = await self.get_async_client()
result = await client.query_single(DELETE_MESSAGE_QUERY, key=key, idx=idx)
return ChatMessage.model_validate_json(result) if result else None
def delete_last_message(self, key: str) -> Optional[ChatMessage]:
"""Delete last message for a key."""
client = self.get_sync_client()
result = client.query_single(DELETE_LAST_MESSAGE_QUERY, key=key)
return ChatMessage.model_validate_json(result) if result else None
async def adelete_last_message(self, key: str) -> Optional[ChatMessage]:
"""Async version of Delete last message for a key."""
client = await self.get_async_client()
result = await client.query_single(DELETE_LAST_MESSAGE_QUERY, key=key)
return ChatMessage.model_validate_json(result) if result else None
def get_keys(self) -> list[str]:
"""Get all keys."""
client = self.get_sync_client()
return client.query(GET_KEYS_QUERY)
async def aget_keys(self) -> list[str]:
"""Async version of Get all keys."""
client = await self.get_async_client()
return await client.query(GET_KEYS_QUERY)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/storage/chat_store/llama-index-storage-chat-store-gel/llama_index/storage/chat_store/gel/base.py",
"license": "MIT License",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-integrations/storage/chat_store/llama-index-storage-chat-store-gel/tests/test_chat_store_gel_chat_store.py | import subprocess
import pytest
import pytest_asyncio
import os
from typing import Generator
from llama_index.core.llms import ChatMessage
from llama_index.core.storage.chat_store.base import BaseChatStore
from llama_index.storage.chat_store.gel import GelChatStore
skip_in_cicd = os.environ.get("CI") is not None
try:
if not skip_in_cicd:
subprocess.run(["gel", "project", "init", "--non-interactive"], check=True)
except subprocess.CalledProcessError as e:
print(e)
def test_class():
names_of_base_classes = [b.__name__ for b in GelChatStore.__mro__]
assert BaseChatStore.__name__ in names_of_base_classes
@pytest.fixture()
def gel_chat_store() -> Generator[GelChatStore, None, None]:
chat_store = None
try:
chat_store = GelChatStore()
yield chat_store
finally:
if chat_store:
keys = chat_store.get_keys()
for key in keys:
chat_store.delete_messages(key)
@pytest_asyncio.fixture()
async def gel_chat_store_async():
# New instance of the GelKVStore client to use it in async mode
chat_store = None
try:
chat_store = GelChatStore()
yield chat_store
finally:
if chat_store:
keys = await chat_store.aget_keys()
for key in keys:
await chat_store.adelete_messages(key)
@pytest.mark.skipif(skip_in_cicd, reason="gel package not installed")
def test_gel_add_message(gel_chat_store: GelChatStore):
key = "test_add_key"
message = ChatMessage(content="add_message_test", role="user")
gel_chat_store.add_message(key, message=message)
result = gel_chat_store.get_messages(key)
assert result[0].content == "add_message_test" and result[0].role == "user"
@pytest.mark.skipif(skip_in_cicd, reason="gel package not installed")
def test_set_and_retrieve_messages(gel_chat_store: GelChatStore):
messages = [
ChatMessage(content="First message", role="user"),
ChatMessage(content="Second message", role="user"),
]
key = "test_set_key"
gel_chat_store.set_messages(key, messages)
retrieved_messages = gel_chat_store.get_messages(key)
assert len(retrieved_messages) == 2
assert retrieved_messages[0].content == "First message"
assert retrieved_messages[1].content == "Second message"
@pytest.mark.skipif(skip_in_cicd, reason="gel package not installed")
def test_delete_messages(gel_chat_store: GelChatStore):
messages = [ChatMessage(content="Message to delete", role="user")]
key = "test_delete_key"
gel_chat_store.set_messages(key, messages)
gel_chat_store.delete_messages(key)
retrieved_messages = gel_chat_store.get_messages(key)
assert retrieved_messages == []
@pytest.mark.skipif(skip_in_cicd, reason="gel package not installed")
def test_delete_specific_message(gel_chat_store: GelChatStore):
messages = [
ChatMessage(content="Keep me", role="user"),
ChatMessage(content="Delete me", role="user"),
]
key = "test_delete_message_key"
gel_chat_store.set_messages(key, messages)
deleted_message = gel_chat_store.delete_message(key, 1)
retrieved_messages = gel_chat_store.get_messages(key)
assert len(retrieved_messages) == 1
assert retrieved_messages[0].content == "Keep me"
assert deleted_message.content == "Delete me"
@pytest.mark.skipif(skip_in_cicd, reason="gel package not installed")
def test_get_keys(gel_chat_store: GelChatStore):
# Add some test data
gel_chat_store.set_messages("key1", [ChatMessage(content="Test1", role="user")])
gel_chat_store.set_messages("key2", [ChatMessage(content="Test2", role="user")])
keys = gel_chat_store.get_keys()
assert "key1" in keys
assert "key2" in keys
@pytest.mark.skipif(skip_in_cicd, reason="gel package not installed")
def test_delete_last_message(gel_chat_store: GelChatStore):
key = "test_delete_last_message"
messages = [
ChatMessage(content="First message", role="user"),
ChatMessage(content="Last message", role="user"),
]
gel_chat_store.set_messages(key, messages)
deleted_message = gel_chat_store.delete_last_message(key)
assert deleted_message.content == "Last message"
remaining_messages = gel_chat_store.get_messages(key)
assert len(remaining_messages) == 1
assert remaining_messages[0].content == "First message"
@pytest.mark.skipif(skip_in_cicd, reason="gel package not installed")
@pytest.mark.asyncio
async def test_async_gel_add_message(gel_chat_store_async: GelChatStore):
key = "test_async_add_key"
message = ChatMessage(content="async_add_message_test", role="user")
await gel_chat_store_async.async_add_message(key, message=message)
result = await gel_chat_store_async.aget_messages(key)
assert result[0].content == "async_add_message_test" and result[0].role == "user"
@pytest.mark.skipif(skip_in_cicd, reason="gel package not installed")
@pytest.mark.asyncio
async def test_async_set_and_retrieve_messages(gel_chat_store_async: GelChatStore):
messages = [
ChatMessage(content="First async message", role="user"),
ChatMessage(content="Second async message", role="user"),
]
key = "test_async_set_key"
await gel_chat_store_async.aset_messages(key, messages)
retrieved_messages = await gel_chat_store_async.aget_messages(key)
assert len(retrieved_messages) == 2
assert retrieved_messages[0].content == "First async message"
assert retrieved_messages[1].content == "Second async message"
@pytest.mark.skipif(skip_in_cicd, reason="gel package not installed")
@pytest.mark.asyncio
async def test_async_delete_messages(gel_chat_store_async: GelChatStore):
messages = [ChatMessage(content="Async message to delete", role="user")]
key = "test_async_delete_key"
await gel_chat_store_async.aset_messages(key, messages)
await gel_chat_store_async.adelete_messages(key)
retrieved_messages = await gel_chat_store_async.aget_messages(key)
assert retrieved_messages == []
@pytest.mark.skipif(skip_in_cicd, reason="gel package not installed")
@pytest.mark.asyncio
async def test_async_delete_specific_message(gel_chat_store_async: GelChatStore):
messages = [
ChatMessage(content="Async keep me", role="user"),
ChatMessage(content="Async delete me", role="user"),
]
key = "test_adelete_message_key"
await gel_chat_store_async.aset_messages(key, messages)
deleted_message = await gel_chat_store_async.adelete_message(key, 1)
retrieved_messages = await gel_chat_store_async.aget_messages(key)
assert len(retrieved_messages) == 1
assert retrieved_messages[0].content == "Async keep me"
assert deleted_message.content == "Async delete me"
@pytest.mark.skipif(skip_in_cicd, reason="gel package not installed")
@pytest.mark.asyncio
async def test_async_get_keys(gel_chat_store_async: GelChatStore):
# Add some test data
await gel_chat_store_async.aset_messages(
"async_key1", [ChatMessage(content="Test1", role="user")]
)
await gel_chat_store_async.aset_messages(
"async_key2", [ChatMessage(content="Test2", role="user")]
)
keys = await gel_chat_store_async.aget_keys()
assert "async_key1" in keys
assert "async_key2" in keys
@pytest.mark.skipif(skip_in_cicd, reason="gel package not installed")
@pytest.mark.asyncio
async def test_async_delete_last_message(gel_chat_store_async: GelChatStore):
key = "test_async_delete_last_message"
messages = [
ChatMessage(content="First async message", role="user"),
ChatMessage(content="Last async message", role="user"),
]
await gel_chat_store_async.aset_messages(key, messages)
deleted_message = await gel_chat_store_async.adelete_last_message(key)
assert deleted_message.content == "Last async message"
remaining_messages = await gel_chat_store_async.aget_messages(key)
assert len(remaining_messages) == 1
assert remaining_messages[0].content == "First async message"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/storage/chat_store/llama-index-storage-chat-store-gel/tests/test_chat_store_gel_chat_store.py",
"license": "MIT License",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/storage/docstore/llama-index-storage-docstore-gel/llama_index/storage/docstore/gel/base.py | from typing import Optional
from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore
from llama_index.core.storage.docstore.types import DEFAULT_BATCH_SIZE
from llama_index.storage.kvstore.gel import GelKVStore
class GelDocumentStore(KVDocumentStore):
"""
Gel Document (Node) store.
A Gel store for Document and Node objects.
Args:
gel_kvstore (GelKVStore): Gel key-value store
namespace (str): namespace for the docstore
batch_size (int): batch size for bulk operations
"""
def __init__(
self,
gel_kvstore: GelKVStore,
namespace: Optional[str] = None,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
"""Init a GelDocumentStore."""
super().__init__(gel_kvstore, namespace=namespace, batch_size=batch_size)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/storage/docstore/llama-index-storage-docstore-gel/llama_index/storage/docstore/gel/base.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-integrations/storage/docstore/llama-index-storage-docstore-gel/tests/test_gel.py | import subprocess
import pytest
import os
from typing import List, Generator
from llama_index.core.schema import BaseNode, Document
from llama_index.storage.docstore.gel import (
GelDocumentStore,
)
from llama_index.storage.kvstore.gel import GelKVStore
try:
import gel # noqa
no_packages = False
except ImportError:
no_packages = True
skip_in_cicd = os.environ.get("CI") is not None
try:
if not skip_in_cicd:
subprocess.run(["gel", "project", "init", "--non-interactive"], check=True)
except subprocess.CalledProcessError as e:
print(e)
@pytest.fixture()
def documents() -> List[Document]:
return [
Document(text="doc_1"),
Document(text="doc_2"),
]
@pytest.fixture()
def gel_kvstore() -> Generator[GelKVStore, None, None]:
kvstore = None
try:
kvstore = GelKVStore()
yield kvstore
finally:
if kvstore:
keys = kvstore.get_all().keys()
for key in keys:
kvstore.delete(key)
@pytest.fixture()
def gel_docstore(gel_kvstore: GelKVStore) -> Generator[GelDocumentStore, None, None]:
docstore = None
try:
docstore = GelDocumentStore(gel_kvstore=gel_kvstore)
for id_ in docstore.docs:
docstore.delete_document(id_)
yield docstore
finally:
if docstore:
for id_ in docstore.docs:
docstore.delete_document(id_)
@pytest.mark.skipif(no_packages or skip_in_cicd, reason="gel not installed")
def test_gel_docstore(
gel_docstore: GelDocumentStore, documents: List[Document]
) -> None:
ds = gel_docstore
assert len(ds.docs) == 0
# test adding documents
ds.add_documents(documents)
assert len(ds.docs) == 2
assert all(isinstance(doc, BaseNode) for doc in ds.docs.values())
# test updating documents
ds.add_documents(documents)
print(ds.docs)
assert len(ds.docs) == 2
# test getting documents
doc0 = ds.get_document(documents[0].get_doc_id())
assert doc0 is not None
assert documents[0].get_content() == doc0.get_content()
# test deleting documents
ds.delete_document(documents[0].get_doc_id())
assert len(ds.docs) == 1
@pytest.mark.skipif(no_packages or skip_in_cicd, reason="gel not installed")
def test_gel_docstore_hash(
gel_docstore: GelDocumentStore, documents: List[Document]
) -> None:
ds = gel_docstore
# Test setting hash
ds.set_document_hash("test_doc_id", "test_doc_hash")
doc_hash = ds.get_document_hash("test_doc_id")
assert doc_hash == "test_doc_hash"
# Test updating hash
ds.set_document_hash("test_doc_id", "test_doc_hash_new")
doc_hash = ds.get_document_hash("test_doc_id")
assert doc_hash == "test_doc_hash_new"
# Test getting non-existent
doc_hash = ds.get_document_hash("test_not_exist")
assert doc_hash is None
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/storage/docstore/llama-index-storage-docstore-gel/tests/test_gel.py",
"license": "MIT License",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/storage/docstore/llama-index-storage-docstore-gel/tests/test_storage_docstore_gel.py | from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore
from llama_index.storage.docstore.gel import GelDocumentStore
def test_class():
names_of_base_classes = [b.__name__ for b in GelDocumentStore.__mro__]
assert KVDocumentStore.__name__ in names_of_base_classes
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/storage/docstore/llama-index-storage-docstore-gel/tests/test_storage_docstore_gel.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/storage/index_store/llama-index-storage-index-store-gel/llama_index/storage/index_store/gel/base.py | from typing import Optional
from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.storage.kvstore.gel import GelKVStore
class GelIndexStore(KVIndexStore):
"""
Gel Index store.
Args:
gel_kvstore (GelKVStore): Gel key-value store
namespace (str): namespace for the index store
"""
def __init__(
self,
gel_kvstore: GelKVStore,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> None:
"""Init a GelIndexStore."""
super().__init__(
gel_kvstore, namespace=namespace, collection_suffix=collection_suffix
)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/storage/index_store/llama-index-storage-index-store-gel/llama_index/storage/index_store/gel/base.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/storage/index_store/llama-index-storage-index-store-gel/tests/test_gel.py | import pytest
import subprocess
import os
from typing import Generator
from llama_index.core.data_structs.data_structs import IndexGraph
from llama_index.storage.index_store.gel import (
GelIndexStore,
)
from llama_index.storage.kvstore.gel import GelKVStore
try:
import gel # noqa
no_packages = False
except ImportError:
no_packages = True
skip_in_cicd = os.environ.get("CI") is not None
try:
if not skip_in_cicd:
subprocess.run(["gel", "project", "init", "--non-interactive"], check=True)
except subprocess.CalledProcessError as e:
print(e)
@pytest.fixture()
def gel_kvstore() -> Generator[GelKVStore, None, None]:
kvstore = None
try:
kvstore = GelKVStore()
yield kvstore
finally:
if kvstore:
keys = kvstore.get_all().keys()
for key in keys:
kvstore.delete(key)
@pytest.fixture()
def gel_indexstore(gel_kvstore: GelKVStore) -> GelIndexStore:
return GelIndexStore(gel_kvstore=gel_kvstore)
@pytest.mark.skipif(no_packages or skip_in_cicd, reason="gel not installed")
def test_gel_index_store(gel_indexstore: GelIndexStore) -> None:
index_struct = IndexGraph()
index_store = gel_indexstore
index_store.add_index_struct(index_struct)
assert index_store.get_index_struct(struct_id=index_struct.index_id) == index_struct
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/storage/index_store/llama-index-storage-index-store-gel/tests/test_gel.py",
"license": "MIT License",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/storage/index_store/llama-index-storage-index-store-gel/tests/test_storage_index_store_gel.py | from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.storage.index_store.gel import GelIndexStore
def test_class():
names_of_base_classes = [b.__name__ for b in GelIndexStore.__mro__]
assert KVIndexStore.__name__ in names_of_base_classes
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/storage/index_store/llama-index-storage-index-store-gel/tests/test_storage_index_store_gel.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/storage/kvstore/llama-index-storage-kvstore-gel/llama_index/storage/kvstore/gel/base.py | from typing import Dict, List, Optional, Tuple
import json
import logging
import textwrap
from jinja2 import Template
from llama_index.core.storage.kvstore.types import (
DEFAULT_BATCH_SIZE,
DEFAULT_COLLECTION,
BaseKVStore,
)
_logger = logging.getLogger(__name__)
IMPORT_ERROR_MESSAGE = """
Error: Gel Python package is not installed.
Please install it using 'pip install gel'.
"""
NO_PROJECT_MESSAGE = """
Error: it appears that the Gel project has not been initialized.
If that's the case, please run 'gel project init' to get started.
"""
MISSING_RECORD_TYPE_TEMPLATE = """
Error: Record type {{record_type}} is missing from the Gel schema.
In order to use the LlamaIndex integration, ensure you put the following in dbschema/default.gel:
module default {
type {{record_type}} {
required key: str;
required namespace: str;
value: json;
constraint exclusive on ((.key, .namespace));
}
}
Remember that you also need to run a migration:
$ gel migration create
$ gel migrate
"""
try:
import gel
except ImportError as e:
_logger.error(IMPORT_ERROR_MESSAGE)
raise
def format_query(text: str) -> str:
return textwrap.dedent(text.strip())
PUT_QUERY = format_query(
"""
insert Record {
key := <str>$key,
namespace := <str>$namespace,
value := <json>$value
} unless conflict on (.key, .namespace) else (
update Record set {
value := <json>$value
}
)
"""
)
PUT_ALL_QUERY = format_query(
"""
with
raw_data := <json>$data,
namespace := <str>$namespace,
for item in json_array_unpack(raw_data) union (
insert Record {
key := <str>item['key'],
namespace := namespace,
value := <json>item['value']
} unless conflict on (.key, .namespace) else (
update Record set {
value := <json>item['value']
}
)
);
"""
)
GET_QUERY = format_query(
"""
with record := (
select Record
filter .key = <str>$key and .namespace = <str>$namespace
)
select record.value;
"""
)
GET_ALL_QUERY = format_query(
"""
select Record {
key,
value
}
filter .namespace = <str>$namespace;
"""
)
DELETE_QUERY = format_query(
"""
delete Record filter .key = <str>$key and .namespace = <str>$namespace;
"""
)
class GelKVStore(BaseKVStore):
"""Gel Key-Value store."""
def __init__(self, record_type: str = "Record") -> None:
"""
Initialize GelKVStore.
Args:
record_type: The name of the record type in Gel schema.
"""
self.record_type = record_type
self._sync_client = None
self._async_client = None
def get_sync_client(self):
"""
Get or initialize a synchronous Gel client.
Ensures the client is connected and the record type exists.
Returns:
A connected synchronous Gel client.
"""
if self._async_client is not None:
raise RuntimeError(
"GelKVStore has already been used in async mode. "
"If you were intentionally trying to use different IO modes at the same time, "
"please create a new instance instead."
)
if self._sync_client is None:
self._sync_client = gel.create_client()
try:
self._sync_client.ensure_connected()
except gel.errors.ClientConnectionError as e:
_logger.error(NO_PROJECT_MESSAGE)
raise
try:
self._sync_client.query(f"select {self.record_type};")
except gel.errors.InvalidReferenceError as e:
_logger.error(
Template(MISSING_RECORD_TYPE_TEMPLATE).render(
record_type=self.record_type
)
)
raise
return self._sync_client
async def get_async_client(self):
"""
Get or initialize an asynchronous Gel client.
Ensures the client is connected and the record type exists.
Returns:
A connected asynchronous Gel client.
"""
if self._sync_client is not None:
raise RuntimeError(
"GelKVStore has already been used in sync mode. "
"If you were intentionally trying to use different IO modes at the same time, "
"please create a new instance instead."
)
if self._async_client is None:
self._async_client = gel.create_async_client()
try:
await self._async_client.ensure_connected()
except gel.errors.ClientConnectionError as e:
_logger.error(NO_PROJECT_MESSAGE)
raise
try:
await self._async_client.query(f"select {self.record_type};")
except gel.errors.InvalidReferenceError as e:
_logger.error(
Template(MISSING_RECORD_TYPE_TEMPLATE).render(
record_type=self.record_type
)
)
raise
return self._async_client
def put(
self,
key: str,
val: dict,
collection: str = DEFAULT_COLLECTION,
) -> None:
"""
Put a key-value pair into the store.
Args:
key (str): key
val (dict): value
collection (str): collection name
"""
client = self.get_sync_client()
client.query(
PUT_QUERY,
key=key,
namespace=collection,
value=json.dumps(val),
)
async def aput(
self,
key: str,
val: dict,
collection: str = DEFAULT_COLLECTION,
) -> None:
"""
Put a key-value pair into the store.
Args:
key (str): key
val (dict): value
collection (str): collection name
"""
client = await self.get_async_client()
await client.query(
PUT_QUERY,
key=key,
namespace=collection,
value=json.dumps(val),
)
def put_all(
self,
kv_pairs: List[Tuple[str, dict]],
collection: str = DEFAULT_COLLECTION,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
"""
Store multiple key-value pairs in batches.
Args:
kv_pairs: List of (key, value) tuples to store.
collection: Namespace for the keys.
batch_size: Number of pairs to store in each batch.
"""
for chunk in (
kv_pairs[pos : pos + batch_size]
for pos in range(0, len(kv_pairs), batch_size)
):
client = self.get_sync_client()
client.query(
PUT_ALL_QUERY,
data=json.dumps([{"key": key, "value": value} for key, value in chunk]),
namespace=collection,
)
async def aput_all(
self,
kv_pairs: List[Tuple[str, dict]],
collection: str = DEFAULT_COLLECTION,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
"""
Async version of put_all.
Args:
kv_pairs: List of (key, value) tuples to store.
collection: Namespace for the keys.
batch_size: Number of pairs to store in each batch.
"""
for chunk in (
kv_pairs[pos : pos + batch_size]
for pos in range(0, len(kv_pairs), batch_size)
):
client = await self.get_async_client()
await client.query(
PUT_ALL_QUERY,
data=json.dumps([{"key": key, "value": value} for key, value in chunk]),
namespace=collection,
)
def get(self, key: str, collection: str = DEFAULT_COLLECTION) -> Optional[dict]:
"""
Get a value from the store.
Args:
key (str): key
collection (str): collection name
"""
client = self.get_sync_client()
result = client.query_single(
GET_QUERY,
key=key,
namespace=collection,
)
return json.loads(result) if result is not None else None
async def aget(
self, key: str, collection: str = DEFAULT_COLLECTION
) -> Optional[dict]:
"""
Get a value from the store.
Args:
key (str): key
collection (str): collection name
"""
client = await self.get_async_client()
result = await client.query_single(
GET_QUERY,
key=key,
namespace=collection,
)
return json.loads(result) if result is not None else None
def get_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""
Get all values from the store.
Args:
collection (str): collection name
"""
client = self.get_sync_client()
results = client.query(
GET_ALL_QUERY,
namespace=collection,
)
return {result.key: json.loads(result.value) for result in results}
async def aget_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""
Get all values from the store.
Args:
collection (str): collection name
"""
client = await self.get_async_client()
results = await client.query(
GET_ALL_QUERY,
namespace=collection,
)
return {result.key: json.loads(result.value) for result in results}
def delete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""
Delete a value from the store.
Args:
key (str): key
collection (str): collection name
"""
client = self.get_sync_client()
result = client.query(
DELETE_QUERY,
key=key,
namespace=collection,
)
return len(result) > 0
async def adelete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""
Delete a value from the store.
Args:
key (str): key
collection (str): collection name
"""
client = await self.get_async_client()
result = await client.query(
DELETE_QUERY,
key=key,
namespace=collection,
)
return len(result) > 0
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/storage/kvstore/llama-index-storage-kvstore-gel/llama_index/storage/kvstore/gel/base.py",
"license": "MIT License",
"lines": 334,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-integrations/storage/kvstore/llama-index-storage-kvstore-gel/tests/test_gel.py | import subprocess
import os
from typing import Generator
import pytest
import pytest_asyncio
from llama_index.storage.kvstore.gel import GelKVStore
try:
import gel # noqa
no_packages = False
except ImportError:
no_packages = True
# Skip tests in CICD
skip_in_cicd = os.environ.get("CI") is not None
try:
if not skip_in_cicd:
subprocess.run(["gel", "project", "init", "--non-interactive"], check=True)
except subprocess.CalledProcessError as e:
print(e)
@pytest.fixture()
def gel_kvstore() -> Generator[GelKVStore, None, None]:
kvstore = None
try:
kvstore = GelKVStore()
yield kvstore
finally:
if kvstore:
keys = kvstore.get_all().keys()
for key in keys:
kvstore.delete(key)
@pytest_asyncio.fixture()
async def gel_kvstore_async():
# New instance of the GelKVStore client to use it in async mode
kvstore = None
try:
kvstore = GelKVStore()
yield kvstore
finally:
if kvstore:
all_items = await kvstore.aget_all()
keys = all_items.keys()
for key in keys:
await kvstore.adelete(key)
@pytest.mark.skipif(no_packages or skip_in_cicd, reason="gel package not installed")
def test_kvstore_basic(gel_kvstore: GelKVStore) -> None:
test_key = "test_key_basic"
test_blob = {"test_obj_key": "test_obj_val"}
gel_kvstore.put(test_key, test_blob)
blob = gel_kvstore.get(test_key)
assert blob == test_blob
blob = gel_kvstore.get(test_key, collection="non_existent")
assert blob is None
deleted = gel_kvstore.delete(test_key)
assert deleted
@pytest.mark.skipif(no_packages or skip_in_cicd, reason="gel package not installed")
@pytest.mark.asyncio
async def test_kvstore_async_basic(gel_kvstore_async: GelKVStore) -> None:
test_key = "test_key_basic"
test_blob = {"test_obj_key": "test_obj_val"}
await gel_kvstore_async.aput(test_key, test_blob)
blob = await gel_kvstore_async.aget(test_key)
assert blob == test_blob
blob = await gel_kvstore_async.aget(test_key, collection="non_existent")
assert blob is None
deleted = await gel_kvstore_async.adelete(test_key)
assert deleted
@pytest.mark.skipif(no_packages or skip_in_cicd, reason="gel package not installed")
def test_kvstore_delete(gel_kvstore: GelKVStore) -> None:
test_key = "test_key_delete"
test_blob = {"test_obj_key": "test_obj_val"}
gel_kvstore.put(test_key, test_blob)
blob = gel_kvstore.get(test_key)
assert blob == test_blob
gel_kvstore.delete(test_key)
blob = gel_kvstore.get(test_key)
assert blob is None
@pytest.mark.skipif(no_packages or skip_in_cicd, reason="gel package not installed")
@pytest.mark.asyncio
async def test_kvstore_adelete(gel_kvstore_async: GelKVStore) -> None:
test_key = "test_key_delete"
test_blob = {"test_obj_key": "test_obj_val"}
await gel_kvstore_async.aput(test_key, test_blob)
blob = await gel_kvstore_async.aget(test_key)
assert blob == test_blob
await gel_kvstore_async.adelete(test_key)
blob = await gel_kvstore_async.aget(test_key)
assert blob is None
@pytest.mark.skipif(no_packages or skip_in_cicd, reason="gel package not installed")
def test_kvstore_getall(gel_kvstore: GelKVStore) -> None:
test_key_1 = "test_key_1"
test_blob_1 = {"test_obj_key": "test_obj_val"}
gel_kvstore.put(test_key_1, test_blob_1)
blob = gel_kvstore.get(test_key_1)
assert blob == test_blob_1
test_key_2 = "test_key_2"
test_blob_2 = {"test_obj_key": "test_obj_val"}
gel_kvstore.put(test_key_2, test_blob_2)
blob = gel_kvstore.get(test_key_2)
assert blob == test_blob_2
blob = gel_kvstore.get_all()
assert len(blob) == 2
gel_kvstore.delete(test_key_1)
gel_kvstore.delete(test_key_2)
@pytest.mark.skipif(no_packages or skip_in_cicd, reason="gel package not installed")
@pytest.mark.asyncio
async def test_kvstore_agetall(gel_kvstore_async: GelKVStore) -> None:
test_key_1 = "test_key_1"
test_blob_1 = {"test_obj_key": "test_obj_val"}
await gel_kvstore_async.aput(test_key_1, test_blob_1)
blob = await gel_kvstore_async.aget(test_key_1)
assert blob == test_blob_1
test_key_2 = "test_key_2"
test_blob_2 = {"test_obj_key": "test_obj_val"}
await gel_kvstore_async.aput(test_key_2, test_blob_2)
blob = await gel_kvstore_async.aget(test_key_2)
assert blob == test_blob_2
blob = await gel_kvstore_async.aget_all()
assert len(blob) == 2
await gel_kvstore_async.adelete(test_key_1)
await gel_kvstore_async.adelete(test_key_2)
@pytest.mark.skipif(no_packages or skip_in_cicd, reason="gel package not installed")
@pytest.mark.asyncio
async def test_kvstore_putall(gel_kvstore_async: GelKVStore) -> None:
test_key = "test_key_putall_1"
test_blob = {"test_obj_key": "test_obj_val"}
test_key2 = "test_key_putall_2"
test_blob2 = {"test_obj_key2": "test_obj_val2"}
await gel_kvstore_async.aput_all([(test_key, test_blob), (test_key2, test_blob2)])
blob = await gel_kvstore_async.aget(test_key)
assert blob == test_blob
blob = await gel_kvstore_async.aget(test_key2)
assert blob == test_blob2
await gel_kvstore_async.adelete(test_key)
await gel_kvstore_async.adelete(test_key2)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/storage/kvstore/llama-index-storage-kvstore-gel/tests/test_gel.py",
"license": "MIT License",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/storage/kvstore/llama-index-storage-kvstore-gel/tests/test_storage_kvstore_gel.py | from llama_index.core.storage.kvstore.types import BaseKVStore
from llama_index.storage.kvstore.gel import GelKVStore
def test_class():
names_of_base_classes = [b.__name__ for b in GelKVStore.__mro__]
assert BaseKVStore.__name__ in names_of_base_classes
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/storage/kvstore/llama-index-storage-kvstore-gel/tests/test_storage_kvstore_gel.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-gel/llama_index/vector_stores/gel/base.py | import logging
from typing import (
Any,
List,
Optional,
Sequence,
)
from llama_index.core.schema import BaseNode, TextNode
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
FilterOperator,
FilterCondition,
MetadataFilters,
MetadataFilter,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.core.bridge.pydantic import PrivateAttr
from jinja2 import Template
import json
import textwrap
_logger = logging.getLogger(__name__)
IMPORT_ERROR_MESSAGE = """
Error: Gel Python package is not installed.
Please install it using 'pip install gel'.
"""
NO_PROJECT_MESSAGE = """
Error: it appears that the Gel project has not been initialized.
If that's the case, please run 'gel project init' to get started.
"""
MISSING_RECORD_TYPE_TEMPLATE = """
Error: Record type {{record_type}} is missing from the Gel schema.
In order to use the LlamaIndex integration, ensure you put the following in dbschema/default.gel:
using extension pgvector;
module default {
scalar type EmbeddingVector extending ext::pgvector::vector<1536>;
type {{record_type}} {
required collection: str;
text: str;
embedding: EmbeddingVector;
external_id: str {
constraint exclusive;
};
metadata: json;
index ext::pgvector::hnsw_cosine(m := 16, ef_construction := 128)
on (.embedding)
}
}
Remember that you also need to run a migration:
$ gel migration create
$ gel migrate
"""
try:
import gel
except ImportError as e:
_logger.error(IMPORT_ERROR_MESSAGE)
raise
def format_query(text: str) -> Template:
return Template(textwrap.dedent(text.strip()))
COSINE_SIMILARITY_QUERY = format_query(
"""
with collection_records := (select {{record_type}} filter .collection = <str>$collection_name)
select collection_records {
external_id,
text,
embedding,
metadata,
cosine_similarity := 1 - ext::pgvector::cosine_distance(
.embedding, <ext::pgvector::vector>$query_embedding),
}
{{filter_clause}}
order by .cosine_similarity desc empty last
limit <optional int64>$limit;
"""
)
SELECT_BY_DOC_ID_QUERY = format_query(
"""
select {{record_type}} {
external_id,
text,
embedding,
metadata,
}
filter .external_id in array_unpack(<array<str>>$external_ids);
"""
)
INSERT_QUERY = format_query(
"""
select (
insert {{record_type}} {
collection := <str>$collection_name,
external_id := <optional str>$external_id,
text := <str>$text,
embedding := <ext::pgvector::vector>$embedding,
metadata := <json>$metadata,
}
) { external_id }
"""
)
DELETE_BY_IDS_QUERY = format_query(
"""
with collection_records := (select {{record_type}} filter .collection = <str>$collection_name)
delete {{record_type}}
filter .external_id in array_unpack(<array<str>>$external_ids);
"""
)
DELETE_ALL_QUERY = format_query(
"""
delete {{record_type}}
filter .collection = <str>$collection_name;
"""
)
def get_filter_clause(filters: MetadataFilters) -> str:
"""Convert metadata filters to Gel query filter clause."""
subclauses = []
for filter in filters.filters:
if isinstance(filter, MetadataFilters):
subclause = get_filter_clause(filter)
elif isinstance(filter, MetadataFilter):
formatted_value = (
f'"{filter.value}"' if isinstance(filter.value, str) else filter.value
)
if filter.operator == FilterOperator.EQ.value:
subclause = (
f'<str>json_get(.metadata, "{filter.key}") = {formatted_value}'
)
elif filter.operator == FilterOperator.GT.value:
subclause = (
f'<str>json_get(.metadata, "{filter.key}") > {formatted_value}'
)
elif filter.operator == FilterOperator.LT.value:
subclause = (
f'<str>json_get(.metadata, "{filter.key}") < {formatted_value}'
)
elif filter.operator == FilterOperator.NE.value:
subclause = (
f'<str>json_get(.metadata, "{filter.key}") != {formatted_value}'
)
elif filter.operator == FilterOperator.GTE.value:
subclause = (
f'<str>json_get(.metadata, "{filter.key}") >= {formatted_value}'
)
elif filter.operator == FilterOperator.LTE.value:
subclause = (
f'<str>json_get(.metadata, "{filter.key}") <= {formatted_value}'
)
elif filter.operator == FilterOperator.IN.value:
subclause = f'<str>json_get(.metadata, "{filter.key}") in array_unpack({formatted_value})'
elif filter.operator == FilterOperator.NIN.value:
subclause = f'<str>json_get(.metadata, "{filter.key}") not in array_unpack({formatted_value})'
elif filter.operator == FilterOperator.ANY.value:
subclause = f'any(<str>json_get(.metadata, "{filter.key}") = array_unpack({formatted_value}))'
elif filter.operator == FilterOperator.ALL.value:
subclause = f'all(<str>json_get(.metadata, "{filter.key}") = array_unpack({formatted_value}))'
elif filter.operator == FilterOperator.TEXT_MATCH.value:
subclause = (
f'<str>json_get(.metadata, "{filter.key}") like {formatted_value}'
)
elif filter.operator == FilterOperator.CONTAINS.value:
subclause = f'contains(<str>json_get(.metadata, "{filter.key}"), {formatted_value})'
elif filter.operator == FilterOperator.IS_EMPTY.value:
subclause = f'not exists <str>json_get(.metadata, "{filter.key}")'
else:
raise ValueError(f"Unknown operator: {filter.operator}")
subclauses.append(subclause)
if filters.condition == FilterCondition.AND:
filter_clause = " and ".join(subclauses)
return "(" + filter_clause + ")" if len(subclauses) > 1 else filter_clause
elif filters.condition == FilterCondition.OR:
filter_clause = " or ".join(subclauses)
return "(" + filter_clause + ")" if len(subclauses) > 1 else filter_clause
else:
raise ValueError(f"Unknown condition: {filters.condition}")
class GelVectorStore(BasePydanticVectorStore):
"""
Gel-backed vector store implementation.
Stores and retrieves vectors using Gel database with pgvector extension.
"""
stores_text: bool = True
collection_name: str
record_type: str
_sync_client: gel.Client = PrivateAttr()
_async_client: gel.AsyncIOClient = PrivateAttr()
def __init__(
self,
collection_name: str = "default",
record_type: str = "Record",
):
"""
Initialize GelVectorStore.
Args:
collection_name: Name of the collection to store vectors in
record_type: The record type name in Gel schema
"""
super().__init__(
collection_name=collection_name,
record_type=record_type,
)
self._sync_client = None
self._async_client = None
def get_sync_client(self):
"""Get or initialize a synchronous Gel client."""
if self._async_client is not None:
raise RuntimeError(
"GelVectorStore has already been used in async mode. "
"If you were intentionally trying to use different IO modes at the same time, "
"please create a new instance instead."
)
if self._sync_client is None:
self._sync_client = gel.create_client()
try:
self._sync_client.ensure_connected()
except gel.errors.ClientConnectionError as e:
_logger.error(NO_PROJECT_MESSAGE)
raise
try:
self._sync_client.query(f"select {self.record_type};")
except gel.errors.InvalidReferenceError as e:
_logger.error(
Template(MISSING_RECORD_TYPE_TEMPLATE).render(
record_type=self.record_type
)
)
raise
return self._sync_client
async def get_async_client(self):
"""Get or initialize an asynchronous Gel client."""
if self._sync_client is not None:
raise RuntimeError(
"GelVectorStore has already been used in sync mode. "
"If you were intentionally trying to use different IO modes at the same time, "
"please create a new instance instead."
)
if self._async_client is None:
self._async_client = gel.create_async_client()
try:
await self._async_client.ensure_connected()
except gel.errors.ClientConnectionError as e:
_logger.error(NO_PROJECT_MESSAGE)
raise
try:
await self._async_client.query(f"select {self.record_type};")
except gel.errors.InvalidReferenceError as e:
_logger.error(
Template(MISSING_RECORD_TYPE_TEMPLATE).render(
record_type=self.record_type
)
)
raise
return self._async_client
@property
def client(self) -> Any:
"""Get client."""
return self.get_sync_client()
def get_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
) -> List[BaseNode]:
"""Get nodes from vector store."""
assert filters is None, "Filters are not supported in get_nodes"
if node_ids is None:
return []
client = self.get_sync_client()
results = client.query(
SELECT_BY_DOC_ID_QUERY.render(record_type=self.record_type),
external_ids=node_ids,
)
return [
TextNode(
id_=result.external_id,
text=result.text,
metadata=json.loads(result.metadata),
embedding=result.embedding,
)
for result in results
]
async def aget_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
) -> List[BaseNode]:
"""Async version of get_nodes."""
assert filters is None, "Filters are not supported in get_nodes"
if node_ids is None:
return []
client = await self.get_async_client()
results = await client.query(
SELECT_BY_DOC_ID_QUERY.render(record_type=self.record_type),
external_ids=node_ids,
)
return [
TextNode(
id_=result.external_id,
text=result.text,
metadata=json.loads(result.metadata),
embedding=result.embedding,
)
for result in results
]
def add(
self,
nodes: Sequence[BaseNode],
**kwargs: Any,
) -> List[str]:
"""Add nodes to vector store."""
inserted_ids = []
client = self.get_sync_client()
for node in nodes:
result = client.query(
INSERT_QUERY.render(record_type=self.record_type),
collection_name=self.collection_name,
external_id=node.id_,
text=node.get_content(),
embedding=node.embedding,
metadata=json.dumps(node.metadata),
)
inserted_ids.append(result[0].external_id)
return inserted_ids
async def async_add(self, nodes: Sequence[BaseNode], **kwargs: Any) -> List[str]:
"""Async version of add."""
inserted_ids = []
client = await self.get_async_client()
for node in nodes:
result = await client.query(
INSERT_QUERY.render(record_type=self.record_type),
collection_name=self.collection_name,
external_id=node.id_,
text=node.get_content(),
embedding=node.embedding,
metadata=json.dumps(node.metadata),
)
inserted_ids.append(result[0].external_id)
return inserted_ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete nodes using with ref_doc_id."""
client = self.get_sync_client()
result = client.query(
DELETE_BY_IDS_QUERY.render(record_type=self.record_type),
collection_name=self.collection_name,
external_ids=[ref_doc_id],
)
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Async version of delete."""
client = await self.get_async_client()
result = await client.query(
DELETE_BY_IDS_QUERY.render(record_type=self.record_type),
collection_name=self.collection_name,
external_ids=[ref_doc_id],
)
def clear(self) -> None:
"""Clear all nodes from configured vector store."""
client = self.get_sync_client()
result = client.query(
DELETE_ALL_QUERY.render(record_type=self.record_type),
collection_name=self.collection_name,
)
async def aclear(self) -> None:
"""Clear all nodes from configured vector store."""
client = await self.get_async_client()
result = await client.query(
DELETE_ALL_QUERY.render(record_type=self.record_type),
collection_name=self.collection_name,
)
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query vector store."""
assert query.query_embedding is not None, "query_embedding is required"
filter_clause = (
"filter " + get_filter_clause(query.filters) if query.filters else ""
)
assert query.mode == VectorStoreQueryMode.DEFAULT
rendered_query = COSINE_SIMILARITY_QUERY.render(
record_type=self.record_type, filter_clause=filter_clause
)
client = self.get_sync_client()
results = client.query(
rendered_query,
query_embedding=query.query_embedding,
collection_name=self.collection_name,
limit=query.similarity_top_k,
)
return VectorStoreQueryResult(
nodes=[
TextNode(
id_=result.external_id,
text=result.text,
metadata=json.loads(result.metadata),
embedding=result.embedding,
)
for result in results
],
similarities=[result.cosine_similarity for result in results],
ids=[result.external_id for result in results],
)
async def aquery(
self, query: VectorStoreQuery, **kwargs: Any
) -> VectorStoreQueryResult:
"""Async version of query."""
assert query.query_embedding is not None, "query_embedding is required"
filter_clause = (
"filter " + get_filter_clause(query.filters) if query.filters else ""
)
assert query.mode == VectorStoreQueryMode.DEFAULT
rendered_query = COSINE_SIMILARITY_QUERY.render(
record_type=self.record_type, filter_clause=filter_clause
)
client = await self.get_async_client()
results = await client.query(
rendered_query,
query_embedding=query.query_embedding,
collection_name=self.collection_name,
limit=query.similarity_top_k,
)
return VectorStoreQueryResult(
nodes=[
TextNode(
id_=result.external_id,
text=result.text,
metadata=json.loads(result.metadata),
embedding=result.embedding,
)
for result in results
],
similarities=[result.cosine_similarity for result in results],
ids=[result.external_id for result in results],
)
def persist(self, persist_path: str, fs) -> None:
_logger.warning("GelVectorStore.persist() is a no-op")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-gel/llama_index/vector_stores/gel/base.py",
"license": "MIT License",
"lines": 429,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-gel/tests/test_gel.py | import subprocess
import pytest
import pytest_asyncio
from llama_index.core.schema import TextNode
from llama_index.core.vector_stores.types import (
VectorStoreQuery,
FilterOperator,
FilterCondition,
MetadataFilters,
MetadataFilter,
)
from llama_index.vector_stores.gel import GelVectorStore, get_filter_clause
try:
subprocess.run(["gel", "project", "init", "--non-interactive"], check=True)
except subprocess.CalledProcessError as e:
print(e)
NODES = [
TextNode(
id_="1",
text="there are cats in the pond",
metadata={"location": "pond", "topic": "animals"},
embedding=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
),
TextNode(
id_="2",
text="ducks are also found in the pond",
metadata={"location": "pond", "topic": "animals"},
embedding=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
),
TextNode(
id_="3",
text="fresh apples are available at the market",
metadata={"location": "market", "topic": "food"},
embedding=[0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
),
TextNode(
id_="4",
text="the market also sells fresh oranges",
metadata={"location": "market", "topic": "food"},
embedding=[0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
),
TextNode(
id_="5",
text="the new art exhibit is fascinating",
metadata={"location": "museum", "topic": "art"},
embedding=[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
),
TextNode(
id_="6",
text="a sculpture exhibit is also at the museum",
metadata={"location": "museum", "topic": "art"},
embedding=[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
),
TextNode(
id_="7",
text="a new coffee shop opened on Main Street",
metadata={"location": "Main Street", "topic": "food"},
embedding=[0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
),
TextNode(
id_="8",
text="the book club meets at the library",
metadata={"location": "library", "topic": "reading"},
embedding=[0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
),
TextNode(
id_="9",
text="the library hosts a weekly story time for kids",
metadata={"location": "library", "topic": "reading"},
embedding=[0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
),
TextNode(
id_="10",
text="a cooking class for beginners is offered at the community center",
metadata={"location": "community center", "topic": "classes"},
embedding=[0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
),
]
@pytest.fixture()
def vectorstore() -> GelVectorStore:
vectorstore = GelVectorStore()
vectorstore.clear()
return vectorstore
@pytest_asyncio.fixture()
async def vectorstore_async() -> GelVectorStore:
vectorstore = GelVectorStore()
await vectorstore.aclear()
return vectorstore
def test_get_filter_clause():
test_cases = [
(
MetadataFilters(
filters=[
MetadataFilter(
key="field", value="value", operator=FilterOperator.EQ.value
)
]
),
'<str>json_get(.metadata, "field") = "value"',
),
(
MetadataFilters(
filters=[
MetadataFilter(
key="field", value=1, operator=FilterOperator.EQ.value
)
]
),
'<str>json_get(.metadata, "field") = 1',
),
(
MetadataFilters(
filters=[
MetadataFilter(
key="field", value="value", operator=FilterOperator.NE.value
)
]
),
'<str>json_get(.metadata, "field") != "value"',
),
(
MetadataFilters(
filters=[
MetadataFilter(
key="field", value="value", operator=FilterOperator.LT.value
)
]
),
'<str>json_get(.metadata, "field") < "value"',
),
(
MetadataFilters(
filters=[
MetadataFilter(
key="field",
value="value",
operator=FilterOperator.LTE.value,
)
]
),
'<str>json_get(.metadata, "field") <= "value"',
),
(
MetadataFilters(
filters=[
MetadataFilter(
key="field", value="value", operator=FilterOperator.GT.value
)
]
),
'<str>json_get(.metadata, "field") > "value"',
),
(
MetadataFilters(
filters=[
MetadataFilter(
key="field",
value="value",
operator=FilterOperator.GTE.value,
)
]
),
'<str>json_get(.metadata, "field") >= "value"',
),
(
MetadataFilters(
filters=[
MetadataFilter(
key="field",
value=[1, 2, 3],
operator=FilterOperator.IN.value,
)
]
),
'<str>json_get(.metadata, "field") in array_unpack([1, 2, 3])',
),
(
MetadataFilters(
filters=[
MetadataFilter(
key="field",
value=[1, 2, 3],
operator=FilterOperator.NIN.value,
)
]
),
'<str>json_get(.metadata, "field") not in array_unpack([1, 2, 3])',
),
(
MetadataFilters(
filters=[
MetadataFilter(
key="field",
value="pattern",
operator=FilterOperator.TEXT_MATCH.value,
)
]
),
'<str>json_get(.metadata, "field") like "pattern"',
),
(
MetadataFilters(
filters=[
MetadataFilter(
key="field1",
value="value1",
operator=FilterOperator.EQ.value,
),
MetadataFilter(
key="field2",
value="value2",
operator=FilterOperator.EQ.value,
),
],
condition=FilterCondition.AND,
),
'(<str>json_get(.metadata, "field1") = "value1" and <str>json_get(.metadata, "field2") = "value2")',
),
(
MetadataFilters(
filters=[
MetadataFilter(
key="field1",
value="value1",
operator=FilterOperator.EQ.value,
),
MetadataFilter(
key="field2",
value="value2",
operator=FilterOperator.EQ.value,
),
],
condition=FilterCondition.OR,
),
'(<str>json_get(.metadata, "field1") = "value1" or <str>json_get(.metadata, "field2") = "value2")',
),
(
MetadataFilters(
filters=[
MetadataFilters(
filters=[
MetadataFilter(
key="field1",
value=[1, 2, 3],
operator=FilterOperator.IN.value,
),
MetadataFilter(
key="field2",
value=100,
operator=FilterOperator.GT.value,
),
],
condition=FilterCondition.OR,
),
MetadataFilter(
key="field3",
value="%pattern%",
operator=FilterOperator.TEXT_MATCH.value,
),
],
condition=FilterCondition.AND,
),
'((<str>json_get(.metadata, "field1") in array_unpack([1, 2, 3]) or <str>json_get(.metadata, "field2") > 100) and <str>json_get(.metadata, "field3") like "%pattern%")',
),
]
for filter_dict, expected in test_cases:
assert get_filter_clause(filter_dict) == expected
def test_add(vectorstore: GelVectorStore):
inserted_ids = vectorstore.add(NODES)
assert len(inserted_ids) == len(NODES)
for node in NODES:
assert node.id_ in inserted_ids
vectorstore.clear()
def test_query(vectorstore: GelVectorStore):
inserted_ids = vectorstore.add(NODES)
query = VectorStoreQuery(
query_embedding=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
similarity_top_k=1,
)
results = vectorstore.query(query)
assert len(results.nodes) == 1
assert results.nodes[0].id_ == "1"
filters = MetadataFilters(
filters=[
MetadataFilter(key="location", value="market", operator=FilterOperator.EQ),
MetadataFilter(
key="topic", value=["food", "art"], operator=FilterOperator.IN
),
],
condition=FilterCondition.AND,
)
results = vectorstore.query(
VectorStoreQuery(
query_embedding=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
similarity_top_k=1,
filters=filters,
)
)
assert "1" not in [r.id_ for r in results.nodes]
vectorstore.clear()
def test_get_nodes(vectorstore: GelVectorStore):
inserted_ids = vectorstore.add(NODES)
results = vectorstore.get_nodes(node_ids=["1", "2"])
assert len(results) == 2
assert results[0].id_ == "1"
assert results[1].id_ == "2"
assert len(vectorstore.get_nodes()) == 0
def test_delete(vectorstore: GelVectorStore):
inserted_ids = vectorstore.add(NODES)
vectorstore.delete(ref_doc_id="1")
results = vectorstore.get_nodes(node_ids=["1"])
assert len(results) == 0
def test_clear(vectorstore: GelVectorStore):
inserted_ids = vectorstore.add(NODES)
vectorstore.clear()
assert len(vectorstore.get_nodes()) == 0
async def test_async_add(vectorstore_async: GelVectorStore):
inserted_ids = await vectorstore_async.async_add(NODES)
assert len(inserted_ids) == len(NODES)
for node in NODES:
assert node.id_ in inserted_ids
await vectorstore_async.aclear()
async def test_aquery(vectorstore_async: GelVectorStore):
inserted_ids = await vectorstore_async.async_add(NODES)
query = VectorStoreQuery(
query_embedding=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
similarity_top_k=1,
)
results = await vectorstore_async.aquery(query)
assert len(results.nodes) == 1
assert results.nodes[0].id_ == "1"
await vectorstore_async.aclear()
async def test_aget_nodes(vectorstore_async: GelVectorStore):
inserted_ids = await vectorstore_async.async_add(NODES)
results = await vectorstore_async.aget_nodes(node_ids=["1", "2"])
assert len(results) == 2
assert results[0].id_ == "1"
assert results[1].id_ == "2"
assert len(await vectorstore_async.aget_nodes()) == 0
await vectorstore_async.aclear()
async def test_adelete(vectorstore_async: GelVectorStore):
inserted_ids = await vectorstore_async.async_add(NODES)
await vectorstore_async.adelete(ref_doc_id="1")
results = await vectorstore_async.aget_nodes(node_ids=["1"])
assert len(results) == 0
await vectorstore_async.aclear()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-gel/tests/test_gel.py",
"license": "MIT License",
"lines": 346,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-gel/tests/test_vector_stores_gel.py | from llama_index.core.vector_stores.types import BasePydanticVectorStore
from llama_index.vector_stores.gel import GelVectorStore
def test_class():
names_of_base_classes = [b.__name__ for b in GelVectorStore.__mro__]
assert BasePydanticVectorStore.__name__ in names_of_base_classes
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-gel/tests/test_vector_stores_gel.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/embeddings/llama-index-embeddings-autoembeddings/llama_index/embeddings/autoembeddings/base.py | from typing import List
from llama_index.core.base.embeddings.base import BaseEmbedding
from typing import Optional
try:
import chonkie
from chonkie import AutoEmbeddings
except ImportError:
raise ImportError(
"Could not import Autembeddings from chonkie. "
"Please install it with `pip install chonkie[all]`."
)
class ChonkieAutoEmbedding(BaseEmbedding):
"""
Autoembeddings from chonkie.
Args:
model_name (str): The name of the model to use.
"""
model_name: str
embedder: Optional[chonkie.BaseEmbeddings] = None
def __init__(self, model_name: str) -> None:
super().__init__(model_name=model_name)
self.embedder = AutoEmbeddings.get_embeddings(self.model_name)
@classmethod
def class_name(cls) -> str:
return "ChonkieAutoEmbedding"
def _get_embedding(self, text: str) -> List[float]:
embed = self.embedder.embed(text)
return embed.tolist()
async def _aget_embedding(self, text: str) -> List[float]:
return self._get_embedding(text)
def _get_embeddings(self, texts: List[str]) -> List[List[float]]:
embeds = self.embedder.embed_batch(texts)
return [e.tolist() for e in embeds]
async def _aget_embeddings(
self,
texts: List[str],
) -> List[List[float]]:
return self._get_embeddings(texts)
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._get_embedding(query)
async def _aget_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return await self._aget_embedding(query)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._get_embedding(text)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/embeddings/llama-index-embeddings-autoembeddings/llama_index/embeddings/autoembeddings/base.py",
"license": "MIT License",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/embeddings/llama-index-embeddings-autoembeddings/tests/test_autoembeddings.py | from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.autoembeddings import ChonkieAutoEmbedding
def test_class_init() -> None:
emb = ChonkieAutoEmbedding(model_name="all-MiniLM-L6-v2")
assert isinstance(emb, BaseEmbedding)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/embeddings/llama-index-embeddings-autoembeddings/tests/test_autoembeddings.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-markitdown/llama_index/readers/markitdown/base.py | from markitdown import MarkItDown
from llama_index.core.bridge.pydantic import BaseModel, model_validator
import os
from pathlib import Path
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from typing import Tuple, Optional, Union, List
from typing_extensions import Self
def is_empty(list_it: list) -> Tuple[bool, Optional[list]]:
if len(list_it) == 0:
return True, None
return False, list_it
class ValidFilePath(BaseModel):
file_path: Union[str, Path, List[str], List[Path]]
@model_validator(mode="after")
def validate_file_path(self) -> Self:
if isinstance(self.file_path, str):
if not Path(self.file_path).is_dir():
if not Path(self.file_path).is_file():
raise ValueError("File or directory path does not exist")
else:
dir_files = [self.file_path]
else:
dir_files = []
for root, _, files in os.walk(self.file_path):
for el in files:
dir_files.append(os.path.join(root, el))
self.file_path = dir_files
elif isinstance(self.file_path, Path):
if not self.file_path.is_dir():
if not self.file_path.is_file():
raise ValueError("File or directory path does not exist")
else:
dir_files = [self.file_path]
else:
dir_files = []
for root, _, files in os.walk(self.file_path):
for el in files:
dir_files.append(os.path.join(root, el))
self.file_path = dir_files
empty, fls = is_empty(self.file_path)
if empty:
raise ValueError("There is no file to parse!")
else:
files = []
if isinstance(fls[0], str):
for fl in fls:
if Path(fl).is_file() and os.path.splitext(fl)[1] in [
".docx",
".html",
".xml",
".csv",
".pdf",
".pptx",
".xlsx",
".json",
".zip",
".txt",
"",
".md",
]:
files.append(fl)
else:
for fl in fls:
if fl.is_file() and os.path.splitext(fl)[1] in [
".docx",
".html",
".xml",
".csv",
".pdf",
".pptx",
".xlsx",
".json",
".zip",
".txt",
"",
".md",
]:
files.append(fl.__str__())
self.file_path = files
return self
class MarkItDownReader(BaseReader):
"""
MarkItDownReader is a document reader that utilizes the MarkItDown parser to convert files or collections of files into Document objects.
Methods
-------
load_data(file_path: str | Path | Iterable[str] | Iterable[Path]) -> List[Document]
Loads and parses a directory (if `file_path` is `str` or `Path`) or a list of files specified by `file_path` using the MarkItDown parser.
Returns a list of Document objects, each containing the text content and metadata such as file path, file type, and content length.
"""
_reader: MarkItDown = MarkItDown()
@classmethod
def class_name(cls) -> str:
"""Get the name identifier of the class."""
return "MarkItDownReader"
def load_data(
self,
file_path: Union[str, Path, List[str], List[Path]],
**kwargs,
) -> List[Document]:
docs: List[Document] = []
fl_pt = ValidFilePath(file_path=file_path)
fs = fl_pt.file_path
for f in fs:
res = self._reader.convert(f)
docs.append(
Document(
text=res.text_content,
metadata={
"file_path": f.__str__(),
"file_type": os.path.splitext(f)[1],
"content_length": len(res.text_content),
},
)
)
return docs
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-markitdown/llama_index/readers/markitdown/base.py",
"license": "MIT License",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-markitdown/tests/test_markitdownreader.py | from llama_index.core.readers.base import BaseReader
from llama_index.readers.markitdown import MarkItDownReader
def test_class():
reader = MarkItDownReader()
assert isinstance(reader, BaseReader)
assert reader._reader is not None
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-markitdown/tests/test_markitdownreader.py",
"license": "MIT License",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-meta/llama_index/llms/meta/base.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
class LlamaLLM(OpenAILike):
"""
Llama LLM.
Examples:
`pip install llama-index-llms-meta`
```python
from llama_index.llms.meta import LlamaLLM
# set api key in env or in llm
# import os
# os.environ["LLAMA_API_KEY"] = "your api key"
llm = LlamaLLM(
model="Llama-3.3-8B-Instruct", api_key="your_api_key"
)
resp = llm.complete("Who is Paul Graham?")
print(resp)
```
"""
def __init__(
self,
model: str = "Llama-3.3-8B-Instruct",
api_key: Optional[str] = None,
api_base: str = "https://api.llama.com/compat/v1",
is_chat_model: bool = True,
# Slightly lower to account for tokenization defaults
context_window: int = 120000,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("LLAMA_API_KEY", None)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=is_chat_model,
context_window=context_window,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "LlamaLLM"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-meta/llama_index/llms/meta/base.py",
"license": "MIT License",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-meta/tests/test_llms_llama.py | # Copyright (c) Meta Platforms, Inc. and affiliates
from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.meta import LlamaLLM
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in LlamaLLM.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-meta/tests/test_llms_llama.py",
"license": "MIT License",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/embeddings/llama-index-embeddings-openvino-genai/llama_index/embeddings/openvino_genai/base.py | from typing import Any, List, Optional
from pathlib import Path
import numpy as np
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CallbackManager
from llama_index.embeddings.huggingface.utils import format_query, format_text
class OpenVINOGENAIEmbedding(BaseEmbedding):
model_path: str = Field(description="local path.")
max_length: int = Field(description="Maximum length of input.")
pooling: str = Field(description="Pooling strategy. One of ['cls', 'mean'].")
normalize: bool = Field(default=True, description="Normalize embeddings or not.")
query_instruction: Optional[str] = Field(
description="Instruction to prepend to query text."
)
text_instruction: Optional[str] = Field(
description="Instruction to prepend to text."
)
cache_folder: Optional[str] = Field(
description="Cache folder for huggingface files.", default=None
)
_model: Any = PrivateAttr()
_tokenizer: Any = PrivateAttr()
_device: Any = PrivateAttr()
def __init__(
self,
model_path: str,
pooling: str = "cls",
max_length: int = 2048,
normalize: bool = True,
query_instruction: Optional[str] = None,
text_instruction: Optional[str] = None,
model: Optional[Any] = None,
tokenizer: Optional[Any] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
device: Optional[str] = "CPU",
):
try:
import openvino_genai
import openvino as ov
core = ov.Core()
except ImportError as e:
raise ImportError(
"Could not import openvino_genai python package. "
"Please install it with: "
"pip install -U openvino_genai"
) from e
# use local model
model = model or core.compile_model(
Path(model_path) / "openvino_model.xml", device
)
tokenizer = tokenizer or openvino_genai.Tokenizer(model_path)
if pooling not in ["cls", "mean"]:
raise ValueError(f"Pooling {pooling} not supported.")
super().__init__(
embed_batch_size=embed_batch_size,
callback_manager=callback_manager or CallbackManager([]),
model_path=model_path,
max_length=max_length,
pooling=pooling,
normalize=normalize,
query_instruction=query_instruction,
text_instruction=text_instruction,
)
self._device = device
self._model = model
self._tokenizer = tokenizer
@classmethod
def class_name(cls) -> str:
return "OpenVINOGENAIEmbedding"
def _mean_pooling(self, model_output: Any, attention_mask: Any) -> Any:
"""Mean Pooling - Take attention mask into account for correct averaging."""
token_embeddings = model_output[
0
] # First element of model_output contains all token embeddings
input_mask_expanded = np.broadcast_to(
np.expand_dims(attention_mask, axis=-1), token_embeddings.size()
)
return np.sum(token_embeddings * input_mask_expanded, 1) / np.clip(
input_mask_expanded.sum(1), a_min=1e-9
)
def _cls_pooling(self, model_output: list) -> Any:
"""Use the CLS token as the pooling token."""
return model_output[0][:, 0]
def _embed(self, sentences: List[str]) -> List[List[float]]:
"""Embed sentences."""
length = self._model.inputs[0].get_partial_shape()[1]
if length.is_dynamic:
features = self._tokenizer.encode(sentences)
else:
features = self._tokenizer.encode(
sentences,
pad_to_max_length=True,
max_length=length.get_length(),
)
if "token_type_ids" in (input.any_name for input in self._model.inputs):
token_type_ids = np.zeros(features.attention_mask.shape)
model_input = {
"input_ids": features.input_ids,
"attention_mask": features.attention_mask,
"token_type_ids": token_type_ids,
}
else:
model_input = {
"input_ids": features.input_ids,
"attention_mask": features.attention_mask,
}
model_output = self._model(model_input)
if self.pooling == "cls":
embeddings = self._cls_pooling(model_output)
else:
embeddings = self._mean_pooling(model_output, model_input["attention_mask"])
if self.normalize:
norm = np.linalg.norm(embeddings, ord=2, axis=1, keepdims=True)
embeddings = embeddings / norm
return embeddings.tolist()
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
query = format_query(query, self.model_name, self.query_instruction)
return self._embed([query])[0]
async def _aget_query_embedding(self, query: str) -> List[float]:
"""Get query embedding async."""
return self._get_query_embedding(query)
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Get text embedding async."""
return self._get_text_embedding(text)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
text = format_text(text, self.model_name, self.text_instruction)
return self._embed([text])[0]
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
texts = [
format_text(text, self.model_name, self.text_instruction) for text in texts
]
return self._embed(texts)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/embeddings/llama-index-embeddings-openvino-genai/llama_index/embeddings/openvino_genai/base.py",
"license": "MIT License",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-google-genai/tests/test_llms_google_genai_vertex.py | from datetime import datetime
from enum import Enum
import os
from typing import List, Optional, Union
from unittest.mock import MagicMock
import pytest
from llama_index.core.program.function_program import get_function_tool
from pydantic import BaseModel, Field
from google.genai import types
from llama_index.llms.google_genai import GoogleGenAI
from llama_index.llms.google_genai.utils import (
convert_schema_to_function_declaration,
chat_from_gemini_response,
)
# Don't forget to export GOOGLE_CLOUD_LOCATION and GOOGLE_CLOUD_PROJECT when testing with VertexAI
SKIP_VERTEXAI = os.environ.get("GOOGLE_GENAI_USE_VERTEXAI", "false") == "false"
@pytest.mark.skipif(
SKIP_VERTEXAI,
reason="GOOGLE_GENAI_USE_VERTEXAI not set",
)
def test_anyof_supported_vertexai() -> None:
class Content(BaseModel):
content: Union[int, str]
llm = GoogleGenAI(
model="gemini-2.0-flash-001",
)
function_tool = get_function_tool(Content)
_ = convert_schema_to_function_declaration(llm._client, function_tool)
content = (
llm.as_structured_llm(output_cls=Content)
.complete(prompt="Generate a small content")
.raw
)
assert isinstance(content, Content)
assert isinstance(content.content, int | str)
@pytest.mark.skipif(
SKIP_VERTEXAI,
reason="GOOGLE_GENAI_USE_VERTEXAI not set",
)
def test_optional_lists_nested_vertexai() -> None:
class Address(BaseModel):
street: str
city: str
country: str = Field(default="USA")
class ContactInfo(BaseModel):
email: str
phone: Optional[str] = None
address: Address
class Department(Enum):
ENGINEERING = "engineering"
MARKETING = "marketing"
SALES = "sales"
HR = "human_resources"
class Employee(BaseModel):
name: str
contact: ContactInfo
department: Department
hire_date: datetime
class Company(BaseModel):
name: str
founded_year: int
website: str
employees: List[Employee]
headquarters: Address
llm = GoogleGenAI(
model="gemini-2.0-flash-001",
)
function_tool = get_function_tool(Company)
converted = convert_schema_to_function_declaration(llm._client, function_tool)
assert converted.name == "Company"
assert converted.description is not None
assert converted.parameters.required is not None
assert list(converted.parameters.properties) == [
"name",
"founded_year",
"website",
"employees",
"headquarters",
]
assert "name" in converted.parameters.required
assert "founded_year" in converted.parameters.required
assert "website" in converted.parameters.required
assert "employees" in converted.parameters.required
assert "headquarters" in converted.parameters.required
# call the model and check the output
company = (
llm.as_structured_llm(output_cls=Company)
.complete(prompt="Create a fake company with at least 3 employees")
.raw
)
assert isinstance(company, Company)
assert len(company.employees) >= 3
assert all(
employee.department in Department.__members__.values()
for employee in company.employees
)
@pytest.mark.skipif(
SKIP_VERTEXAI,
reason="GOOGLE_GENAI_USE_VERTEXAI not set",
)
def test_cached_content_initialization_vertexai() -> None:
"""Test GoogleGenAI initialization with cached_content parameter in VertexAI."""
cached_content_value = "projects/test-project/locations/us-central1/cachedContents/cached-content-id-123"
llm = GoogleGenAI(model="gemini-2.0-flash-001", cached_content=cached_content_value)
# Verify cached_content is stored in the instance
assert llm.cached_content == cached_content_value
# Verify cached_content is stored in generation config
assert llm._generation_config["cached_content"] == cached_content_value
def test_cached_content_in_response_vertexai() -> None:
"""Test that cached_content is extracted from Gemini responses in VertexAI."""
# Mock response with cached_content
mock_response = MagicMock()
mock_response.candidates = [MagicMock()]
mock_response.candidates[0].finish_reason = types.FinishReason.STOP
mock_response.candidates[0].content.role = "model"
mock_response.candidates[0].content.parts = [MagicMock()]
mock_response.candidates[0].content.parts[0].text = "Test response"
mock_response.candidates[0].content.parts[0].inline_data = None
mock_response.candidates[0].content.parts[0].thought = False
mock_response.candidates[0].content.parts[0].function_call.id = ""
mock_response.candidates[0].content.parts[0].function_call.name = "hello"
mock_response.candidates[0].content.parts[0].function_call.args = {}
mock_response.candidates[0].content.parts[0].function_response = None
mock_response.prompt_feedback = None
mock_response.usage_metadata = None
mock_response.function_calls = None
mock_response.cached_content = "projects/test-project/locations/us-central1/cachedContents/cached-content-id-123"
# Convert response
chat_response = chat_from_gemini_response(mock_response, [])
# Verify cached_content is in raw response
assert "cached_content" in chat_response.raw
assert (
chat_response.raw["cached_content"]
== "projects/test-project/locations/us-central1/cachedContents/cached-content-id-123"
)
def test_cached_content_without_cached_content_vertexai() -> None:
"""Test response processing when cached_content is not present in VertexAI."""
# Mock response without cached_content
mock_response = MagicMock()
mock_response.candidates = [MagicMock()]
mock_response.candidates[0].finish_reason = types.FinishReason.STOP
mock_response.candidates[0].content.role = "model"
mock_response.candidates[0].content.parts = [MagicMock()]
mock_response.candidates[0].content.parts[0].text = "Test response"
mock_response.candidates[0].content.parts[0].inline_data = None
mock_response.candidates[0].content.parts[0].thought = False
mock_response.candidates[0].content.parts[0].function_call.id = ""
mock_response.candidates[0].content.parts[0].function_call.name = "hello"
mock_response.candidates[0].content.parts[0].function_call.args = {}
mock_response.candidates[0].content.parts[0].function_response = None
mock_response.prompt_feedback = None
mock_response.usage_metadata = None
mock_response.function_calls = None
# No cached_content attribute
del mock_response.cached_content
# Convert response
chat_response = chat_from_gemini_response(mock_response, [])
# Verify no cached_content key in raw response
assert "cached_content" not in chat_response.raw
@pytest.mark.skipif(
SKIP_VERTEXAI,
reason="GOOGLE_GENAI_USE_VERTEXAI not set",
)
def test_cached_content_with_generation_config_vertexai() -> None:
"""Test that cached_content works with custom generation_config in VertexAI."""
cached_content_value = "projects/test-project/locations/us-central1/cachedContents/cached-content-id-456"
llm = GoogleGenAI(
model="gemini-2.0-flash-001",
cached_content=cached_content_value,
generation_config=types.GenerateContentConfig(
temperature=0.5,
cached_content=cached_content_value,
),
)
# Verify both cached_content and custom config are preserved
assert llm._generation_config["cached_content"] == cached_content_value
assert llm._generation_config["temperature"] == 0.5
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-google-genai/tests/test_llms_google_genai_vertex.py",
"license": "MIT License",
"lines": 177,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/shipping/mutations/delivery_options_calculate.py | import graphene
from ....checkout.delivery_context import fetch_shipping_methods_for_checkout
from ....checkout.fetch import fetch_checkout_info, fetch_checkout_lines
from ....webhook.event_types import WebhookEventSyncType
from ...checkout import types as checkout_types
from ...core.descriptions import ADDED_IN_323
from ...core.doc_category import DOC_CATEGORY_SHIPPING
from ...core.mutations import BaseMutation
from ...core.types.common import DeliveryOptionsCalculateError, NonNullList
from ...core.utils import WebhookEventInfo
from ...plugins.dataloaders import get_plugin_manager_promise
from ...utils import get_user_or_app_from_context
class DeliveryOptionsCalculate(BaseMutation):
deliveries = NonNullList(
checkout_types.Delivery,
required=True,
default_value=[],
description="List of the available deliveries.",
)
class Arguments:
id = graphene.ID(
description="The ID of the checkout.",
required=True,
)
class Meta:
description = (
"Calculates available delivery options for a checkout." + ADDED_IN_323
)
doc_category = DOC_CATEGORY_SHIPPING
error_type_class = DeliveryOptionsCalculateError
webhook_events_info = [
WebhookEventInfo(
type=WebhookEventSyncType.SHIPPING_LIST_METHODS_FOR_CHECKOUT,
description="Triggered to fetch external shipping methods.",
),
WebhookEventInfo(
type=WebhookEventSyncType.CHECKOUT_FILTER_SHIPPING_METHODS,
description="Triggered to filter shipping methods.",
),
]
@classmethod
def perform_mutation(cls, _root, info, /, *, id): # type: ignore[override]
manager = get_plugin_manager_promise(info.context).get()
requestor = get_user_or_app_from_context(info.context)
checkout = cls.get_node_or_error(
info=info, node_id=id, field="id", only_type=checkout_types.Checkout
)
checkout_lines, _ = fetch_checkout_lines(checkout=checkout)
checkout_info = fetch_checkout_info(
checkout=checkout,
lines=checkout_lines,
manager=manager,
)
return cls(
deliveries=fetch_shipping_methods_for_checkout(
checkout_info,
requestor=requestor,
# Using mutation means, that we use new approach for fetching
# delivery. In new flow we never modify anything for assigned
# delivery
overwrite_assigned_delivery=False,
)
)
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/shipping/mutations/delivery_options_calculate.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/graphql/shipping/tests/mutations/test_delivery_options_calculate.py | import datetime
from decimal import Decimal
from unittest import mock
import graphene
from django.utils import timezone
from prices import Money
from promise import Promise
from .....checkout.delivery_context import fetch_shipping_methods_for_checkout
from .....checkout.models import Checkout, CheckoutDelivery
from .....checkout.webhooks.exclude_shipping import (
excluded_shipping_methods_for_checkout,
)
from .....checkout.webhooks.list_shipping_methods import (
list_shipping_methods_for_checkout,
)
from .....shipping.error_codes import DeliveryOptionsCalculateErrorCode
from .....shipping.interface import ShippingMethodData
from .....shipping.models import ShippingMethod
from .....webhook.transport.shipping_helpers import to_shipping_app_id
from ....core.utils import to_global_id_or_none
from ....tests.utils import get_graphql_content
DELIVERY_OPTIONS_CALCULATE = """
mutation DeliveryOptionsCalculate($id: ID!) {
deliveryOptionsCalculate(id: $id) {
deliveries {
id
shippingMethod {
name
}
}
errors {
field
message
code
}
}
}
"""
def test_used_with_different_type_than_checkout(api_client, address):
# given
invalid_id = graphene.Node.to_global_id("Address", address.id)
variables = {"id": invalid_id}
# when
response = api_client.post_graphql(DELIVERY_OPTIONS_CALCULATE, variables)
content = get_graphql_content(response, ignore_errors=True)
errors = content["data"]["deliveryOptionsCalculate"]["errors"]
# then
assert len(errors) == 1
assert errors[0]["field"] == "id"
assert errors[0]["code"] == DeliveryOptionsCalculateErrorCode.GRAPHQL_ERROR.name
def test_checkout_not_found(api_client):
# given
assert Checkout.objects.count() == 0
variables = {
"id": graphene.Node.to_global_id(
"Checkout", "00000000-0000-0000-0000-000000000000"
)
}
# when
response = api_client.post_graphql(DELIVERY_OPTIONS_CALCULATE, variables)
content = get_graphql_content(response, ignore_errors=True)
errors = content["data"]["deliveryOptionsCalculate"]["errors"]
# then
assert len(errors) == 1
assert errors[0]["field"] == "id"
assert errors[0]["code"] == DeliveryOptionsCalculateErrorCode.NOT_FOUND.name
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping.excluded_shipping_methods_for_checkout",
wraps=excluded_shipping_methods_for_checkout,
)
@mock.patch(
"saleor.checkout.webhooks.list_shipping_methods.list_shipping_methods_for_checkout"
)
def test_fetches_external_shipping_methods(
mocked_list_shipping_methods,
mocked_exclude_shipping_methods,
api_client,
checkout_with_item,
address,
app,
):
# given
checkout = checkout_with_item
ShippingMethod.objects.all().delete()
expected_name = "External Shipping"
available_shipping_method = ShippingMethodData(
id=to_shipping_app_id(app, "external-id"),
price=Money(Decimal(10), checkout.currency),
name=expected_name,
description="External Shipping Description",
active=True,
maximum_delivery_days=10,
minimum_delivery_days=5,
metadata={},
)
mocked_list_shipping_methods.return_value = Promise.resolve(
[available_shipping_method]
)
checkout.shipping_address = address
checkout.save(update_fields=["shipping_address"])
variables = {"id": to_global_id_or_none(checkout)}
# when
response = api_client.post_graphql(DELIVERY_OPTIONS_CALCULATE, variables)
content = get_graphql_content(response)
data = content["data"]["deliveryOptionsCalculate"]
# then
delivery = CheckoutDelivery.objects.get()
assert not data["errors"]
assert len(data["deliveries"]) == 1
assert data["deliveries"][0]["shippingMethod"]["name"] == expected_name
assert data["deliveries"][0]["id"] == graphene.Node.to_global_id(
"CheckoutDelivery", delivery.pk
)
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping.excluded_shipping_methods_for_checkout",
wraps=excluded_shipping_methods_for_checkout,
)
@mock.patch(
"saleor.checkout.webhooks.list_shipping_methods.list_shipping_methods_for_checkout"
)
def test_excluded_shipping_methods_called_for_checkout(
mocked_list_shipping_methods,
mocked_excluded_shipping_methods_for_checkout,
api_client,
checkout_with_item,
address,
app,
):
# given
checkout = checkout_with_item
available_shipping_method = ShippingMethodData(
id=to_shipping_app_id(app, "external-id"),
price=Money(Decimal(10), checkout.currency),
name="External Shipping",
description="External Shipping Description",
active=True,
maximum_delivery_days=10,
minimum_delivery_days=5,
metadata={},
)
mocked_list_shipping_methods.return_value = Promise.resolve(
[available_shipping_method]
)
checkout.shipping_address = address
checkout.save(update_fields=["shipping_address"])
variables = {"id": to_global_id_or_none(checkout)}
# when
response = api_client.post_graphql(DELIVERY_OPTIONS_CALCULATE, variables)
content = get_graphql_content(response)
data = content["data"]["deliveryOptionsCalculate"]
# then
assert not data["errors"]
mocked_excluded_shipping_methods_for_checkout.assert_called_once()
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping.excluded_shipping_methods_for_checkout",
wraps=excluded_shipping_methods_for_checkout,
)
@mock.patch(
"saleor.checkout.webhooks.list_shipping_methods.list_shipping_methods_for_checkout",
wraps=list_shipping_methods_for_checkout,
)
def test_when_checkout_has_stale_deliveries(
mocked_list_shipping_methods,
mocked_exclude_shipping_methods,
api_client,
checkout_with_item,
address,
checkout_delivery,
):
# given
checkout = checkout_with_item
existing_delivery = checkout_delivery(checkout)
checkout.shipping_address = address
checkout.delivery_methods_stale_at = timezone.now() - datetime.timedelta(minutes=5)
checkout.save(update_fields=["shipping_address", "delivery_methods_stale_at"])
variables = {"id": to_global_id_or_none(checkout)}
# when
response = api_client.post_graphql(DELIVERY_OPTIONS_CALCULATE, variables)
content = get_graphql_content(response)
data = content["data"]["deliveryOptionsCalculate"]
# then
assert not data["errors"]
assert len(data["deliveries"]) == 1
existing_delivery.refresh_from_db()
assert existing_delivery.is_valid
assert data["deliveries"][0]["shippingMethod"]["name"] == existing_delivery.name
assert CheckoutDelivery.objects.filter(checkout=checkout).count() == 1
assert mocked_exclude_shipping_methods.called
assert mocked_list_shipping_methods.called
@mock.patch(
"saleor.graphql.shipping.mutations.delivery_options_calculate.fetch_shipping_methods_for_checkout",
wraps=fetch_shipping_methods_for_checkout,
)
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping.excluded_shipping_methods_for_checkout",
wraps=excluded_shipping_methods_for_checkout,
)
@mock.patch(
"saleor.checkout.webhooks.list_shipping_methods.list_shipping_methods_for_checkout",
wraps=list_shipping_methods_for_checkout,
)
def test_refresh_deliveries_when_delivery_methods_stale_at_has_future_date(
mocked_list_shipping_methods,
mocked_exclude_shipping_methods,
mocked_fetch_shipping_methods,
api_client,
checkout_with_item,
address,
checkout_delivery,
):
# given
checkout = checkout_with_item
existing_delivery = checkout_delivery(checkout)
checkout.shipping_address = address
checkout.assigned_delivery = existing_delivery
checkout.delivery_methods_stale_at = timezone.now() + datetime.timedelta(minutes=5)
checkout.save()
variables = {"id": to_global_id_or_none(checkout)}
# when
response = api_client.post_graphql(DELIVERY_OPTIONS_CALCULATE, variables)
content = get_graphql_content(response)
data = content["data"]["deliveryOptionsCalculate"]
# then
assert not data["errors"]
assert len(data["deliveries"]) == 1
existing_delivery.refresh_from_db()
assert existing_delivery.is_valid
assert data["deliveries"][0]["shippingMethod"]["name"] == existing_delivery.name
assert CheckoutDelivery.objects.filter(checkout=checkout).count() == 1
assert mocked_fetch_shipping_methods.called
assert mocked_list_shipping_methods.called
assert mocked_exclude_shipping_methods.called
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping.excluded_shipping_methods_for_checkout",
wraps=excluded_shipping_methods_for_checkout,
)
@mock.patch(
"saleor.checkout.webhooks.list_shipping_methods.list_shipping_methods_for_checkout",
wraps=list_shipping_methods_for_checkout,
)
def test_when_refreshed_delivery_has_different_details(
mocked_list_shipping_methods,
mocked_exclude_shipping_methods,
api_client,
checkout_with_item,
address,
checkout_delivery,
):
# given
checkout = checkout_with_item
assigned_delivery = checkout_delivery(checkout)
checkout.assigned_delivery = assigned_delivery
checkout.shipping_address = address
checkout.save()
expected_name = assigned_delivery.name
assigned_delivery.name = "PreviousName"
assigned_delivery.save()
variables = {"id": to_global_id_or_none(checkout)}
# when
response = api_client.post_graphql(DELIVERY_OPTIONS_CALCULATE, variables)
content = get_graphql_content(response)
data = content["data"]["deliveryOptionsCalculate"]
# then
refreshed_delivery = CheckoutDelivery.objects.exclude(id=assigned_delivery.id).get()
assigned_delivery.refresh_from_db()
assert not assigned_delivery.is_valid
assert (
assigned_delivery.built_in_shipping_method_id
== refreshed_delivery.built_in_shipping_method_id
)
assert refreshed_delivery.is_valid
assert not data["errors"]
assert len(data["deliveries"]) == 1
assert data["deliveries"][0]["id"] == graphene.Node.to_global_id(
"CheckoutDelivery", refreshed_delivery.pk
)
assert data["deliveries"][0]["shippingMethod"]["name"] == expected_name
checkout.refresh_from_db()
assert checkout.assigned_delivery_id == assigned_delivery.id
assert mocked_list_shipping_methods.called
assert mocked_exclude_shipping_methods.called
@mock.patch(
"saleor.graphql.shipping.mutations.delivery_options_calculate.fetch_shipping_methods_for_checkout",
wraps=fetch_shipping_methods_for_checkout,
)
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping.excluded_shipping_methods_for_checkout",
wraps=excluded_shipping_methods_for_checkout,
)
@mock.patch(
"saleor.checkout.webhooks.list_shipping_methods.list_shipping_methods_for_checkout",
wraps=list_shipping_methods_for_checkout,
)
def test_refresh_deliveries_when_assigned_delivery_is_none(
mocked_list_shipping_methods,
mocked_exclude_shipping_methods,
mocked_fetch_shipping_methods,
api_client,
checkout_with_item,
address,
checkout_delivery,
):
# given
checkout = checkout_with_item
existing_delivery = checkout_delivery(checkout)
checkout.shipping_address = address
checkout.delivery_methods_stale_at = timezone.now() + datetime.timedelta(minutes=5)
checkout.save(update_fields=["shipping_address", "delivery_methods_stale_at"])
assert not checkout.assigned_delivery
variables = {"id": to_global_id_or_none(checkout)}
# when
response = api_client.post_graphql(DELIVERY_OPTIONS_CALCULATE, variables)
content = get_graphql_content(response)
data = content["data"]["deliveryOptionsCalculate"]
# then
assert not data["errors"]
assert len(data["deliveries"]) == 1
checkout.refresh_from_db()
assert not checkout.assigned_delivery
existing_delivery.refresh_from_db()
assert existing_delivery.is_valid
assert data["deliveries"][0]["shippingMethod"]["name"] == existing_delivery.name
assert CheckoutDelivery.objects.filter(checkout=checkout).count() == 1
assert mocked_fetch_shipping_methods.called
assert mocked_list_shipping_methods.called
assert mocked_exclude_shipping_methods.called
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/shipping/tests/mutations/test_delivery_options_calculate.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 326,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/app/manifest_schema.py | import mimetypes
from django.core.exceptions import ValidationError as DjangoValidationError
from pydantic import BaseModel, ConfigDict, field_validator
from pydantic.alias_generators import to_camel
from pydantic_core import PydanticCustomError
from ..thumbnail import ICON_MIME_TYPES
from ..webhook.response_schemas.utils.annotations import DefaultIfNone
from .error_codes import AppErrorCode
from .types import DEFAULT_APP_TARGET
from .validators import AppURLValidator, image_url_validator
_CAMEL_CONFIG = ConfigDict(
extra="ignore",
populate_by_name=True,
alias_generator=to_camel,
)
class ManifestBrandLogoSchema(BaseModel):
model_config = _CAMEL_CONFIG
default: str
@field_validator("default")
@classmethod
def validate_logo_url(cls, v: str) -> str:
try:
image_url_validator(v)
except DjangoValidationError as e:
raise PydanticCustomError(
AppErrorCode.INVALID_URL_FORMAT.value,
"Incorrect value for field: logo.default.",
{"error_code": AppErrorCode.INVALID_URL_FORMAT.value},
) from e
filetype = mimetypes.guess_type(v)[0]
if filetype not in ICON_MIME_TYPES:
raise PydanticCustomError(
AppErrorCode.INVALID_URL_FORMAT.value,
"Invalid file type for field: logo.default.",
{"error_code": AppErrorCode.INVALID_URL_FORMAT.value},
)
return v
class ManifestBrandSchema(BaseModel):
model_config = _CAMEL_CONFIG
logo: ManifestBrandLogoSchema
class ManifestExtensionSchema(BaseModel):
model_config = _CAMEL_CONFIG
label: str
url: str
mount: str
target: str = DEFAULT_APP_TARGET
permissions: list[str] = []
options: dict = {}
class ManifestWebhookSchema(BaseModel):
model_config = _CAMEL_CONFIG
name: str
target_url: str
query: str
is_active: bool = True
async_events: list[str] = []
sync_events: list[str] = []
custom_headers: dict | None = None
@field_validator("target_url")
@classmethod
def validate_target_url(cls, v: str) -> str:
url_validator = AppURLValidator(schemes=["http", "https", "awssqs", "gcpubsub"])
try:
url_validator(v)
except (DjangoValidationError, AttributeError) as e:
raise PydanticCustomError(
AppErrorCode.INVALID_URL_FORMAT.value,
"Invalid target url.",
{"error_code": AppErrorCode.INVALID_URL_FORMAT.value},
) from e
return v
class ManifestSchema(BaseModel):
model_config = _CAMEL_CONFIG
id: str
name: str
version: str
about: str | None = None
permissions: list[str] = []
app_url: str | None = None
token_target_url: str | None = None
data_privacy_url: str | None = None
homepage_url: str | None = None
support_url: str | None = None
audience: str | None = None
required_saleor_version: str | None = None
author: str | None = None
brand: ManifestBrandSchema | None = None
extensions: DefaultIfNone[list[ManifestExtensionSchema]] = []
webhooks: DefaultIfNone[list[ManifestWebhookSchema]] = []
@field_validator("token_target_url")
@classmethod
def validate_token_target_url(cls, v: str | None) -> str | None:
if v is None:
return None
url_validator = AppURLValidator()
try:
url_validator(v)
except (DjangoValidationError, AttributeError) as e:
raise PydanticCustomError(
AppErrorCode.INVALID_URL_FORMAT.value,
"Invalid target url.",
{"error_code": AppErrorCode.INVALID_URL_FORMAT.value},
) from e
return v
@field_validator("author")
@classmethod
def validate_author(cls, v: str | None) -> str | None:
if v is None:
return None
if clean := v.strip():
return clean
raise PydanticCustomError(
AppErrorCode.INVALID.value,
"Incorrect value for field: author",
{"error_code": AppErrorCode.INVALID.value},
)
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/app/manifest_schema.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
saleor/saleor:saleor/app/tests/test_manifest_validations.py | import pytest
from pydantic import ValidationError as PydanticValidationError
from ..error_codes import AppErrorCode
from ..manifest_schema import ICON_MIME_TYPES, ManifestSchema
MINIMAL_MANIFEST = {
"id": "app.example",
"name": "My App",
"version": "1.0.0",
}
def test_manifest_schema_valid_minimal():
# given / when
schema = ManifestSchema.model_validate(MINIMAL_MANIFEST)
# then
assert schema.id == MINIMAL_MANIFEST["id"]
assert schema.name == MINIMAL_MANIFEST["name"]
assert schema.version == MINIMAL_MANIFEST["version"]
def test_manifest_schema_missing_required_fields():
# given
manifest_data = {}
# when
with pytest.raises(PydanticValidationError) as exc_info:
ManifestSchema.model_validate(manifest_data)
# then
errors = exc_info.value.errors()
error_fields = {e["loc"][0] for e in errors}
assert "id" in error_fields
assert "name" in error_fields
assert "version" in error_fields
def test_manifest_schema_invalid_token_target_url():
# given
manifest_data = {
**MINIMAL_MANIFEST,
"tokenTargetUrl": "not-a-valid-url",
}
# when
with pytest.raises(PydanticValidationError) as exc_info:
ManifestSchema.model_validate(manifest_data)
# then
errors = exc_info.value.errors()
assert len(errors) == 1
assert errors[0]["loc"][0] == "tokenTargetUrl"
assert errors[0]["ctx"]["error_code"] == AppErrorCode.INVALID_URL_FORMAT.value
def test_manifest_schema_valid_token_target_url():
# given
manifest_data = {
**MINIMAL_MANIFEST,
"tokenTargetUrl": "https://example.com/register",
}
# when
schema = ManifestSchema.model_validate(manifest_data)
# then
assert schema.token_target_url == "https://example.com/register"
def test_manifest_schema_invalid_author_empty_string():
# given
manifest_data = {
**MINIMAL_MANIFEST,
"author": " ",
}
# when
with pytest.raises(PydanticValidationError) as exc_info:
ManifestSchema.model_validate(manifest_data)
# then
errors = exc_info.value.errors()
assert len(errors) == 1
assert errors[0]["loc"][0] == "author"
assert errors[0]["ctx"]["error_code"] == AppErrorCode.INVALID.value
def test_manifest_schema_valid_author_none():
# given
manifest_data = {
**MINIMAL_MANIFEST,
"author": None,
}
# when
schema = ManifestSchema.model_validate(manifest_data)
# then
assert schema.author is None
def test_manifest_schema_valid_author_strips_whitespace():
# given
manifest_data = {
**MINIMAL_MANIFEST,
"author": " Acme Ltd ",
}
# when
schema = ManifestSchema.model_validate(manifest_data)
# then
assert schema.author == "Acme Ltd"
def test_manifest_schema_brand_missing_logo_default():
# given
manifest_data = {
**MINIMAL_MANIFEST,
"brand": {"logo": {}},
}
# when
with pytest.raises(PydanticValidationError) as exc_info:
ManifestSchema.model_validate(manifest_data)
# then
errors = exc_info.value.errors()
assert any(e["loc"][-1] == "default" for e in errors)
def test_manifest_schema_brand_invalid_url():
# given
manifest_data = {
**MINIMAL_MANIFEST,
"brand": {"logo": {"default": "not-a-url"}},
}
# when
with pytest.raises(PydanticValidationError) as exc_info:
ManifestSchema.model_validate(manifest_data)
# then
errors = exc_info.value.errors()
assert len(errors) == 1
assert errors[0]["ctx"]["error_code"] == AppErrorCode.INVALID_URL_FORMAT.value
def test_manifest_schema_brand_invalid_mime_type():
# given
assert "image/jpeg" not in ICON_MIME_TYPES
manifest_data = {
**MINIMAL_MANIFEST,
"brand": {"logo": {"default": "https://example.com/logo.jpg"}},
}
# when
with pytest.raises(PydanticValidationError) as exc_info:
ManifestSchema.model_validate(manifest_data)
# then
errors = exc_info.value.errors()
assert len(errors) == 1
assert errors[0]["ctx"]["error_code"] == AppErrorCode.INVALID_URL_FORMAT.value
def test_manifest_schema_brand_valid_png():
# given
manifest_data = {
**MINIMAL_MANIFEST,
"brand": {"logo": {"default": "https://example.com/logo.png"}},
}
# when
schema = ManifestSchema.model_validate(manifest_data)
# then
assert schema.brand is not None
assert schema.brand.logo.default == "https://example.com/logo.png"
def test_manifest_schema_extensions_null_defaults_to_empty_list():
# given
manifest_data = {
**MINIMAL_MANIFEST,
"extensions": None,
}
# when
schema = ManifestSchema.model_validate(manifest_data)
# then
assert schema.extensions == []
def test_manifest_schema_webhooks_null_defaults_to_empty_list():
# given
manifest_data = {
**MINIMAL_MANIFEST,
"webhooks": None,
}
# when
schema = ManifestSchema.model_validate(manifest_data)
# then
assert schema.webhooks == []
def test_manifest_schema_extension_missing_required_fields():
# given
manifest_data = {
**MINIMAL_MANIFEST,
"extensions": [{"url": "https://example.com/ext"}],
}
# when
with pytest.raises(PydanticValidationError) as exc_info:
ManifestSchema.model_validate(manifest_data)
# then
errors = exc_info.value.errors()
error_fields = {e["loc"][-1] for e in errors}
assert "label" in error_fields
assert "mount" in error_fields
def test_manifest_schema_webhook_missing_required_fields():
# given
manifest_data = {
**MINIMAL_MANIFEST,
"webhooks": [{"name": "my-webhook"}],
}
# when
with pytest.raises(PydanticValidationError) as exc_info:
ManifestSchema.model_validate(manifest_data)
# then
errors = exc_info.value.errors()
error_fields = {e["loc"][-1] for e in errors}
assert "targetUrl" in error_fields
assert "query" in error_fields
def test_manifest_schema_webhook_is_active_defaults_to_true():
# given
manifest_data = {
**MINIMAL_MANIFEST,
"webhooks": [
{
"name": "my-webhook",
"targetUrl": "https://example.com/webhook",
"query": "subscription { event { ... on OrderCreated { order { id } } } }",
}
],
}
# when
schema = ManifestSchema.model_validate(manifest_data)
# then
assert schema.webhooks[0].is_active is True
def test_manifest_schema_full_input():
# given
manifest_data = {
"id": "app.example",
"name": "My App",
"version": "1.0.0",
"tokenTargetUrl": "https://example.com/token",
"appUrl": "https://example.com",
"homepageUrl": "https://example.com",
"supportUrl": "https://example.com/support",
}
# when
schema = ManifestSchema.model_validate(manifest_data)
# then
assert schema.token_target_url == manifest_data["tokenTargetUrl"]
assert schema.app_url == manifest_data["appUrl"]
assert schema.homepage_url == manifest_data["homepageUrl"]
assert schema.support_url == manifest_data["supportUrl"]
def test_manifest_schema_extra_fields_ignored():
# given
manifest_data = {
**MINIMAL_MANIFEST,
"unknownField": "some value",
"anotherExtra": 42,
}
# when
schema = ManifestSchema.model_validate(manifest_data)
# then
assert schema.id == MINIMAL_MANIFEST["id"]
def test_manifest_schema_deprecated_fields_accepted():
# given — deprecated fields are excluded from schema but must not cause a
# validation error, and must remain accessible in the original dict so that
# downstream code (installation_utils, app_fetch_manifest) can still read them
data_privacy = "We do not store your data."
configuration_url = "https://example.com/config"
manifest_data = {
**MINIMAL_MANIFEST,
"dataPrivacy": data_privacy,
"configurationUrl": configuration_url,
}
# when
ManifestSchema.model_validate(manifest_data)
# then — original dict is untouched; downstream readers can still access the values
assert manifest_data["dataPrivacy"] == data_privacy
assert manifest_data["configurationUrl"] == configuration_url
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/app/tests/test_manifest_validations.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 245,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/checkout/delivery_context.py | from dataclasses import dataclass
from decimal import Decimal
from typing import TYPE_CHECKING, Any, Optional, Union
from uuid import UUID
from django.conf import settings
from django.db import transaction
from django.db.models import Q
from django.utils import timezone
from prices import Money
from promise import Promise
from ..core.db.connection import (
allow_writer,
)
from ..core.prices import quantize_price
from ..core.tracing import traced_atomic_transaction
from ..discount import VoucherType
from ..shipping.interface import ExcludedShippingMethod, ShippingMethodData
from ..shipping.models import ShippingMethod, ShippingMethodChannelListing
from ..shipping.utils import (
convert_shipping_method_data_to_checkout_delivery,
convert_to_shipping_method_data,
initialize_shipping_method_active_status,
)
from ..warehouse import WarehouseClickAndCollectOption
from ..warehouse.models import Warehouse
from ..webhook.event_types import WebhookEventAsyncType
from . import base_calculations
from .lock_objects import checkout_qs_select_for_update
from .models import Checkout, CheckoutDelivery, CheckoutLine
if TYPE_CHECKING:
from ..account.models import Address, User
from ..app.models import App
from ..plugins.manager import PluginsManager
from .fetch import CheckoutInfo, CheckoutLineInfo
PRIVATE_META_APP_SHIPPING_ID = "external_app_shipping_id"
@dataclass(frozen=True)
class DeliveryMethodBase:
delivery_method: Union["ShippingMethodData", "Warehouse"] | None = None
shipping_address: Optional["Address"] = None
store_as_customer_address: bool = False
@property
def warehouse_pk(self) -> UUID | None:
pass
@property
def delivery_method_order_field(self) -> dict:
return {"shipping_method": self.delivery_method}
@property
def is_local_collection_point(self) -> bool:
return False
@property
def delivery_method_name(self) -> dict[str, str | None]:
return {"shipping_method_name": None}
def get_warehouse_filter_lookup(self) -> dict[str, Any]:
return {}
def is_valid_delivery_method(self) -> bool:
return False
def is_method_in_valid_methods(self, checkout_info: "CheckoutInfo") -> bool:
return False
def is_delivery_method_set(self) -> bool:
return bool(self.delivery_method)
def get_details_for_conversion_to_order(self) -> dict[str, Any]:
return {"shipping_method_name": None}
@dataclass(frozen=True)
class ShippingMethodInfo(DeliveryMethodBase):
delivery_method: "ShippingMethodData"
shipping_address: Optional["Address"]
store_as_customer_address: bool = True
@property
def delivery_method_name(self) -> dict[str, str | None]:
return {"shipping_method_name": str(self.delivery_method.name)}
@property
def delivery_method_order_field(self) -> dict:
if not self.delivery_method.is_external:
return {"shipping_method_id": int(self.delivery_method.id)}
return {}
def is_valid_delivery_method(self) -> bool:
return bool(self.shipping_address)
def is_method_in_valid_methods(self, checkout_info: "CheckoutInfo") -> bool:
return self.delivery_method.active
def get_details_for_conversion_to_order(self) -> dict[str, str | int | None]:
details: dict[str, str | int | None] = {
"shipping_method_name": str(self.delivery_method.name)
}
if not self.delivery_method.is_external:
details["shipping_method_id"] = int(self.delivery_method.id)
if self.delivery_method.tax_class:
details["shipping_tax_class_id"] = self.delivery_method.tax_class.id
details["shipping_tax_class_name"] = str(
self.delivery_method.tax_class.name
)
details["shipping_tax_class_private_metadata"] = (
self.delivery_method.tax_class.private_metadata
)
details["shipping_tax_class_metadata"] = (
self.delivery_method.tax_class.metadata
)
return details
@dataclass(frozen=True)
class CollectionPointInfo(DeliveryMethodBase):
delivery_method: "Warehouse"
shipping_address: Optional["Address"]
@property
def warehouse_pk(self):
return self.delivery_method.pk
@property
def delivery_method_order_field(self) -> dict:
return {"collection_point": self.delivery_method}
@property
def is_local_collection_point(self):
return (
self.delivery_method.click_and_collect_option
== WarehouseClickAndCollectOption.LOCAL_STOCK
)
@property
def delivery_method_name(self) -> dict[str, str | None]:
return {"collection_point_name": str(self.delivery_method)}
def get_warehouse_filter_lookup(self) -> dict[str, Any]:
return (
{"warehouse_id": self.delivery_method.pk}
if self.is_local_collection_point
else {}
)
def is_valid_delivery_method(self) -> bool:
return (
self.shipping_address is not None
and self.shipping_address == self.delivery_method.address
)
def is_method_in_valid_methods(self, checkout_info) -> bool:
valid_delivery_methods = checkout_info.valid_pick_up_points
return bool(
valid_delivery_methods and self.delivery_method in valid_delivery_methods
)
def get_details_for_conversion_to_order(self) -> dict[str, Any]:
return {
"collection_point_name": str(self.delivery_method),
"collection_point": self.delivery_method,
}
def is_shipping_required(lines: list["CheckoutLineInfo"]):
"""Check if shipping is required for given checkout lines."""
return any(line_info.product_type.is_shipping_required for line_info in lines)
def get_valid_internal_shipping_methods_for_checkout_info(
checkout_info: "CheckoutInfo",
subtotal: "Money",
) -> list[ShippingMethodData]:
if not is_shipping_required(checkout_info.lines):
return []
if not checkout_info.shipping_address:
return []
country_code = (
checkout_info.shipping_address.country.code
if checkout_info.shipping_address
else None
)
shipping_methods = ShippingMethod.objects.using(
checkout_info.database_connection_name
).applicable_shipping_methods_for_instance(
checkout_info.checkout,
channel_id=checkout_info.checkout.channel_id,
price=subtotal,
shipping_address=checkout_info.shipping_address,
country_code=country_code,
lines=checkout_info.lines,
)
shipping_channel_listings = ShippingMethodChannelListing.objects.using(
checkout_info.database_connection_name
).filter(
channel_id=checkout_info.channel.id,
shipping_method_id__in=[method.pk for method in shipping_methods],
)
channel_listings_map = {
listing.shipping_method_id: listing for listing in shipping_channel_listings
}
internal_methods: list[ShippingMethodData] = []
for method in shipping_methods:
listing = channel_listings_map.get(method.pk)
if listing:
shipping_method_data = convert_to_shipping_method_data(method, listing)
internal_methods.append(shipping_method_data)
return internal_methods
def get_valid_collection_points_for_checkout(
lines: list["CheckoutLineInfo"],
channel_id: int,
quantity_check: bool = True,
database_connection_name: str = settings.DATABASE_CONNECTION_DEFAULT_NAME,
):
"""Return a collection of `Warehouse`s that can be used as a collection point.
Note that `quantity_check=False` should be used, when stocks quantity will
be validated in further steps (checkout completion) in order to raise
'InsufficientProductStock' error instead of 'InvalidShippingError'.
"""
if not is_shipping_required(lines):
return []
line_ids = [line_info.line.id for line_info in lines]
lines = CheckoutLine.objects.using(database_connection_name).filter(id__in=line_ids)
return (
Warehouse.objects.using(
database_connection_name
).applicable_for_click_and_collect(lines, channel_id)
if quantity_check
else Warehouse.objects.using(
database_connection_name
).applicable_for_click_and_collect_no_quantity_check(lines, channel_id)
)
def _remove_external_shipping_from_metadata(checkout: Checkout):
from .utils import get_checkout_metadata
metadata = get_checkout_metadata(checkout)
if not metadata:
return
field_deleted = metadata.delete_value_from_private_metadata(
PRIVATE_META_APP_SHIPPING_ID
)
if field_deleted:
metadata.save(update_fields=["private_metadata"])
def _remove_undiscounted_base_shipping_price(checkout: Checkout):
if checkout.undiscounted_base_shipping_price_amount:
checkout.undiscounted_base_shipping_price_amount = Decimal(0)
return ["undiscounted_base_shipping_price_amount"]
return []
def _assign_undiscounted_base_shipping_price_to_checkout(
checkout, checkout_delivery: CheckoutDelivery
):
current_shipping_price = quantize_price(
checkout.undiscounted_base_shipping_price, checkout.currency
)
new_shipping_price = quantize_price(checkout_delivery.price, checkout.currency)
if current_shipping_price != new_shipping_price:
checkout.undiscounted_base_shipping_price_amount = new_shipping_price.amount
return ["undiscounted_base_shipping_price_amount"]
return []
def assign_shipping_method_to_checkout(
checkout: Checkout, checkout_delivery: CheckoutDelivery
) -> list[str]:
fields_to_update = []
fields_to_update += remove_click_and_collect_from_checkout(checkout)
fields_to_update += _assign_undiscounted_base_shipping_price_to_checkout(
checkout, checkout_delivery
)
if checkout.assigned_delivery_id != checkout_delivery.id:
checkout.assigned_delivery = checkout_delivery
fields_to_update.append("assigned_delivery_id")
# make sure that we don't have obsolete data for shipping methods stored in
# private metadata
_remove_external_shipping_from_metadata(checkout=checkout)
if checkout.shipping_method_name != checkout_delivery.name:
checkout.shipping_method_name = checkout_delivery.name
fields_to_update.append("shipping_method_name")
return fields_to_update
def assign_collection_point_to_checkout(
checkout, collection_point: Warehouse
) -> list[str]:
fields_to_update = []
fields_to_update += _remove_undiscounted_base_shipping_price(checkout)
fields_to_update += remove_shipping_method_from_checkout(checkout)
if checkout.collection_point_id != collection_point.id:
checkout.collection_point_id = collection_point.id
fields_to_update.append("collection_point_id")
if checkout.shipping_address != collection_point.address:
checkout.shipping_address = collection_point.address.get_copy()
checkout.save_shipping_address = False
fields_to_update.extend(["shipping_address_id", "save_shipping_address"])
return fields_to_update
def remove_shipping_method_from_checkout(checkout: Checkout) -> list[str]:
fields_to_update = []
if checkout.assigned_delivery_id:
checkout.assigned_delivery_id = None
fields_to_update.append("assigned_delivery_id")
if checkout.shipping_method_name is not None:
checkout.shipping_method_name = None
fields_to_update.append("shipping_method_name")
return fields_to_update
def remove_click_and_collect_from_checkout(checkout: Checkout) -> list[str]:
fields_to_update = []
if checkout.collection_point_id:
checkout.collection_point_id = None
fields_to_update.append("collection_point_id")
if checkout.shipping_address_id:
checkout.shipping_address = None
# reset the save_shipping_address flag to the default value
checkout.save_shipping_address = True
fields_to_update.extend(["shipping_address_id", "save_shipping_address"])
return fields_to_update
def remove_delivery_method_from_checkout(checkout: Checkout) -> list[str]:
fields_to_update = []
fields_to_update += _remove_undiscounted_base_shipping_price(checkout)
fields_to_update += remove_shipping_method_from_checkout(checkout)
fields_to_update += remove_click_and_collect_from_checkout(checkout)
return fields_to_update
def clear_cc_delivery_method(
checkout_info: "CheckoutInfo", save: bool = True
) -> list[str]:
checkout = checkout_info.checkout
if checkout.collection_point_id is None:
return []
updated_fields = remove_click_and_collect_from_checkout(checkout)
if "collection_point_id" in updated_fields:
checkout_info.shipping_address = checkout_info.checkout.shipping_address
if updated_fields:
updated_fields.append("last_change")
if save:
checkout.safe_update(updated_fields)
return updated_fields
def is_delivery_changed(
first: CheckoutDelivery,
second: CheckoutDelivery,
) -> bool:
return (
first.name != second.name
or first.price != second.price
or first.tax_class_id != second.tax_class_id
or first.maximum_delivery_days != second.maximum_delivery_days
or first.minimum_delivery_days != second.minimum_delivery_days
)
def _overwrite_assigned_delivery(
checkout_info: "CheckoutInfo",
assigned_delivery: CheckoutDelivery | None,
refreshed_delivery: CheckoutDelivery | None,
):
"""Overwrite assigned delivery.
Function overwrites the details of assigned delivery with the
refreshed data. If refreshed delivery is missing, the assigned one
will be marked as invalid.
If the tax or price field have been changed the checkout taxes
will be marked as invalid.
"""
if not assigned_delivery:
return
# Update current assigned delivery with new details or set
# is_valid:False
if refreshed_delivery:
_create_or_update_checkout_deliveries([refreshed_delivery])
else:
_invalidate_assigned_delivery(assigned_delivery)
if _refreshed_assigned_delivery_has_impact_on_prices(
assigned_delivery, refreshed_delivery
):
from .utils import invalidate_checkout
invalidate_checkout(
checkout_info=checkout_info,
lines=checkout_info.lines,
manager=checkout_info.manager,
recalculate_discount=True,
save=True,
)
return
def _restore_assigned_delivery_as_valid(
checkout: Checkout,
assigned_delivery: CheckoutDelivery,
):
# Database has a constrain on these fields, to keep the same ID of assigned delivery
# we need to first delete the delivery that is marked as valid
CheckoutDelivery.objects.filter(
checkout_id=checkout.pk,
external_shipping_method_id=assigned_delivery.external_shipping_method_id,
built_in_shipping_method_id=assigned_delivery.built_in_shipping_method_id,
is_valid=True,
).exclude(pk=assigned_delivery.pk).delete()
assigned_delivery.is_valid = True
assigned_delivery.save(update_fields=["is_valid"])
def _invalidate_assigned_delivery(assigned_delivery: CheckoutDelivery):
assigned_delivery.is_valid = False
assigned_delivery.save(update_fields=["is_valid"])
def _preserve_assigned_delivery(
checkout: Checkout,
assigned_delivery: CheckoutDelivery | None,
refreshed_delivery: CheckoutDelivery | None,
):
"""Preserve assigned delivery.
Creates a new delivery method if the refreshed delivery has changed and marks the
assigned delivery as invalid.
"""
if not assigned_delivery:
return
# If refreshed is missing, mark assigned as invalid
if not refreshed_delivery:
_invalidate_assigned_delivery(assigned_delivery)
return
delivery_changed = is_delivery_changed(assigned_delivery, refreshed_delivery)
if not delivery_changed and assigned_delivery.is_valid:
return
if not delivery_changed and not assigned_delivery.is_valid:
_restore_assigned_delivery_as_valid(
checkout=checkout,
assigned_delivery=assigned_delivery,
)
if delivery_changed and assigned_delivery.is_valid:
_invalidate_assigned_delivery(assigned_delivery)
# this call ensures the update happens atomically as part of
# preserving the assigned delivery.
_create_or_update_checkout_deliveries([refreshed_delivery])
def _refresh_checkout_deliveries(
checkout: "Checkout",
assigned_delivery: CheckoutDelivery | None,
checkout_deliveries: list["CheckoutDelivery"],
built_in_shipping_methods_dict: dict[int, ShippingMethodData],
external_shipping_methods_dict: dict[str, ShippingMethodData],
):
"""Refresh checkout deliveries assigned to the checkout.
It updates the `CheckoutDelivery` instances associated with the checkout, based
on the shipping methods available for the checkout.
The non-available shipping methods are removed from the DB, except for the currently
assigned delivery method, which is always preserved even if it's no longer valid.
"""
exclude_from_delete = Q(
built_in_shipping_method_id__in=list(built_in_shipping_methods_dict.keys())
) | Q(external_shipping_method_id__in=list(external_shipping_methods_dict.keys()))
if assigned_delivery:
# Always preserve the assigned delivery even if it's no longer available
exclude_from_delete |= Q(pk=assigned_delivery.pk)
CheckoutDelivery.objects.filter(
checkout_id=checkout.pk,
).exclude(exclude_from_delete).delete()
if checkout_deliveries:
_create_or_update_checkout_deliveries(checkout_deliveries)
def _create_or_update_checkout_deliveries(deliveries: list[CheckoutDelivery]):
CheckoutDelivery.objects.bulk_create(
deliveries,
update_conflicts=True,
unique_fields=[
"checkout_id",
"external_shipping_method_id",
"built_in_shipping_method_id",
"is_valid",
],
update_fields=[
"name",
"description",
"price_amount",
"currency",
"maximum_delivery_days",
"minimum_delivery_days",
"metadata",
"private_metadata",
"active",
"message",
"updated_at",
"is_valid",
"is_external",
"tax_class_id",
"tax_class_name",
"tax_class_metadata",
"tax_class_private_metadata",
],
)
def get_available_built_in_shipping_methods_for_checkout_info(
checkout_info: "CheckoutInfo",
) -> list["ShippingMethodData"]:
lines = checkout_info.lines
subtotal = base_calculations.base_checkout_subtotal(
lines,
checkout_info.channel,
checkout_info.checkout.currency,
)
# if a voucher is applied to shipping, we don't want to subtract the discount amount
# as some methods based on shipping price may become unavailable,
# for example, method on which the discount was applied
is_shipping_voucher = (
checkout_info.voucher and checkout_info.voucher.type == VoucherType.SHIPPING
)
is_voucher_for_specific_product = (
checkout_info.voucher
and checkout_info.voucher.type == VoucherType.SPECIFIC_PRODUCT
)
if not is_shipping_voucher and not is_voucher_for_specific_product:
subtotal -= checkout_info.checkout.discount
valid_shipping_methods = get_valid_internal_shipping_methods_for_checkout_info(
checkout_info,
subtotal,
)
return valid_shipping_methods
def _refreshed_assigned_delivery_has_impact_on_prices(
assigned_delivery: CheckoutDelivery,
refreshed_delivery: CheckoutDelivery | None,
) -> bool:
"""Check if refreshed assigned delivery impacts checkout prices.
If the assigned delivery method has changed in a way that affects pricing,
such as a change in tax class, price or marking as invalid, this function
returns True. Otherwise, it doesn't impact prices and returns False.
"""
if not refreshed_delivery:
return True
# Different tax class can impact on prices
if refreshed_delivery.tax_class_id != assigned_delivery.tax_class_id:
return True
# Different price means that assigned delivery is invalid
if refreshed_delivery.price != assigned_delivery.price:
return True
return False
def fetch_shipping_methods_for_checkout(
checkout_info: "CheckoutInfo",
requestor: Union["App", "User", None],
overwrite_assigned_delivery: bool = True,
) -> Promise[list[CheckoutDelivery]]:
"""Fetch shipping methods for the checkout.
Fetches all available shipping methods, both built-in and external, for the given
checkout. Each method is returned as a CheckoutDelivery instance. Existing
shipping methods in the database are updated or removed as needed, while the
checkout's currently assigned shipping method (`assigned_delivery`) is
always preserved, even if it is no longer available.
`overwrite_assigned_delivery`- controls how the assigned delivery method is updated:
- True: updates the assigned delivery method's details and marks taxes as invalid if needed.
- False: creates a new delivery method if the refreshed delivery has changed and marks the
assigned delivery as invalid.
"""
checkout = checkout_info.checkout
built_in_shipping_methods_dict: dict[int, ShippingMethodData] = {
int(shipping_method.id): shipping_method
for shipping_method in get_available_built_in_shipping_methods_for_checkout_info(
checkout_info=checkout_info
)
}
def with_external_methods(external_shipping_methods: list[ShippingMethodData]):
external_shipping_methods_dict: dict[str, ShippingMethodData] = {
shipping_method.id: shipping_method
for shipping_method in external_shipping_methods
}
all_methods = list(built_in_shipping_methods_dict.values()) + list(
external_shipping_methods_dict.values()
)
# Circular import caused by the current definition of subscription payloads
# and their usage in webhook/transport layer. Until moving them out from the
# transport, we will have circular imports.
from .webhooks.exclude_shipping import excluded_shipping_methods_for_checkout
allow_replica = not (
checkout_info.database_connection_name
== settings.DATABASE_CONNECTION_DEFAULT_NAME
)
@allow_writer()
def with_excluded_methods(excluded_methods: list[ExcludedShippingMethod]):
initialize_shipping_method_active_status(all_methods, excluded_methods)
checkout_deliveries = {}
for shipping_method_data in all_methods:
checkout_delivery_method = (
convert_shipping_method_data_to_checkout_delivery(
shipping_method_data, checkout
)
)
checkout_deliveries[shipping_method_data.id] = checkout_delivery_method
with traced_atomic_transaction():
locked_checkout = (
checkout_qs_select_for_update().filter(token=checkout.token).first()
)
if not locked_checkout:
return []
if (
locked_checkout.assigned_delivery_id
!= checkout.assigned_delivery_id
):
return []
assigned_delivery = checkout.assigned_delivery
checkout.delivery_methods_stale_at = (
timezone.now() + settings.CHECKOUT_DELIVERY_OPTIONS_TTL
)
checkout.save(update_fields=["delivery_methods_stale_at"])
refreshed_assigned = None
if assigned_delivery:
refreshed_assigned = checkout_deliveries.get(
assigned_delivery.shipping_method_id
)
if overwrite_assigned_delivery:
_overwrite_assigned_delivery(
checkout_info=checkout_info,
assigned_delivery=assigned_delivery,
refreshed_delivery=refreshed_assigned,
)
else:
_preserve_assigned_delivery(
checkout=checkout,
assigned_delivery=assigned_delivery,
refreshed_delivery=refreshed_assigned,
)
_refresh_checkout_deliveries(
checkout=locked_checkout,
assigned_delivery=assigned_delivery,
checkout_deliveries=list(checkout_deliveries.values()),
built_in_shipping_methods_dict=built_in_shipping_methods_dict,
external_shipping_methods_dict=external_shipping_methods_dict,
)
if checkout_deliveries:
return list(
CheckoutDelivery.objects.filter(
checkout_id=checkout.pk,
is_valid=True,
)
)
return []
return excluded_shipping_methods_for_checkout(
checkout,
available_shipping_methods=all_methods,
allow_replica=allow_replica,
requestor=requestor,
).then(with_excluded_methods)
return fetch_external_shipping_methods_for_checkout_info(
checkout_info=checkout_info,
available_built_in_methods=list(built_in_shipping_methods_dict.values()),
requestor=requestor,
).then(with_external_methods)
def fetch_external_shipping_methods_for_checkout_info(
checkout_info,
available_built_in_methods: list[ShippingMethodData],
requestor: Union["App", "User", None],
) -> Promise[list[ShippingMethodData]]:
from .webhooks.list_shipping_methods import list_shipping_methods_for_checkout
allow_replica = not (
checkout_info.database_connection_name
== settings.DATABASE_CONNECTION_DEFAULT_NAME
)
return list_shipping_methods_for_checkout(
checkout=checkout_info.checkout,
built_in_shipping_methods=available_built_in_methods,
allow_replica=allow_replica,
requestor=requestor,
)
def get_or_fetch_checkout_deliveries(
checkout_info: "CheckoutInfo",
requestor: Union["App", "User", None],
allow_sync_webhooks: bool = True,
) -> Promise[list[CheckoutDelivery]]:
"""Get or fetch shipping methods for the checkout.
If the checkout's shipping methods are stale or missing, fetch and update them.
Otherwise, return the existing valid shipping methods.
"""
checkout = checkout_info.checkout
if (
checkout.delivery_methods_stale_at is None
or checkout.delivery_methods_stale_at <= timezone.now()
) and allow_sync_webhooks:
return fetch_shipping_methods_for_checkout(checkout_info, requestor=requestor)
return Promise.resolve(
list(
CheckoutDelivery.objects.using(
checkout_info.database_connection_name
).filter(
checkout_id=checkout.pk,
is_valid=True,
)
)
)
def assign_delivery_method_to_checkout(
checkout_info: "CheckoutInfo",
lines_info: list["CheckoutLineInfo"],
manager: "PluginsManager",
delivery_method: CheckoutDelivery | Warehouse | None,
):
fields_to_update = []
checkout = checkout_info.checkout
with transaction.atomic():
if delivery_method is None:
fields_to_update = remove_delivery_method_from_checkout(
checkout=checkout_info.checkout
)
checkout_info.collection_point = None
elif isinstance(delivery_method, CheckoutDelivery):
fields_to_update = assign_shipping_method_to_checkout(
checkout, delivery_method
)
checkout_info.collection_point = None
elif isinstance(delivery_method, Warehouse):
fields_to_update = assign_collection_point_to_checkout(
checkout, delivery_method
)
checkout_info.shipping_address = checkout.shipping_address
if not fields_to_update:
return
from .actions import call_checkout_info_event
from .utils import invalidate_checkout
invalidate_prices_updated_fields = invalidate_checkout(
checkout_info, lines_info, manager, save=False
)
checkout.save(update_fields=fields_to_update + invalidate_prices_updated_fields)
call_checkout_info_event(
manager,
event_name=WebhookEventAsyncType.CHECKOUT_UPDATED,
checkout_info=checkout_info,
lines=lines_info,
)
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/checkout/delivery_context.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 675,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
saleor/saleor:saleor/checkout/tests/test_delivery_context.py | import datetime
from decimal import Decimal
from unittest import mock
import graphene
from django.utils import timezone
from freezegun import freeze_time
from prices import Money
from promise import Promise
from ...shipping.interface import ExcludedShippingMethod, ShippingMethodData
from ...shipping.models import ShippingMethod
from ...shipping.utils import convert_shipping_method_data_to_checkout_delivery
from ...webhook.transport.shipping_helpers import to_shipping_app_id
from ..delivery_context import (
DeliveryMethodBase,
assign_delivery_method_to_checkout,
clear_cc_delivery_method,
fetch_shipping_methods_for_checkout,
)
from ..fetch import (
fetch_checkout_info,
fetch_checkout_lines,
)
from ..models import CheckoutDelivery
def _assert_built_in_shipping_method(
checkout_delivery: CheckoutDelivery,
available_shipping_method: ShippingMethod,
checkout,
settings,
):
shipping_listing = available_shipping_method.channel_listings.get(
channel_id=checkout.channel_id
)
assert isinstance(checkout_delivery, CheckoutDelivery)
assert checkout_delivery.checkout_id == checkout.pk
assert checkout_delivery.built_in_shipping_method_id == available_shipping_method.id
assert checkout_delivery.name == available_shipping_method.name
assert checkout_delivery.price_amount == shipping_listing.price_amount
assert checkout_delivery.currency == shipping_listing.price.currency
assert str(checkout_delivery.description) == str(
available_shipping_method.description
)
assert (
checkout_delivery.minimum_delivery_days
== available_shipping_method.minimum_delivery_days
)
assert (
checkout_delivery.maximum_delivery_days
== available_shipping_method.maximum_delivery_days
)
assert checkout_delivery.active is True
assert checkout_delivery.is_valid is True
assert checkout_delivery.is_external is False
assert checkout_delivery.tax_class_id == available_shipping_method.tax_class_id
assert checkout_delivery.tax_class_name == available_shipping_method.tax_class.name
assert (
checkout_delivery.tax_class_metadata
== available_shipping_method.tax_class.metadata
)
assert (
checkout_delivery.tax_class_private_metadata
== available_shipping_method.tax_class.private_metadata
)
checkout.refresh_from_db()
assert (
checkout.delivery_methods_stale_at
== timezone.now() + settings.CHECKOUT_DELIVERY_OPTIONS_TTL
)
@freeze_time("2024-05-31 12:00:01")
def test_fetch_shipping_methods_for_checkout_with_built_in_shipping_method(
checkout_with_item, plugins_manager, address, settings
):
# given
checkout = checkout_with_item
checkout.shipping_address = address
checkout.save(update_fields=["shipping_address"])
CheckoutDelivery.objects.all().delete()
available_shipping_method = ShippingMethod.objects.get()
assert available_shipping_method.tax_class
available_shipping_method.description = {
"time": 1759214012137,
"blocks": [
{
"id": "fQMbdjz2Yt",
"data": {"text": "<b>This is Shipping description</b>"},
"type": "paragraph",
}
],
"version": "2.31.0-rc.7",
}
available_shipping_method.minimum_delivery_days = 5
available_shipping_method.maximum_delivery_days = 10
available_shipping_method.metadata = {
"key": "value",
}
available_shipping_method.private_metadata = {
"private_key": "private_value",
}
available_shipping_method.save()
lines_info, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines=lines_info, manager=plugins_manager
)
# when
shipping_methods = fetch_shipping_methods_for_checkout(
checkout_info, requestor=None
).get()
# then
# Confirm that new shipping method was created
assert len(shipping_methods) == 1
checkout_delivery = shipping_methods[0]
assert isinstance(checkout_delivery, CheckoutDelivery)
assert checkout.shipping_methods.count() == 1
# Make sure that shipping method data is aligned with the built-in shipping method
_assert_built_in_shipping_method(
checkout_delivery, available_shipping_method, checkout, settings
)
@freeze_time("2024-05-31 12:00:01")
def test_fetch_shipping_methods_for_checkout_updates_existing_built_in_shipping_method(
checkout_with_item, plugins_manager, address, settings, tax_class_zero_rates
):
# given
checkout = checkout_with_item
checkout.shipping_address = address
checkout.delivery_methods_stale_at = (
timezone.now() - settings.CHECKOUT_DELIVERY_OPTIONS_TTL
)
checkout.assigned_delivery_id = None
checkout.save(
update_fields=[
"shipping_address",
"delivery_methods_stale_at",
"assigned_delivery_id",
]
)
available_shipping_method = ShippingMethod.objects.get()
existing_shipping_method = checkout.shipping_methods.create(
built_in_shipping_method_id=available_shipping_method.id,
name=available_shipping_method.name,
description=available_shipping_method.description,
price_amount=Decimal(99),
currency="USD",
maximum_delivery_days=available_shipping_method.maximum_delivery_days,
minimum_delivery_days=available_shipping_method.minimum_delivery_days,
metadata=available_shipping_method.metadata,
private_metadata=available_shipping_method.private_metadata,
active=True,
message=None,
is_valid=True,
is_external=False,
)
available_shipping_method.description = {
"time": 1759214012137,
"blocks": [
{
"id": "fQMbdjz2Yt",
"data": {"text": "<b>This is Shipping description</b>"},
"type": "paragraph",
}
],
"version": "2.31.0-rc.7",
}
available_shipping_method.minimum_delivery_days = 5
available_shipping_method.maximum_delivery_days = 10
available_shipping_method.metadata = {
"key": "value",
}
available_shipping_method.private_metadata = {
"private_key": "private_value",
}
available_shipping_method.tax_class = tax_class_zero_rates
available_shipping_method.save()
lines_info, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines=lines_info, manager=plugins_manager
)
# when
shipping_methods = fetch_shipping_methods_for_checkout(
checkout_info, requestor=None
).get()
# then
# Confirm that we updated the shipping method instead of creating a new one
assert len(shipping_methods) == 1
assert CheckoutDelivery.objects.count() == 1
checkout_delivery = shipping_methods[0]
assert isinstance(checkout_delivery, CheckoutDelivery)
assert existing_shipping_method.id == checkout_delivery.id
# Make sure that shipping method data has been updated to align with the built-in shipping method
_assert_built_in_shipping_method(
checkout_delivery, available_shipping_method, checkout, settings
)
@freeze_time("2024-05-31 12:00:01")
def test_fetch_shipping_methods_for_checkout_removes_non_applicable_built_in_shipping_method(
checkout_with_item, plugins_manager, address, settings
):
# given
checkout = checkout_with_item
checkout.shipping_address = address
checkout.delivery_methods_stale_at = (
timezone.now() + settings.CHECKOUT_DELIVERY_OPTIONS_TTL
)
checkout.save(update_fields=["shipping_address", "delivery_methods_stale_at"])
available_shipping_method = ShippingMethod.objects.get()
non_applicable_shipping_method_id = available_shipping_method.id + 1
checkout.shipping_methods.create(
built_in_shipping_method_id=non_applicable_shipping_method_id,
name="Nonexisting Shipping Method",
price_amount=Decimal(99),
currency="USD",
)
lines_info, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines=lines_info, manager=plugins_manager
)
# when
shipping_methods = fetch_shipping_methods_for_checkout(
checkout_info, requestor=None
).get()
# then
assert CheckoutDelivery.objects.count() == 1
assert len(shipping_methods) == 1
checkout_delivery = shipping_methods[0]
assert isinstance(checkout_delivery, CheckoutDelivery)
# Confirms that the non-applicable shipping method was removed and the
# new one was created
_assert_built_in_shipping_method(
checkout_delivery, available_shipping_method, checkout, settings
)
@freeze_time("2024-05-31 12:00:01")
def test_fetch_shipping_methods_for_checkout_non_applicable_assigned_built_in_shipping_method(
checkout_with_item, plugins_manager, address, settings
):
# given
checkout = checkout_with_item
checkout.shipping_address = address
checkout.delivery_methods_stale_at = (
timezone.now() + settings.CHECKOUT_DELIVERY_OPTIONS_TTL
)
checkout.save(update_fields=["shipping_address", "delivery_methods_stale_at"])
available_shipping_method = ShippingMethod.objects.get()
non_applicable_shipping_method_id = available_shipping_method.id + 1
assigned_delivery = checkout.shipping_methods.create(
built_in_shipping_method_id=non_applicable_shipping_method_id,
name="Nonexisting Shipping Method",
price_amount=Decimal(99),
currency="USD",
)
checkout.assigned_delivery = assigned_delivery
checkout.save()
lines_info, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines=lines_info, manager=plugins_manager
)
# when
shipping_methods = fetch_shipping_methods_for_checkout(
checkout_info, requestor=None
).get()
# then
assert CheckoutDelivery.objects.count() == 2
assert len(shipping_methods) == 1
new_checkout_delivery = shipping_methods[0]
assigned_delivery = CheckoutDelivery.objects.get(is_valid=False)
# Assigned shipping method is never removed explicitly but marked as invalid
assert assigned_delivery.is_valid is False
assert (
assigned_delivery.built_in_shipping_method_id
== assigned_delivery.built_in_shipping_method_id
)
assert checkout.assigned_delivery_id == assigned_delivery.id
# Confirm that new shipping method was created
_assert_built_in_shipping_method(
new_checkout_delivery, available_shipping_method, checkout, settings
)
@freeze_time("2024-05-31 12:00:01")
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping.excluded_shipping_methods_for_checkout"
)
def test_fetch_shipping_methods_for_checkout_with_excluded_built_in_shipping_method(
mocked_exclude_shipping_methods,
checkout_with_item,
plugins_manager,
address,
settings,
):
# given
unavailable_shipping_method = ShippingMethod.objects.get()
exclude_reason = "This shipping method is not available."
mocked_exclude_shipping_methods.return_value = Promise.resolve(
[
ExcludedShippingMethod(
id=str(unavailable_shipping_method.id),
reason=exclude_reason,
)
]
)
checkout = checkout_with_item
checkout.shipping_address = address
checkout.save(update_fields=["shipping_address"])
lines_info, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines=lines_info, manager=plugins_manager
)
# when
shipping_methods = fetch_shipping_methods_for_checkout(
checkout_info, requestor=None
).get()
# then
assert len(shipping_methods) == 1
checkout_delivery = shipping_methods[0]
assert isinstance(checkout_delivery, CheckoutDelivery)
assert checkout.shipping_methods.count() == 1
assert checkout_delivery.active is False
assert checkout_delivery.message == exclude_reason
@freeze_time("2024-05-31 12:00:01")
def test_fetch_shipping_methods_for_checkout_with_changed_price_of_built_in_shipping_method(
checkout_with_item, plugins_manager, address, settings
):
# given
checkout = checkout_with_item
checkout.shipping_address = address
checkout.save(update_fields=["shipping_address"])
CheckoutDelivery.objects.all().delete()
available_shipping_method = ShippingMethod.objects.get()
shipping_channel_listing = available_shipping_method.channel_listings.get(
channel_id=checkout.channel_id
)
previous_shipping_price = shipping_channel_listing.price_amount
assigned_delivery = checkout.shipping_methods.create(
built_in_shipping_method_id=available_shipping_method.id,
name="Nonexisting Shipping Method",
price_amount=previous_shipping_price,
currency="USD",
)
checkout.assigned_delivery = assigned_delivery
checkout.save()
# Change the price of the shipping method in channel listing
new_shipping_price_amount = previous_shipping_price + Decimal(10)
shipping_channel_listing.price_amount = new_shipping_price_amount
shipping_channel_listing.save(update_fields=["price_amount"])
lines_info, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines=lines_info, manager=plugins_manager
)
# when
fetch_shipping_methods_for_checkout(checkout_info, requestor=None)
# then
checkout.refresh_from_db()
assigned_delivery.refresh_from_db()
assert checkout.assigned_delivery_id == assigned_delivery.id
# Changing the price of shipping method assigned to checkout
# caused that after fetching shipping methods, the checkout
# prices are marked as expired.
assert checkout.price_expiration == timezone.now()
# The assigned shipping method has updated price
assert assigned_delivery.price_amount == new_shipping_price_amount
@freeze_time("2024-05-31 12:00:01")
def test_fetch_shipping_methods_for_checkout_with_changed_tax_class_of_built_in_shipping_method(
checkout_with_item, plugins_manager, address, settings, tax_class_zero_rates
):
# given
checkout = checkout_with_item
checkout.shipping_address = address
checkout.save(update_fields=["shipping_address"])
CheckoutDelivery.objects.all().delete()
available_shipping_method = ShippingMethod.objects.get()
shipping_channel_listing = available_shipping_method.channel_listings.get(
channel_id=checkout.channel_id
)
previous_shipping_price = shipping_channel_listing.price_amount
assigned_delivery = checkout.shipping_methods.create(
built_in_shipping_method_id=available_shipping_method.id,
name="Nonexisting Shipping Method",
price_amount=previous_shipping_price,
currency="USD",
tax_class_id=available_shipping_method.tax_class_id,
)
checkout.assigned_delivery = assigned_delivery
checkout.save()
assert available_shipping_method.tax_class != tax_class_zero_rates
available_shipping_method.tax_class = tax_class_zero_rates
available_shipping_method.save()
lines_info, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines=lines_info, manager=plugins_manager
)
# when
fetch_shipping_methods_for_checkout(checkout_info, requestor=None)
# then
checkout.refresh_from_db()
assert checkout.assigned_delivery_id == assigned_delivery.id
# Changing the tax class of shipping method assigned to checkout
# caused that after fetching shipping methods, the checkout
# prices are marked as expired.
assert checkout.price_expiration == timezone.now()
def _assert_external_shipping_method(
checkout_delivery: CheckoutDelivery,
available_shipping_method: ShippingMethodData,
checkout,
settings,
):
assert checkout_delivery.checkout_id == checkout.pk
assert checkout_delivery.external_shipping_method_id == available_shipping_method.id
assert checkout_delivery.name == available_shipping_method.name
assert checkout_delivery.price_amount == available_shipping_method.price.amount
assert checkout_delivery.currency == available_shipping_method.price.currency
assert checkout_delivery.description == str(available_shipping_method.description)
assert (
checkout_delivery.minimum_delivery_days
== available_shipping_method.minimum_delivery_days
)
assert (
checkout_delivery.maximum_delivery_days
== available_shipping_method.maximum_delivery_days
)
assert checkout_delivery.active == available_shipping_method.active
assert checkout_delivery.is_valid is True
assert checkout_delivery.is_external is True
checkout.refresh_from_db()
assert (
checkout.delivery_methods_stale_at
== timezone.now() + settings.CHECKOUT_DELIVERY_OPTIONS_TTL
)
@freeze_time("2024-05-31 12:00:01")
@mock.patch(
"saleor.checkout.webhooks.list_shipping_methods.list_shipping_methods_for_checkout"
)
def test_fetch_shipping_methods_for_checkout_with_external_shipping_method(
mocked_webhook,
checkout_with_item,
plugins_manager,
address,
app,
settings,
):
# given
available_shipping_method = ShippingMethodData(
id=to_shipping_app_id(app, "external-shipping-method-id"),
price=Money(Decimal(10), checkout_with_item.currency),
active=False,
name="External Shipping",
description="External Shipping Description",
maximum_delivery_days=10,
minimum_delivery_days=5,
metadata={
"key": "value",
},
)
mocked_webhook.return_value = Promise.resolve([available_shipping_method])
checkout = checkout_with_item
checkout.shipping_address = address
checkout.save(update_fields=["shipping_address"])
ShippingMethod.objects.all().delete()
lines_info, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines=lines_info, manager=plugins_manager
)
# when
shipping_methods = fetch_shipping_methods_for_checkout(
checkout_info, requestor=None
).get()
# then
# Confirms that new shipping method was created
assert len(shipping_methods) == 1
checkout_delivery = shipping_methods[0]
assert isinstance(checkout_delivery, CheckoutDelivery)
# Make sure that shipping method data is aligned with the external shipping method
_assert_external_shipping_method(
checkout_delivery, available_shipping_method, checkout, settings
)
@freeze_time("2024-05-31 12:00:01")
@mock.patch(
"saleor.checkout.webhooks.list_shipping_methods.list_shipping_methods_for_checkout"
)
def test_fetch_shipping_methods_for_checkout_updates_existing_external_shipping_method(
mocked_webhook,
checkout_with_item,
plugins_manager,
address,
app,
settings,
):
# given
available_shipping_method = ShippingMethodData(
id=to_shipping_app_id(app, "external-shipping-method-id"),
price=Money(Decimal(10), checkout_with_item.currency),
active=False,
name="External Shipping",
description="External Shipping Description",
maximum_delivery_days=10,
minimum_delivery_days=5,
metadata={
"key": "value",
},
)
mocked_webhook.return_value = Promise.resolve([available_shipping_method])
checkout = checkout_with_item
checkout.shipping_methods.create(
external_shipping_method_id=to_shipping_app_id(
app, "external-shipping-method-id"
),
name="Old External Shipping name",
price_amount=Decimal(99),
currency="USD",
)
checkout.shipping_address = address
checkout.delivery_methods_stale_at = timezone.now()
checkout.save(update_fields=["shipping_address", "delivery_methods_stale_at"])
ShippingMethod.objects.all().delete()
lines_info, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines=lines_info, manager=plugins_manager
)
# when
shipping_methods = fetch_shipping_methods_for_checkout(
checkout_info, requestor=None
).get()
# then
assert len(shipping_methods) == 1
checkout_delivery = shipping_methods[0]
assert isinstance(checkout_delivery, CheckoutDelivery)
# Make sure that shipping method data has been updated to align with the
# external shipping method
_assert_external_shipping_method(
checkout_delivery, available_shipping_method, checkout, settings
)
@freeze_time("2024-05-31 12:00:01")
@mock.patch(
"saleor.checkout.webhooks.list_shipping_methods.list_shipping_methods_for_checkout"
)
def test_fetch_shipping_methods_for_checkout_removes_non_applicable_external_shipping_method(
mocked_webhook,
checkout_with_item,
plugins_manager,
address,
app,
external_app,
settings,
):
# given
available_shipping_method = ShippingMethodData(
id=to_shipping_app_id(app, "external-shipping-method-id"),
price=Money(Decimal(10), checkout_with_item.currency),
active=False,
name="External Shipping",
description="External Shipping Description",
maximum_delivery_days=10,
minimum_delivery_days=5,
metadata={
"key": "value",
},
)
mocked_webhook.return_value = Promise.resolve([available_shipping_method])
checkout = checkout_with_item
checkout.shipping_methods.create(
external_shipping_method_id=to_shipping_app_id(
external_app, "expired-shipping-method-id"
),
name="Old External Shipping name",
price_amount=Decimal(99),
currency="USD",
)
checkout.shipping_address = address
checkout.delivery_methods_stale_at = timezone.now()
checkout.save(update_fields=["shipping_address", "delivery_methods_stale_at"])
ShippingMethod.objects.all().delete()
lines_info, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines=lines_info, manager=plugins_manager
)
# when
shipping_methods = fetch_shipping_methods_for_checkout(
checkout_info, requestor=None
).get()
# then
assert len(shipping_methods) == 1
checkout_delivery = shipping_methods[0]
assert isinstance(checkout_delivery, CheckoutDelivery)
# Confirms that the non-applicable shipping method was removed and the
# new one was created
_assert_external_shipping_method(
checkout_delivery, available_shipping_method, checkout, settings
)
@freeze_time("2024-05-31 12:00:01")
@mock.patch(
"saleor.checkout.webhooks.list_shipping_methods.list_shipping_methods_for_checkout"
)
def test_fetch_shipping_methods_for_checkout_non_applicable_assigned_external_shipping_method(
mocked_webhook,
checkout_with_item,
plugins_manager,
address,
app,
external_app,
settings,
):
# given
available_shipping_method = ShippingMethodData(
id=to_shipping_app_id(app, "external-shipping-method-id"),
price=Money(Decimal(10), checkout_with_item.currency),
active=False,
name="External Shipping",
description="External Shipping Description",
maximum_delivery_days=10,
minimum_delivery_days=5,
metadata={
"key": "value",
},
)
mocked_webhook.return_value = Promise.resolve([available_shipping_method])
checkout = checkout_with_item
expired_app_id = to_shipping_app_id(external_app, "expired-shipping-method-id")
assigned_delivery = checkout.shipping_methods.create(
external_shipping_method_id=expired_app_id,
name="Old External Shipping name",
price_amount=Decimal(99),
currency="USD",
is_external=True,
)
checkout.shipping_address = address
checkout.delivery_methods_stale_at = timezone.now()
checkout.assigned_delivery = assigned_delivery
checkout.save()
ShippingMethod.objects.all().delete()
lines_info, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines=lines_info, manager=plugins_manager
)
# when
shipping_methods = fetch_shipping_methods_for_checkout(
checkout_info, requestor=None
).get()
# then
assert CheckoutDelivery.objects.count() == 2
assert len(shipping_methods) == 1
new_checkout_delivery = shipping_methods[0]
assigned_delivery = CheckoutDelivery.objects.get(is_valid=False)
checkout.refresh_from_db()
# Assigned shipping method is never removed explicitly but marked as invalid
assert assigned_delivery.is_valid is False
assert assigned_delivery.external_shipping_method_id == expired_app_id
assert checkout.assigned_delivery_id == assigned_delivery.id
# Confirm that new shipping method was created
_assert_external_shipping_method(
new_checkout_delivery, available_shipping_method, checkout, settings
)
@freeze_time("2024-05-31 12:00:01")
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping.excluded_shipping_methods_for_checkout"
)
@mock.patch(
"saleor.checkout.webhooks.list_shipping_methods.list_shipping_methods_for_checkout"
)
def test_fetch_shipping_methods_for_checkout_with_excluded_external_shipping_method(
mocked_list_shipping_methods,
mocked_exclude_shipping_methods,
checkout_with_item,
plugins_manager,
address,
app,
settings,
):
# given
unavailable_shipping_method = ShippingMethodData(
id=to_shipping_app_id(app, "external-shipping-method-id"),
price=Money(Decimal(10), checkout_with_item.currency),
active=False,
name="External Shipping",
description="External Shipping Description",
maximum_delivery_days=10,
minimum_delivery_days=5,
metadata={
"key": "value",
},
)
exclude_reason = "This shipping method is not available."
mocked_exclude_shipping_methods.return_value = Promise.resolve(
[
ExcludedShippingMethod(
id=str(unavailable_shipping_method.id),
reason=exclude_reason,
)
]
)
mocked_list_shipping_methods.return_value = Promise.resolve(
[unavailable_shipping_method]
)
checkout = checkout_with_item
checkout.shipping_address = address
checkout.save(update_fields=["shipping_address"])
ShippingMethod.objects.all().delete()
lines_info, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines=lines_info, manager=plugins_manager
)
# when
shipping_methods = fetch_shipping_methods_for_checkout(
checkout_info, requestor=None
).get()
# then
assert len(shipping_methods) == 1
checkout_delivery = shipping_methods[0]
assert isinstance(checkout_delivery, CheckoutDelivery)
assert checkout_delivery.active is False
assert checkout_delivery.message == exclude_reason
@freeze_time("2024-05-31 12:00:01")
@mock.patch(
"saleor.checkout.webhooks.list_shipping_methods.list_shipping_methods_for_checkout"
)
def test_fetch_shipping_methods_for_checkout_with_changed_price_of_external_shipping_method(
mocked_webhook,
checkout_with_item,
plugins_manager,
address,
app,
settings,
):
# given
shipping_price_amount = Decimal(10)
available_shipping_method = ShippingMethodData(
id=to_shipping_app_id(app, "external-shipping-method-id"),
price=Money(shipping_price_amount, checkout_with_item.currency),
active=False,
name="External Shipping",
description="External Shipping Description",
maximum_delivery_days=10,
minimum_delivery_days=5,
metadata={
"key": "value",
},
)
mocked_webhook.return_value = Promise.resolve([available_shipping_method])
new_shipping_price_amount = shipping_price_amount + Decimal(99)
checkout = checkout_with_item
assigned_delivery = checkout.shipping_methods.create(
external_shipping_method_id=to_shipping_app_id(
app, "external-shipping-method-id"
),
name="External Shipping name",
price_amount=new_shipping_price_amount,
currency="USD",
)
checkout.assigned_delivery = assigned_delivery
checkout.shipping_address = address
checkout.delivery_methods_stale_at = timezone.now()
checkout.save()
ShippingMethod.objects.all().delete()
lines_info, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines=lines_info, manager=plugins_manager
)
# when
fetch_shipping_methods_for_checkout(checkout_info, requestor=None)
# then
checkout.refresh_from_db()
assert checkout.assigned_delivery_id == assigned_delivery.id
# Changing the price of shipping method assigned to checkout
# caused that after fetching shipping methods, the checkout
# prices are marked as expired.
assert checkout.price_expiration == timezone.now()
# The assigned shipping method has updated price
assert assigned_delivery.price_amount == new_shipping_price_amount
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping.excluded_shipping_methods_for_checkout"
)
@mock.patch(
"saleor.checkout.webhooks.list_shipping_methods.list_shipping_methods_for_checkout"
)
def test_fetch_shipping_methods_for_checkout_with_preserve_when_assigned_is_none(
mocked_list_shipping_methods,
mocked_exclude_shipping_methods,
checkout_with_item,
plugins_manager,
address,
):
# given
mocked_exclude_shipping_methods.return_value = Promise.resolve([])
mocked_list_shipping_methods.return_value = Promise.resolve([])
checkout = checkout_with_item
checkout.shipping_address = address
checkout.assigned_delivery = None
checkout.delivery_methods_stale_at = timezone.now() - datetime.timedelta(minutes=5)
checkout.save()
lines_info, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines=lines_info, manager=plugins_manager
)
# when
shipping_methods = fetch_shipping_methods_for_checkout(
checkout_info, requestor=None, overwrite_assigned_delivery=False
).get()
# then
assert len(shipping_methods) == 1
checkout.refresh_from_db()
assert checkout.assigned_delivery is None
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping.excluded_shipping_methods_for_checkout"
)
@mock.patch(
"saleor.checkout.webhooks.list_shipping_methods.list_shipping_methods_for_checkout"
)
def test_fetch_shipping_methods_for_checkout_with_preserve_when_assigned_is_invalid_and_refreshed_is_none(
mocked_list_shipping_methods,
mocked_exclude_shipping_methods,
checkout_with_item,
plugins_manager,
address,
settings,
checkout_delivery,
external_app,
):
# given
mocked_exclude_shipping_methods.return_value = Promise.resolve([])
mocked_list_shipping_methods.return_value = Promise.resolve([])
checkout = checkout_with_item
assigned_delivery = checkout.shipping_methods.create(
external_shipping_method_id=to_shipping_app_id(
external_app, "expired-shipping-method-id"
),
name="Old External Shipping name",
price_amount=Decimal(99),
currency="USD",
is_external=True,
is_valid=False,
)
checkout.shipping_address = address
checkout.delivery_methods_stale_at = timezone.now() - datetime.timedelta(minutes=5)
checkout.assigned_delivery = assigned_delivery
checkout.save()
lines_info, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines=lines_info, manager=plugins_manager
)
# when
shipping_methods = fetch_shipping_methods_for_checkout(
checkout_info, requestor=None, overwrite_assigned_delivery=False
).get()
# then
assert CheckoutDelivery.objects.count() == 2
assert len(shipping_methods) == 1
checkout.refresh_from_db()
assert checkout.assigned_delivery_id == assigned_delivery.id
assigned_delivery.refresh_from_db()
assert not assigned_delivery.is_valid
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping.excluded_shipping_methods_for_checkout"
)
@mock.patch(
"saleor.checkout.webhooks.list_shipping_methods.list_shipping_methods_for_checkout"
)
def test_fetch_shipping_methods_for_checkout_with_preserve_when_assigned_is_valid_and_refreshed_is_none(
mocked_list_shipping_methods,
mocked_exclude_shipping_methods,
checkout_with_item,
plugins_manager,
address,
settings,
external_app,
):
# given
mocked_exclude_shipping_methods.return_value = Promise.resolve([])
mocked_list_shipping_methods.return_value = Promise.resolve([])
checkout = checkout_with_item
assigned_delivery = checkout.shipping_methods.create(
external_shipping_method_id=to_shipping_app_id(
external_app, "expired-shipping-method-id"
),
name="Old External Shipping name",
price_amount=Decimal(99),
currency="USD",
is_external=True,
is_valid=True,
)
checkout.shipping_address = address
checkout.delivery_methods_stale_at = timezone.now() - datetime.timedelta(minutes=5)
checkout.assigned_delivery = assigned_delivery
checkout.save()
lines_info, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines=lines_info, manager=plugins_manager
)
# when
shipping_methods = fetch_shipping_methods_for_checkout(
checkout_info, requestor=None, overwrite_assigned_delivery=False
).get()
# then
assert CheckoutDelivery.objects.count() == 2
assert len(shipping_methods) == 1
checkout.refresh_from_db()
assert checkout.assigned_delivery_id == assigned_delivery.id
assigned_delivery.refresh_from_db()
assert not assigned_delivery.is_valid
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping.excluded_shipping_methods_for_checkout"
)
@mock.patch(
"saleor.checkout.webhooks.list_shipping_methods.list_shipping_methods_for_checkout"
)
def test_fetch_shipping_methods_for_checkout_with_preserve_when_assigned_is_valid_and_refreshed_unchanged(
mocked_list_shipping_methods,
mocked_exclude_shipping_methods,
checkout_with_item,
plugins_manager,
address,
settings,
app,
):
# given
shipping_price_amount = Decimal(10)
available_shipping_method = ShippingMethodData(
id=to_shipping_app_id(app, "external-shipping-method-id"),
price=Money(shipping_price_amount, checkout_with_item.currency),
active=True,
name="External Shipping",
description="External Shipping Description",
maximum_delivery_days=10,
minimum_delivery_days=5,
metadata={
"key": "value",
},
)
mocked_exclude_shipping_methods.return_value = Promise.resolve([])
mocked_list_shipping_methods.return_value = Promise.resolve(
[available_shipping_method]
)
checkout = checkout_with_item
checkout.shipping_address = address
assigned_delivery = convert_shipping_method_data_to_checkout_delivery(
available_shipping_method, checkout
)
assigned_delivery.is_valid = True
assigned_delivery.save()
checkout.shipping_address = address
checkout.delivery_methods_stale_at = timezone.now() - datetime.timedelta(minutes=5)
checkout.assigned_delivery = assigned_delivery
checkout.save()
lines_info, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines=lines_info, manager=plugins_manager
)
# when
shipping_methods = fetch_shipping_methods_for_checkout(
checkout_info, requestor=None, overwrite_assigned_delivery=False
).get()
# then
assert CheckoutDelivery.objects.count() == 2
assert len(shipping_methods) == 2
checkout.refresh_from_db()
assert checkout.assigned_delivery_id == assigned_delivery.id
assigned_delivery.refresh_from_db()
assert assigned_delivery.is_valid
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping.excluded_shipping_methods_for_checkout"
)
@mock.patch(
"saleor.checkout.webhooks.list_shipping_methods.list_shipping_methods_for_checkout"
)
def test_fetch_shipping_methods_for_checkout_with_preserve_when_assigned_is_invalid_and_refreshed_unchanged(
mocked_list_shipping_methods,
mocked_exclude_shipping_methods,
checkout_with_item,
plugins_manager,
address,
settings,
app,
):
# given
shipping_price_amount = Decimal(10)
available_shipping_method = ShippingMethodData(
id=to_shipping_app_id(app, "external-shipping-method-id"),
price=Money(shipping_price_amount, checkout_with_item.currency),
active=True,
name="External Shipping",
description="External Shipping Description",
maximum_delivery_days=10,
minimum_delivery_days=5,
metadata={
"key": "value",
},
)
mocked_exclude_shipping_methods.return_value = Promise.resolve([])
mocked_list_shipping_methods.return_value = Promise.resolve(
[available_shipping_method]
)
checkout = checkout_with_item
assigned_delivery = convert_shipping_method_data_to_checkout_delivery(
available_shipping_method, checkout
)
assigned_delivery.is_valid = False
assigned_delivery.save()
checkout.shipping_address = address
checkout.delivery_methods_stale_at = timezone.now() - datetime.timedelta(minutes=5)
checkout.assigned_delivery = assigned_delivery
checkout.save()
lines_info, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines=lines_info, manager=plugins_manager
)
# when
shipping_methods = fetch_shipping_methods_for_checkout(
checkout_info, requestor=None, overwrite_assigned_delivery=False
).get()
# then
# Invalid assigned delivery should be converted to valid
# The assigned delivery's ID should be the same as previously
assert CheckoutDelivery.objects.count() == 2
assert len(shipping_methods) == 2
checkout.refresh_from_db()
assert checkout.assigned_delivery_id == assigned_delivery.id
assigned_delivery.refresh_from_db()
assert assigned_delivery.is_valid
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping.excluded_shipping_methods_for_checkout"
)
@mock.patch(
"saleor.checkout.webhooks.list_shipping_methods.list_shipping_methods_for_checkout"
)
def test_fetch_shipping_methods_for_checkout_with_preserve_when_assigned_is_valid_and_refreshed_changed(
mocked_list_shipping_methods,
mocked_exclude_shipping_methods,
checkout_with_item,
plugins_manager,
address,
settings,
app,
):
# given
shipping_price_amount = Money(Decimal(10), checkout_with_item.currency)
changed_shipping_price_amount = Money(Decimal(11), checkout_with_item.currency)
available_shipping_method = ShippingMethodData(
id=to_shipping_app_id(app, "external-shipping-method-id"),
price=shipping_price_amount,
active=True,
name="External Shipping",
description="External Shipping Description",
maximum_delivery_days=10,
minimum_delivery_days=5,
metadata={
"key": "value",
},
)
assigned_delivery = convert_shipping_method_data_to_checkout_delivery(
available_shipping_method, checkout_with_item
)
assigned_delivery.is_valid = True
assigned_delivery.save()
checkout = checkout_with_item
checkout.shipping_address = address
checkout.delivery_methods_stale_at = timezone.now() - datetime.timedelta(minutes=5)
checkout.assigned_delivery = assigned_delivery
checkout.save()
changed_shipping_method = available_shipping_method
changed_shipping_method.price = changed_shipping_price_amount
changed_shipping_method.name = "Modified"
mocked_exclude_shipping_methods.return_value = Promise.resolve([])
mocked_list_shipping_methods.return_value = Promise.resolve(
[changed_shipping_method]
)
lines_info, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines=lines_info, manager=plugins_manager
)
# when
shipping_methods = fetch_shipping_methods_for_checkout(
checkout_info, requestor=None, overwrite_assigned_delivery=False
).get()
# then
deliveries = CheckoutDelivery.objects.filter(
external_shipping_method_id=assigned_delivery.external_shipping_method_id
)
assert len(deliveries) == 2
assigned_delivery.refresh_from_db()
assert not assigned_delivery.is_valid
assert assigned_delivery.price == shipping_price_amount
refreshed_delivery = next(
delivery for delivery in deliveries if delivery.id != assigned_delivery.id
)
assert refreshed_delivery.price == changed_shipping_price_amount
assert (
refreshed_delivery.external_shipping_method_id
== assigned_delivery.external_shipping_method_id
)
assert refreshed_delivery.is_valid
assert len(shipping_methods) == 2
checkout.refresh_from_db()
assert checkout.assigned_delivery_id == assigned_delivery.id
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping.excluded_shipping_methods_for_checkout"
)
@mock.patch(
"saleor.checkout.webhooks.list_shipping_methods.list_shipping_methods_for_checkout"
)
def test_fetch_shipping_methods_for_checkout_with_preserve_when_assigned_is_invalid_and_refreshed_changed(
mocked_list_shipping_methods,
mocked_exclude_shipping_methods,
checkout_with_item,
plugins_manager,
address,
settings,
app,
):
# given
shipping_price_amount = Money(Decimal(10), checkout_with_item.currency)
changed_shipping_price_amount = Money(Decimal(11), checkout_with_item.currency)
available_shipping_method = ShippingMethodData(
id=to_shipping_app_id(app, "external-shipping-method-id"),
price=shipping_price_amount,
active=True,
name="External Shipping",
description="External Shipping Description",
maximum_delivery_days=10,
minimum_delivery_days=5,
metadata={
"key": "value",
},
)
assigned_delivery = convert_shipping_method_data_to_checkout_delivery(
available_shipping_method, checkout_with_item
)
assigned_delivery.is_valid = False
assigned_delivery.save()
checkout = checkout_with_item
checkout.shipping_address = address
checkout.delivery_methods_stale_at = timezone.now() - datetime.timedelta(minutes=5)
checkout.assigned_delivery = assigned_delivery
checkout.save()
changed_shipping_method = available_shipping_method
changed_shipping_method.price = changed_shipping_price_amount
changed_shipping_method.name = "Modified"
mocked_exclude_shipping_methods.return_value = Promise.resolve([])
mocked_list_shipping_methods.return_value = Promise.resolve(
[changed_shipping_method]
)
lines_info, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines=lines_info, manager=plugins_manager
)
# when
shipping_methods = fetch_shipping_methods_for_checkout(
checkout_info, requestor=None, overwrite_assigned_delivery=False
).get()
# then
deliveries = CheckoutDelivery.objects.filter(
external_shipping_method_id=assigned_delivery.external_shipping_method_id
)
assert len(deliveries) == 2
assigned_delivery.refresh_from_db()
assert not assigned_delivery.is_valid
assert assigned_delivery.price == shipping_price_amount
refreshed_delivery = next(
delivery for delivery in deliveries if delivery.id != assigned_delivery.id
)
assert refreshed_delivery.price == changed_shipping_price_amount
assert refreshed_delivery.is_valid
assert len(shipping_methods) == 2
checkout.refresh_from_db()
assert checkout.assigned_delivery_id == assigned_delivery.id
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping.excluded_shipping_methods_for_checkout"
)
@mock.patch(
"saleor.checkout.webhooks.list_shipping_methods.list_shipping_methods_for_checkout"
)
def test_fetch_shipping_methods_for_checkout_with_preserve_when_refreshed_is_changed_to_match_assigned(
mocked_list_shipping_methods,
mocked_exclude_shipping_methods,
checkout_with_item,
plugins_manager,
address,
settings,
app,
):
# given
shipping_price_amount = Decimal(10)
available_shipping_method = ShippingMethodData(
id=to_shipping_app_id(app, "external-shipping-method-id"),
price=Money(shipping_price_amount, checkout_with_item.currency),
active=True,
name="External Shipping",
description="External Shipping Description",
maximum_delivery_days=10,
minimum_delivery_days=5,
metadata={
"key": "value",
},
)
mocked_exclude_shipping_methods.return_value = Promise.resolve([])
mocked_list_shipping_methods.return_value = Promise.resolve(
[available_shipping_method]
)
checkout = checkout_with_item
checkout.shipping_address = address
assigned_delivery = convert_shipping_method_data_to_checkout_delivery(
available_shipping_method, checkout
)
assigned_delivery.is_valid = False
assigned_delivery.save()
CheckoutDelivery.objects.create(
checkout=checkout,
external_shipping_method_id=assigned_delivery.external_shipping_method_id,
built_in_shipping_method_id=None,
name="Modified",
price_amount=Decimal(11),
currency=checkout.currency,
is_valid=True,
is_external=True,
)
checkout.delivery_methods_stale_at = timezone.now() - datetime.timedelta(minutes=5)
checkout.assigned_delivery = assigned_delivery
checkout.save()
lines_info, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines=lines_info, manager=plugins_manager
)
# when
shipping_methods = fetch_shipping_methods_for_checkout(
checkout_info, requestor=None, overwrite_assigned_delivery=False
).get()
# then
deliveries = CheckoutDelivery.objects.filter(
external_shipping_method_id=assigned_delivery.external_shipping_method_id
)
assert len(deliveries) == 1
assigned_delivery.refresh_from_db()
assert assigned_delivery.is_valid
assert len(shipping_methods) == 2
checkout.refresh_from_db()
assert checkout.assigned_delivery_id == assigned_delivery.id
def test_assign_delivery_method_to_checkout_delivery_method_to_none(
checkout_with_delivery_method_for_cc, plugins_manager
):
# given
checkout = checkout_with_delivery_method_for_cc
lines_info, _ = fetch_checkout_lines(checkout)
lines, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, plugins_manager)
# when
assign_delivery_method_to_checkout(checkout_info, lines_info, plugins_manager, None)
# then
assert checkout_with_delivery_method_for_cc.collection_point_id is None
assert checkout_with_delivery_method_for_cc.shipping_address_id is None
assert checkout_info.collection_point is None
def test_assign_delivery_method_to_checkout_delivery_method_to_external(
checkout_with_shipping_method, shipping_app, plugins_manager
):
# given
checkout = checkout_with_shipping_method
lines_info, _ = fetch_checkout_lines(checkout)
lines, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, plugins_manager)
app_shipping_id = "abcd"
app_shipping_name = "Shipping"
method_id = graphene.Node.to_global_id(
"app", f"{shipping_app.id}:{app_shipping_id}"
)
shipping_method = CheckoutDelivery.objects.create(
checkout=checkout,
external_shipping_method_id=method_id,
name=app_shipping_name,
price_amount="10.00",
currency="USD",
maximum_delivery_days=7,
is_external=True,
)
# when
assign_delivery_method_to_checkout(
checkout_info, lines_info, plugins_manager, shipping_method
)
# then
assert checkout.shipping_method_name == app_shipping_name
assert checkout.assigned_delivery.shipping_method_id == method_id
assert checkout.assigned_delivery.name == app_shipping_name
assert checkout_info.collection_point is None
def test_assign_delivery_method_to_checkout_delivery_method_to_cc(
checkout,
shipping_method_weight_based,
warehouses_for_cc,
checkout_delivery,
plugins_manager,
):
# given
checkout.assigned_delivery = checkout_delivery(
checkout, shipping_method_weight_based
)
checkout.shipping_method_name = shipping_method_weight_based.name
lines_info, _ = fetch_checkout_lines(checkout)
lines, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, plugins_manager)
collection_point = warehouses_for_cc[0]
# when
assign_delivery_method_to_checkout(
checkout_info, lines_info, plugins_manager, collection_point
)
# then
assert checkout.collection_point == collection_point
assert checkout.shipping_address == collection_point.address
assert int(checkout.shipping_address_id) != int(collection_point.address.id)
assert checkout.assigned_delivery is None
assert checkout.shipping_method_name is None
def test_clear_cc_delivery_method(
checkout_with_delivery_method_for_cc, plugins_manager
):
# given
assert checkout_with_delivery_method_for_cc.collection_point_id
checkout_info = fetch_checkout_info(
checkout_with_delivery_method_for_cc, [], plugins_manager
)
# when
clear_cc_delivery_method(checkout_info)
# then
checkout_with_delivery_method_for_cc.refresh_from_db()
assert not checkout_with_delivery_method_for_cc.collection_point_id
assert isinstance(checkout_info.get_delivery_method_info(), DeliveryMethodBase)
def test_is_valid_delivery_method(
checkout_with_item, address, shipping_zone, checkout_delivery, plugins_manager
):
checkout = checkout_with_item
checkout.shipping_address = address
checkout.save()
lines, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, plugins_manager)
delivery_method_info = checkout_info.get_delivery_method_info()
# no shipping method assigned
assert not delivery_method_info.is_valid_delivery_method()
checkout.assigned_delivery = checkout_delivery(checkout)
checkout.save()
checkout_info = fetch_checkout_info(checkout, lines, plugins_manager)
delivery_method_info = checkout_info.get_delivery_method_info()
assert delivery_method_info.is_valid_delivery_method()
checkout.assigned_delivery.active = False
checkout.assigned_delivery.save()
checkout_info = fetch_checkout_info(checkout, lines, plugins_manager)
delivery_method_info = checkout_info.get_delivery_method_info()
assert not delivery_method_info.is_method_in_valid_methods(checkout_info)
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/checkout/tests/test_delivery_context.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 1332,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/shipping/tests/webhooks/test_shared.py | from unittest import mock
import graphene
from promise import Promise
from ....webhook.event_types import WebhookEventSyncType
from ....webhook.models import Webhook
from ....webhook.response_schemas.shipping import logger as schema_logger
from ....webhook.response_schemas.utils.annotations import logger as annotations_logger
from ....webhook.transport.shipping_helpers import to_shipping_app_id
from ...webhooks.shared import (
_get_excluded_shipping_methods_from_response,
_get_excluded_shipping_methods_or_fetch,
)
@mock.patch.object(annotations_logger, "warning")
@mock.patch.object(schema_logger, "warning")
def test_get_excluded_shipping_methods_from_response(
mocked_schema_logger, mocked_annotations_logger, app
):
# given
external_id = to_shipping_app_id(app, "test-1234")
response = {
"excluded_methods": [
{
"id": "",
},
{
"id": "not-an-id",
},
{
"id": graphene.Node.to_global_id("Car", "1"),
},
{
"id": graphene.Node.to_global_id("ShippingMethod", "2"),
},
{
"id": external_id,
},
]
}
webhook = Webhook.objects.create(
name="shipping-webhook-1",
app=app,
target_url="https://shipping-gateway.com/apiv2/",
)
# when
excluded_methods = _get_excluded_shipping_methods_from_response(response, webhook)
# then
assert len(excluded_methods) == 2
assert excluded_methods[0].id == "2"
assert excluded_methods[1].id == external_id
assert mocked_schema_logger.call_count == 3
assert mocked_annotations_logger.call_count == 3
@mock.patch.object(annotations_logger, "warning")
@mock.patch.object(schema_logger, "warning")
def test_get_excluded_shipping_methods_from_response_invalid(
mocked_schema_logger, mocked_annotations_logger, app
):
# given
response = {
"excluded_methods": [
{
"id": "not-an-id",
},
]
}
webhook = Webhook.objects.create(
name="shipping-webhook-1",
app=app,
target_url="https://shipping-gateway.com/apiv2/",
)
# when
excluded_methods = _get_excluded_shipping_methods_from_response(response, webhook)
# then
assert not excluded_methods
assert mocked_schema_logger.call_count == 1
assert (
"Malformed ShippingMethod id was provided:"
in mocked_schema_logger.call_args[0][0]
)
assert mocked_annotations_logger.call_count == 1
error_msg = mocked_annotations_logger.call_args[0][1]
assert "Skipping invalid shipping method (FilterShippingMethodsSchema)" in error_msg
@mock.patch("saleor.shipping.webhooks.shared._parse_excluded_shipping_methods")
@mock.patch(
"saleor.webhook.transport.synchronous.transport.trigger_webhook_sync_promise"
)
@mock.patch(
"saleor.shipping.webhooks.shared._get_excluded_shipping_methods_from_response"
)
def test_get_excluded_shipping_methods_or_fetch_invalid_response_type(
mocked_get_excluded,
mocked_webhook_sync_trigger,
mocked_parse,
app,
checkout,
):
# given
mocked_webhook_sync_trigger.return_value = Promise.resolve(["incorrect_type"])
webhook = Webhook.objects.create(
name="Simple webhook", app=app, target_url="http://www.example.com/test"
)
event_type = WebhookEventSyncType.CHECKOUT_FILTER_SHIPPING_METHODS
webhook.events.create(event_type=event_type)
webhooks = Webhook.objects.all()
# when
_get_excluded_shipping_methods_or_fetch(
webhooks, event_type, '{"test":"payload"}', checkout, False, None, {}
).get()
# then
mocked_get_excluded.assert_not_called()
mocked_parse.assert_called_once_with([])
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/shipping/tests/webhooks/test_shared.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/shipping/webhooks/shared.py | import logging
from collections import defaultdict
from typing import TYPE_CHECKING, Any, Union
from django.conf import settings
from django.db.models import QuerySet
from promise import Promise
from pydantic import ValidationError
from ...webhook.models import Webhook
from ...webhook.response_schemas.shipping import (
ExcludedShippingMethodSchema,
FilterShippingMethodsSchema,
)
from ...webhook.transport.synchronous.transport import (
trigger_webhook_sync_promise,
trigger_webhook_sync_promise_if_not_cached,
)
from ..interface import ExcludedShippingMethod, ShippingMethodData
if TYPE_CHECKING:
from ...account.models import User
from ...app.models import App
from ...checkout.models import Checkout
from ...order.models import Order
CACHE_EXCLUDED_SHIPPING_TIME = 60 * 3
logger = logging.getLogger(__name__)
def generate_payload_for_shipping_method(method: ShippingMethodData):
payload = {
"id": method.graphql_id,
"price": method.price.amount,
"currency": method.price.currency,
"name": method.name,
"maximum_order_weight": method.maximum_order_weight,
"minimum_order_weight": method.minimum_order_weight,
"maximum_delivery_days": method.maximum_delivery_days,
"minimum_delivery_days": method.minimum_delivery_days,
}
return payload
def _get_excluded_shipping_methods_from_response(
response_data: dict,
webhook: "Webhook",
) -> list[ExcludedShippingMethodSchema]:
excluded_methods = []
try:
filter_methods_schema = FilterShippingMethodsSchema.model_validate(
response_data,
context={
"custom_message": "Skipping invalid shipping method (FilterShippingMethodsSchema)"
},
)
excluded_methods.extend(filter_methods_schema.excluded_methods)
except ValidationError:
logger.warning(
"Skipping invalid response from app %s: %s",
str(webhook.app.identifier),
response_data,
)
return excluded_methods
def _parse_excluded_shipping_methods(
excluded_methods: list[ExcludedShippingMethodSchema],
) -> dict[str, list[ExcludedShippingMethod]]:
"""Prepare method_id to excluded methods map."""
excluded_methods_map = defaultdict(list)
for excluded_method in excluded_methods:
method_id = excluded_method.id
reason = excluded_method.reason or ""
excluded_methods_map[method_id].append(
ExcludedShippingMethod(id=method_id, reason=reason)
)
return excluded_methods_map
def _get_excluded_shipping_methods_or_fetch(
webhooks: QuerySet,
event_type: str,
static_payload: str,
subscribable_object: "tuple[Order | Checkout, list[ShippingMethodData]]",
allow_replica: bool,
requestor: Union["App", "User", None],
cache_data: dict | None,
) -> Promise[dict[str, list[ExcludedShippingMethod]]]:
"""Return data of all excluded shipping methods.
The data will be fetched from the cache if present and cache_data was provided.
If missing it will fetch it from all defined webhooks by calling a request to
each of them one by one.
"""
promised_responses = []
for webhook in webhooks:
# The approach for Order and Checkout is the same, except that
# Checkout does not need a cache anymore as all deliveries and their
# data are denormalized. The same flow will be introduced for the Order
# but for now we use cache_data as a indicator to decide if we want to
# use the cache approach. This will be fully dropped after introducing
# denormalized deliveries for Order.
if cache_data is not None:
response_promise = trigger_webhook_sync_promise_if_not_cached(
event_type=event_type,
static_payload=static_payload,
webhook=webhook,
cache_data=cache_data,
allow_replica=allow_replica,
subscribable_object=subscribable_object,
request_timeout=settings.WEBHOOK_SYNC_TIMEOUT,
cache_timeout=CACHE_EXCLUDED_SHIPPING_TIME,
requestor=requestor,
)
else:
response_promise = trigger_webhook_sync_promise(
event_type=event_type,
static_payload=static_payload,
webhook=webhook,
allow_replica=allow_replica,
subscribable_object=subscribable_object,
requestor=requestor,
timeout=settings.WEBHOOK_SYNC_TIMEOUT,
)
promised_responses.append(response_promise)
def process_responses(
responses: list[Any],
) -> dict[str, list[ExcludedShippingMethod]]:
excluded_methods: list[ExcludedShippingMethodSchema] = []
for response_data, webhook in zip(responses, webhooks, strict=True):
if response_data and isinstance(response_data, dict):
excluded_methods.extend(
_get_excluded_shipping_methods_from_response(response_data, webhook)
)
return _parse_excluded_shipping_methods(excluded_methods)
return Promise.all(promised_responses).then(process_responses)
def get_excluded_shipping_data(
webhooks: QuerySet[Webhook],
event_type: str,
static_payload: str,
subscribable_object: "tuple[Order | Checkout, list[ShippingMethodData]]",
allow_replica: bool,
requestor: Union["App", "User", None],
cache_data: dict | None,
) -> Promise[list[ExcludedShippingMethod]]:
"""Exclude not allowed shipping methods by sync webhook.
Fetch excluded shipping methods from sync webhooks and return them as a list of
excluded shipping methods.
When cache_data is provided, the function uses it to built cache and reduce the number of
requests which we call to the external APIs. In case when we have the same payload
in a cache as we're going to send now, we will skip an additional request and use
the response fetched from cache.
The function will fetch the payload only in the case that we have any defined
webhook.
"""
def merge_excluded_methods_map(
excluded_methods_map: dict[str, list[ExcludedShippingMethod]],
) -> list[ExcludedShippingMethod]:
excluded_methods = []
for method_id, methods in excluded_methods_map.items():
reason = None
if reasons := [m.reason for m in methods if m.reason]:
reason = " ".join(reasons)
excluded_methods.append(ExcludedShippingMethod(id=method_id, reason=reason))
return excluded_methods
return _get_excluded_shipping_methods_or_fetch(
webhooks,
event_type,
static_payload,
subscribable_object,
allow_replica,
requestor,
cache_data=cache_data,
).then(merge_excluded_methods_map)
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/shipping/webhooks/shared.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
saleor/saleor:saleor/core/tests/test_text.py | from ..utils.text import strip_accents
def test_strip_accents_removes_diacritics():
assert strip_accents("Magnésium") == "Magnesium"
def test_strip_accents_removes_multiple_diacritics():
assert strip_accents("café") == "cafe"
def test_strip_accents_removes_diaeresis():
assert strip_accents("naïve") == "naive"
def test_strip_accents_preserves_ascii():
assert strip_accents("hello world") == "hello world"
def test_strip_accents_empty_string():
assert strip_accents("") == ""
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/core/tests/test_text.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/checkout/tests/webhooks/subscriptions/test_list_shipping_methods.py | import json
import graphene
import pytest
from .....shipping.interface import ShippingMethodData
from .....shipping.models import ShippingMethod
from .....shipping.utils import convert_to_shipping_method_data
from .....webhook.event_types import WebhookEventSyncType
from .....webhook.transport.asynchronous.transport import (
create_deliveries_for_subscriptions,
)
from .....webhook.transport.synchronous.transport import (
create_delivery_for_subscription_sync_event,
)
SHIPPING_LIST_METHODS_FOR_CHECKOUT = """
subscription{
event{
...on ShippingListMethodsForCheckout{
checkout{
id
}
shippingMethods{
name
id
}
}
}
}
"""
@pytest.fixture
def subscription_shipping_list_methods_for_checkout_webhook(subscription_webhook):
return subscription_webhook(
SHIPPING_LIST_METHODS_FOR_CHECKOUT,
WebhookEventSyncType.SHIPPING_LIST_METHODS_FOR_CHECKOUT,
)
@pytest.fixture
def subscription_checkout_shipping_filter_and_list_missing_one_in_definition(
subscription_webhook,
):
from .....webhook.tests.subscription_webhooks import (
subscription_queries as queries,
)
return subscription_webhook(
queries.THUMBNAIL_CREATED,
WebhookEventSyncType.SHIPPING_LIST_METHODS_FOR_CHECKOUT,
)
def test_shipping_list_methods_for_checkout(
checkout_with_shipping_required,
subscription_shipping_list_methods_for_checkout_webhook,
address,
shipping_method,
):
# given
checkout = checkout_with_shipping_required
checkout.shipping_address = address
checkout.shipping_method = shipping_method
webhooks = [subscription_shipping_list_methods_for_checkout_webhook]
event_type = WebhookEventSyncType.SHIPPING_LIST_METHODS_FOR_CHECKOUT
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
internal_methods: list[ShippingMethodData] = []
for method in ShippingMethod.objects.all():
shipping_method_data = convert_to_shipping_method_data(
method, method.channel_listings.get(channel=checkout.channel)
)
internal_methods.append(shipping_method_data)
# when
deliveries = create_deliveries_for_subscriptions(
event_type, (checkout, internal_methods), webhooks
)
# then
shipping_methods = [
{
"id": graphene.Node.to_global_id("ShippingMethod", sm.id),
"name": sm.name,
}
for sm in internal_methods
]
payload = json.loads(deliveries[0].payload.get_payload())
assert payload["checkout"] == {"id": checkout_id}
for method in shipping_methods:
assert method in payload["shippingMethods"]
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_checkout_list_methods_mismatch_in_subscription_query_definition(
checkout_ready_to_complete,
subscription_checkout_shipping_filter_and_list_missing_one_in_definition,
):
# given
webhook = subscription_checkout_shipping_filter_and_list_missing_one_in_definition
event_type = WebhookEventSyncType.SHIPPING_LIST_METHODS_FOR_CHECKOUT
# when
deliveries = create_delivery_for_subscription_sync_event(
event_type, checkout_ready_to_complete, webhook
)
# then
assert not deliveries
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/checkout/tests/webhooks/subscriptions/test_list_shipping_methods.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/checkout/webhooks/list_shipping_methods.py | import logging
from typing import TYPE_CHECKING, Any, Union
from django.conf import settings
from promise import Promise
from pydantic import ValidationError
from ...shipping.interface import ShippingMethodData
from ...webhook.event_types import WebhookEventSyncType
from ...webhook.payloads import generate_checkout_payload
from ...webhook.response_schemas.shipping import ListShippingMethodsSchema
from ...webhook.transport.synchronous.transport import trigger_webhook_sync_promise
from ...webhook.utils import get_webhooks_for_event
from ..models import Checkout
if TYPE_CHECKING:
from ...account.models import User
from ...app.models import App
logger = logging.getLogger(__name__)
def list_shipping_methods_for_checkout(
checkout: "Checkout",
built_in_shipping_methods: list["ShippingMethodData"],
allow_replica: bool,
requestor: Union["App", "User", None],
) -> Promise[list["ShippingMethodData"]]:
methods: list[ShippingMethodData] = []
event_type = WebhookEventSyncType.SHIPPING_LIST_METHODS_FOR_CHECKOUT
webhooks = get_webhooks_for_event(event_type)
if not webhooks:
return Promise.resolve(methods)
promised_responses = []
payload = generate_checkout_payload(checkout, requestor)
for webhook in webhooks:
promised_responses.append(
trigger_webhook_sync_promise(
event_type=event_type,
static_payload=payload,
webhook=webhook,
allow_replica=allow_replica,
subscribable_object=(checkout, built_in_shipping_methods),
timeout=settings.WEBHOOK_SYNC_TIMEOUT,
requestor=requestor,
)
)
def process_responses(responses: list[Any]):
for response_data, webhook in zip(responses, webhooks, strict=True):
if response_data:
shipping_methods = _parse_list_shipping_methods_response(
response_data, webhook.app, checkout.currency
)
methods.extend(shipping_methods)
return [
method for method in methods if method.price.currency == checkout.currency
]
return Promise.all(promised_responses).then(process_responses)
def _parse_list_shipping_methods_response(
response_data: Any, app: "App", object_currency: str
) -> list["ShippingMethodData"]:
try:
list_shipping_method_model = ListShippingMethodsSchema.model_validate(
response_data,
context={
"app": app,
"currency": object_currency,
"custom_message": "Skipping invalid shipping method (ListShippingMethodsSchema)",
},
)
except ValidationError:
logger.warning("Skipping invalid shipping method response: %s", response_data)
return []
return [
ShippingMethodData(
id=shipping_method.id,
name=shipping_method.name,
price=shipping_method.price,
maximum_delivery_days=shipping_method.maximum_delivery_days,
minimum_delivery_days=shipping_method.minimum_delivery_days,
description=shipping_method.description,
metadata=shipping_method.metadata,
private_metadata=shipping_method.private_metadata,
)
for shipping_method in list_shipping_method_model.root
]
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/checkout/webhooks/list_shipping_methods.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/checkout/tests/webhooks/static_payloads/test_exclude_shipping.py | import json
from decimal import Decimal
import graphene
from measurement.measures import Weight
from prices import Money
from .....shipping.interface import ShippingMethodData
from ....webhooks.exclude_shipping import (
_generate_excluded_shipping_methods_for_checkout_payload,
)
def test_generate_excluded_shipping_methods_for_checkout(checkout):
shipping_method = ShippingMethodData(
id="123",
price=Money(Decimal("10.59"), "USD"),
name="shipping",
maximum_order_weight=Weight(kg=10),
minimum_order_weight=Weight(g=1),
maximum_delivery_days=10,
minimum_delivery_days=2,
)
response = json.loads(
_generate_excluded_shipping_methods_for_checkout_payload(
checkout, [shipping_method]
)
)
assert "checkout" in response
assert response["shipping_methods"] == [
{
"id": graphene.Node.to_global_id("ShippingMethod", "123"),
"price": "10.59",
"currency": "USD",
"name": "shipping",
"maximum_order_weight": "10.0:kg",
"minimum_order_weight": "1.0:g",
"maximum_delivery_days": 10,
"minimum_delivery_days": 2,
}
]
def test_generate_excluded_shipping_methods_for_checkout_payload(
checkout_with_items,
):
# given
shipping_method = ShippingMethodData(
id="123",
price=Money(Decimal("10.59"), "USD"),
name="shipping",
maximum_order_weight=Weight(kg=10),
minimum_order_weight=Weight(g=1),
maximum_delivery_days=10,
minimum_delivery_days=2,
)
# when
json_payload = json.loads(
_generate_excluded_shipping_methods_for_checkout_payload(
checkout_with_items, available_shipping_methods=[shipping_method]
)
)
# then
assert len(json_payload["shipping_methods"]) == 1
assert json_payload["shipping_methods"][0]["id"] == graphene.Node.to_global_id(
"ShippingMethod", shipping_method.id
)
assert "checkout" in json_payload
assert "channel" in json_payload["checkout"]
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/checkout/tests/webhooks/static_payloads/test_exclude_shipping.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/checkout/tests/webhooks/subscriptions/test_exclude_shipping.py | import json
import graphene
import pytest
from .....shipping.interface import ShippingMethodData
from .....shipping.models import ShippingMethod
from .....shipping.utils import convert_to_shipping_method_data
from .....webhook.event_types import WebhookEventSyncType
from .....webhook.transport.asynchronous.transport import (
create_deliveries_for_subscriptions,
)
CHECKOUT_FILTER_SHIPPING_METHODS = """
subscription{
event{
...on CheckoutFilterShippingMethods{
checkout{
id
}
shippingMethods{
name
id
}
}
}
}
"""
CHECKOUT_FILTER_SHIPPING_METHODS_CIRCULAR_SHIPPING_METHODS = """
subscription{
event{
...on CheckoutFilterShippingMethods{
checkout{
id
shippingMethods{
id
}
}
}
}
}
"""
CHECKOUT_FILTER_SHIPPING_METHODS_AVAILABLE_SHIPPING_METHODS = """
subscription{
event{
...on CheckoutFilterShippingMethods{
checkout{
id
availableShippingMethods{
id
}
}
}
}
}
"""
CHECKOUT_FILTER_SHIPPING_METHODS_AVAILABLE_PAYMENT_GATEWAYS = """
subscription{
event{
...on CheckoutFilterShippingMethods{
checkout{
id
availablePaymentGateways{
id
}
}
}
}
}
"""
@pytest.fixture
def subscription_checkout_filter_shipping_methods_webhook(subscription_webhook):
return subscription_webhook(
CHECKOUT_FILTER_SHIPPING_METHODS,
WebhookEventSyncType.CHECKOUT_FILTER_SHIPPING_METHODS,
)
@pytest.fixture
def subscription_checkout_filter_shipping_method_webhook_with_shipping_methods(
subscription_webhook,
):
return subscription_webhook(
CHECKOUT_FILTER_SHIPPING_METHODS_CIRCULAR_SHIPPING_METHODS,
WebhookEventSyncType.CHECKOUT_FILTER_SHIPPING_METHODS,
)
@pytest.fixture
def subscription_checkout_filter_shipping_method_webhook_with_available_ship_methods(
subscription_webhook,
):
return subscription_webhook(
CHECKOUT_FILTER_SHIPPING_METHODS_AVAILABLE_SHIPPING_METHODS,
WebhookEventSyncType.CHECKOUT_FILTER_SHIPPING_METHODS,
)
@pytest.fixture
def subscription_checkout_filter_shipping_method_webhook_with_payment_gateways(
subscription_webhook,
):
return subscription_webhook(
CHECKOUT_FILTER_SHIPPING_METHODS_AVAILABLE_PAYMENT_GATEWAYS,
WebhookEventSyncType.CHECKOUT_FILTER_SHIPPING_METHODS,
)
def test_checkout_filter_shipping_methods(
checkout_with_shipping_required,
subscription_checkout_filter_shipping_methods_webhook,
address,
shipping_method,
):
# given
checkout = checkout_with_shipping_required
checkout.shipping_address = address
checkout.shipping_method = shipping_method
webhooks = [subscription_checkout_filter_shipping_methods_webhook]
event_type = WebhookEventSyncType.CHECKOUT_FILTER_SHIPPING_METHODS
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
internal_methods: list[ShippingMethodData] = []
for method in ShippingMethod.objects.all():
shipping_method_data = convert_to_shipping_method_data(
method, method.channel_listings.get(channel=checkout.channel)
)
internal_methods.append(shipping_method_data)
# when
deliveries = create_deliveries_for_subscriptions(
event_type, (checkout, internal_methods), webhooks
)
# then
shipping_methods = [
{
"id": graphene.Node.to_global_id("ShippingMethod", sm.id),
"name": sm.name,
}
for sm in internal_methods
]
payload = json.loads(deliveries[0].payload.get_payload())
assert payload["checkout"] == {"id": checkout_id}
for method in shipping_methods:
assert method in payload["shippingMethods"]
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_checkout_filter_shipping_methods_no_methods_in_channel(
checkout,
subscription_checkout_filter_shipping_methods_webhook,
address,
shipping_method,
shipping_method_channel_PLN,
):
# given
webhooks = [subscription_checkout_filter_shipping_methods_webhook]
event_type = WebhookEventSyncType.CHECKOUT_FILTER_SHIPPING_METHODS
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
# when
deliveries = create_deliveries_for_subscriptions(
event_type, (checkout, []), webhooks
)
# then
expected_payload = {"checkout": {"id": checkout_id}, "shippingMethods": []}
assert json.loads(deliveries[0].payload.get_payload()) == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_checkout_filter_shipping_methods_with_circular_call_for_shipping_methods(
checkout_ready_to_complete,
subscription_checkout_filter_shipping_method_webhook_with_shipping_methods,
):
# given
webhooks = [
subscription_checkout_filter_shipping_method_webhook_with_shipping_methods
]
event_type = WebhookEventSyncType.CHECKOUT_FILTER_SHIPPING_METHODS
# when
deliveries = create_deliveries_for_subscriptions(
event_type, (checkout_ready_to_complete, []), webhooks
)
# then
payload = json.loads(deliveries[0].payload.get_payload())
assert len(payload["errors"]) == 1
assert (
payload["errors"][0]["message"]
== "Resolving this field is not allowed in synchronous events."
)
assert payload["checkout"] is None
def test_checkout_filter_shipping_methods_with_available_shipping_methods_field(
checkout_ready_to_complete,
subscription_checkout_filter_shipping_method_webhook_with_available_ship_methods,
):
# given
webhooks = [
subscription_checkout_filter_shipping_method_webhook_with_available_ship_methods
]
event_type = WebhookEventSyncType.CHECKOUT_FILTER_SHIPPING_METHODS
# when
deliveries = create_deliveries_for_subscriptions(
event_type, (checkout_ready_to_complete, []), webhooks
)
# then
payload = json.loads(deliveries[0].payload.get_payload())
assert len(payload["errors"]) == 1
assert (
payload["errors"][0]["message"]
== "Resolving this field is not allowed in synchronous events."
)
assert payload["checkout"] is None
def test_checkout_filter_shipping_methods_with_circular_call_for_available_gateways(
checkout_ready_to_complete,
subscription_checkout_filter_shipping_method_webhook_with_payment_gateways,
):
# given
webhooks = [
subscription_checkout_filter_shipping_method_webhook_with_payment_gateways
]
event_type = WebhookEventSyncType.CHECKOUT_FILTER_SHIPPING_METHODS
# when
deliveries = create_deliveries_for_subscriptions(
event_type, (checkout_ready_to_complete, []), webhooks
)
# then
payload = json.loads(deliveries[0].payload.get_payload())
assert len(payload["errors"]) == 1
assert (
payload["errors"][0]["message"]
== "Resolving this field is not allowed in synchronous events."
)
assert payload["checkout"] is None
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/checkout/tests/webhooks/subscriptions/test_exclude_shipping.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 217,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/checkout/tests/webhooks/test_exclude_shipping.py | import json
import uuid
from decimal import Decimal
from unittest import mock
import graphene
import pytest
from measurement.measures import Weight
from prices import Money
from promise import Promise
from ....graphql.core.utils import to_global_id_or_none
from ....graphql.tests.utils import get_graphql_content
from ....shipping.interface import ExcludedShippingMethod, ShippingMethodData
from ....shipping.models import ShippingMethod
from ....webhook.event_types import WebhookEventSyncType
from ....webhook.models import Webhook
from ...webhooks.exclude_shipping import (
excluded_shipping_methods_for_checkout,
)
CHECKOUT_QUERY_SHIPPING_METHOD = """
query Checkout($id: ID){
checkout(id: $id) {
shippingMethods {
id
name
active
}
availableShippingMethods {
id
name
active
}
}
}
"""
@pytest.fixture
def available_shipping_methods():
return [
ShippingMethodData(
id="1",
price=Money(Decimal(10), "usd"),
name=uuid.uuid4().hex,
maximum_order_weight=Weight(kg=0),
minimum_order_weight=Weight(kg=0),
maximum_delivery_days=0,
minimum_delivery_days=5,
),
ShippingMethodData(
id="2",
price=Money(Decimal(10), "usd"),
name=uuid.uuid4().hex,
maximum_order_weight=Weight(kg=0),
minimum_order_weight=Weight(kg=0),
maximum_delivery_days=0,
minimum_delivery_days=5,
),
]
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping.excluded_shipping_methods_for_checkout"
)
def test_checkout_deliveries(
mocked_webhook,
staff_api_client,
checkout_ready_to_complete,
permission_manage_checkouts,
settings,
shipping_method_weight_based,
):
# given
settings.PLUGINS = ["saleor.plugins.webhook.plugin.WebhookPlugin"]
webhook_reason = "spanish-inquisition"
excluded_shipping_method_id = (
checkout_ready_to_complete.assigned_delivery.shipping_method_id
)
mocked_webhook.return_value = Promise.resolve(
[ExcludedShippingMethod(excluded_shipping_method_id, webhook_reason)]
)
staff_api_client.user.user_permissions.add(permission_manage_checkouts)
# when
response = staff_api_client.post_graphql(
CHECKOUT_QUERY_SHIPPING_METHOD,
variables={"id": to_global_id_or_none(checkout_ready_to_complete)},
)
content = get_graphql_content(response)
checkout_data = content["data"]["checkout"]
shipping_methods = checkout_data["shippingMethods"]
# then
assert len(shipping_methods) == 2
inactive_method = list(
filter(
lambda s: s["id"]
== graphene.Node.to_global_id(
"ShippingMethod", excluded_shipping_method_id
),
shipping_methods,
)
)
assert not inactive_method[0]["active"]
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping.excluded_shipping_methods_for_checkout"
)
def test_checkout_available_shipping_methods(
mocked_webhook,
staff_api_client,
checkout_ready_to_complete,
permission_manage_checkouts,
settings,
shipping_method_weight_based,
):
# given
settings.PLUGINS = ["saleor.plugins.webhook.plugin.WebhookPlugin"]
webhook_reason = "spanish-inquisition"
excluded_shipping_method_id = (
checkout_ready_to_complete.assigned_delivery.shipping_method_id
)
mocked_webhook.return_value = Promise.resolve(
[ExcludedShippingMethod(excluded_shipping_method_id, webhook_reason)]
)
staff_api_client.user.user_permissions.add(permission_manage_checkouts)
# when
response = staff_api_client.post_graphql(
CHECKOUT_QUERY_SHIPPING_METHOD,
variables={"id": to_global_id_or_none(checkout_ready_to_complete)},
)
content = get_graphql_content(response)
shipping_methods = content["data"]["checkout"]["availableShippingMethods"]
# then
assert len(shipping_methods) == 1
assert shipping_methods[0]["active"]
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping.excluded_shipping_methods_for_checkout"
)
def test_checkout_deliveries_webhook_called_once(
mocked_webhook,
staff_api_client,
checkout_ready_to_complete,
permission_manage_checkouts,
):
# given
mocked_webhook.side_effect = [Promise.resolve([]), AssertionError("called twice.")]
staff_api_client.user.user_permissions.add(permission_manage_checkouts)
# when
response = staff_api_client.post_graphql(
CHECKOUT_QUERY_SHIPPING_METHOD,
variables={"id": to_global_id_or_none(checkout_ready_to_complete)},
)
content = get_graphql_content(response)
checkout_data = content["data"]["checkout"]
# then
expected_count = ShippingMethod.objects.count()
assert len(checkout_data["availableShippingMethods"]) == expected_count
assert len(checkout_data["shippingMethods"]) == expected_count
assert mocked_webhook.called
@mock.patch("saleor.shipping.webhooks.shared.trigger_webhook_sync_promise")
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping."
"_generate_excluded_shipping_methods_for_checkout_payload"
)
def test_excluded_shipping_methods_for_checkout_webhook_with_subscription(
mocked_static_payload,
mocked_webhook,
checkout_with_items,
available_shipping_methods,
exclude_shipping_app_with_subscription,
settings,
):
# given
shipping_app = exclude_shipping_app_with_subscription
shipping_webhook = shipping_app.webhooks.get()
webhook_reason = "Checkout contains dangerous products."
webhook_response = {
"excluded_methods": [
{
"id": graphene.Node.to_global_id("ShippingMethod", "1"),
"reason": webhook_reason,
}
]
}
mocked_webhook.return_value = Promise.resolve(webhook_response)
payload_dict = {"checkout": {"id": 1, "some_field": "12"}}
payload = json.dumps(payload_dict)
mocked_static_payload.return_value = payload
# when
excluded_methods = excluded_shipping_methods_for_checkout(
checkout_with_items,
available_shipping_methods=available_shipping_methods,
allow_replica=False,
requestor=None,
).get()
# then
assert len(excluded_methods) == 1
em = excluded_methods[0]
assert em.id == "1"
assert webhook_reason in em.reason
mocked_webhook.assert_called_once_with(
event_type=WebhookEventSyncType.CHECKOUT_FILTER_SHIPPING_METHODS,
webhook=shipping_webhook,
allow_replica=False,
static_payload=payload,
subscribable_object=(checkout_with_items, available_shipping_methods),
timeout=settings.WEBHOOK_SYNC_TIMEOUT,
requestor=None,
)
@mock.patch("saleor.shipping.webhooks.shared.trigger_webhook_sync_promise")
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping."
"_generate_excluded_shipping_methods_for_checkout_payload"
)
def test_multiple_app_with_excluded_shipping_methods_for_checkout(
mocked_payload,
mocked_webhook,
checkout_with_items,
available_shipping_methods,
app_exclude_shipping_for_checkout,
second_app_exclude_shipping_for_checkout,
settings,
):
# given
shipping_app = app_exclude_shipping_for_checkout
shipping_webhook = shipping_app.webhooks.get()
second_shipping_app = second_app_exclude_shipping_for_checkout
second_shipping_webhook = second_shipping_app.webhooks.get()
webhook_reason = "Checkout contains dangerous products."
webhook_second_reason = "Shipping is not applicable for this checkout."
first_webhook_response = {
"excluded_methods": [
{
"id": graphene.Node.to_global_id("ShippingMethod", "1"),
"reason": webhook_reason,
}
]
}
second_webhook_response = {
"excluded_methods": [
{
"id": graphene.Node.to_global_id("ShippingMethod", "1"),
"reason": webhook_second_reason,
},
{
"id": graphene.Node.to_global_id("ShippingMethod", "2"),
"reason": webhook_second_reason,
},
]
}
mocked_webhook.side_effect = [
Promise.resolve(first_webhook_response),
Promise.resolve(second_webhook_response),
]
payload_dict = {"checkout": {"id": 1, "some_field": "12"}}
payload = json.dumps(payload_dict)
mocked_payload.return_value = payload
# when
excluded_methods = excluded_shipping_methods_for_checkout(
checkout_with_items,
available_shipping_methods=available_shipping_methods,
allow_replica=False,
requestor=None,
).get()
# then
assert len(excluded_methods) == 2
em = excluded_methods[0]
assert em.id == "1"
assert webhook_reason in em.reason
assert webhook_second_reason in em.reason
event_type = WebhookEventSyncType.CHECKOUT_FILTER_SHIPPING_METHODS
mocked_webhook.assert_any_call(
event_type=event_type,
webhook=shipping_webhook,
allow_replica=False,
static_payload=payload,
subscribable_object=(checkout_with_items, available_shipping_methods),
timeout=settings.WEBHOOK_SYNC_TIMEOUT,
requestor=None,
)
mocked_webhook.assert_any_call(
event_type=event_type,
webhook=second_shipping_webhook,
allow_replica=False,
static_payload=payload,
subscribable_object=(checkout_with_items, available_shipping_methods),
timeout=settings.WEBHOOK_SYNC_TIMEOUT,
requestor=None,
)
assert mocked_webhook.call_count == 2
@mock.patch("saleor.shipping.webhooks.shared.trigger_webhook_sync_promise")
@mock.patch(
"saleor.checkout.webhooks.exclude_shipping."
"_generate_excluded_shipping_methods_for_checkout_payload"
)
def test_multiple_webhooks_on_the_same_app_with_excluded_shipping_methods_for_checkout(
mocked_payload,
mocked_webhook,
checkout_with_items,
available_shipping_methods,
app_exclude_shipping_for_checkout,
settings,
):
# given
shipping_app = app_exclude_shipping_for_checkout
first_webhook = shipping_app.webhooks.get()
event_type = WebhookEventSyncType.CHECKOUT_FILTER_SHIPPING_METHODS
# create the second webhook with the same event
second_webhook = Webhook.objects.create(
name="shipping-webhook-1",
app=shipping_app,
target_url="https://shipping-gateway.com/apiv2/",
)
second_webhook.events.create(
event_type=event_type,
webhook=second_webhook,
)
webhook_reason = "Checkout contains dangerous products."
webhook_second_reason = "Shipping is not applicable for this checkout."
first_webhook_response = {
"excluded_methods": [
{
"id": graphene.Node.to_global_id("ShippingMethod", "1"),
"reason": webhook_reason,
}
]
}
second_webhook_response = {
"excluded_methods": [
{
"id": graphene.Node.to_global_id("ShippingMethod", "1"),
"reason": webhook_second_reason,
},
{
"id": graphene.Node.to_global_id("ShippingMethod", "2"),
"reason": webhook_second_reason,
},
]
}
mocked_webhook.side_effect = [
Promise.resolve(first_webhook_response),
Promise.resolve(second_webhook_response),
]
payload_dict = {"checkout": {"id": 1, "some_field": "12"}}
payload = json.dumps(payload_dict)
mocked_payload.return_value = payload
# when
excluded_methods = excluded_shipping_methods_for_checkout(
checkout=checkout_with_items,
available_shipping_methods=available_shipping_methods,
allow_replica=False,
requestor=None,
).get()
# then
assert len(excluded_methods) == 2
em = excluded_methods[0]
assert em.id == "1"
assert webhook_reason in em.reason
assert webhook_second_reason in em.reason
mocked_webhook.assert_any_call(
event_type=event_type,
webhook=first_webhook,
allow_replica=False,
static_payload=payload,
subscribable_object=(checkout_with_items, available_shipping_methods),
timeout=settings.WEBHOOK_SYNC_TIMEOUT,
requestor=None,
)
mocked_webhook.assert_any_call(
event_type=event_type,
webhook=second_webhook,
allow_replica=False,
static_payload=payload,
subscribable_object=(checkout_with_items, available_shipping_methods),
timeout=settings.WEBHOOK_SYNC_TIMEOUT,
requestor=None,
)
assert mocked_webhook.call_count == 2
@mock.patch("saleor.webhook.transport.synchronous.transport.send_webhook_request_sync")
def test_excluded_shipping_methods_for_checkout(
mocked_webhook,
checkout_with_items,
available_shipping_methods,
app_exclude_shipping_for_checkout,
):
# given
webhook_reason = "Order contains dangerous products."
mocked_webhook.return_value = {
"excluded_methods": [
{
"id": graphene.Node.to_global_id("ShippingMethod", "1"),
"reason": webhook_reason,
}
]
}
# when
excluded_methods = excluded_shipping_methods_for_checkout(
checkout_with_items,
available_shipping_methods=available_shipping_methods,
allow_replica=False,
requestor=None,
).get()
# then
assert len(excluded_methods) == 1
em = excluded_methods[0]
assert em.id == "1"
assert webhook_reason in em.reason
mocked_webhook.assert_called_once()
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/checkout/tests/webhooks/test_exclude_shipping.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 401,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/checkout/webhooks/exclude_shipping.py | import json
import logging
from typing import TYPE_CHECKING, Union
from promise import Promise
from ...core.db.connection import allow_writer
from ...core.utils.json_serializer import CustomJsonEncoder
from ...shipping.interface import ExcludedShippingMethod, ShippingMethodData
from ...shipping.webhooks.shared import (
generate_payload_for_shipping_method,
get_excluded_shipping_data,
)
from ...webhook import traced_payload_generator
from ...webhook.event_types import WebhookEventSyncType
from ...webhook.payloads import (
generate_checkout_payload,
)
from ...webhook.utils import get_webhooks_for_event
from ..models import Checkout
if TYPE_CHECKING:
from ...account.models import User
from ...app.models import App
logger = logging.getLogger(__name__)
def excluded_shipping_methods_for_checkout(
checkout: "Checkout",
available_shipping_methods: list["ShippingMethodData"],
allow_replica: bool,
requestor: Union["App", "User", None],
) -> Promise[list[ExcludedShippingMethod]]:
if not available_shipping_methods:
return Promise.resolve([])
webhooks = get_webhooks_for_event(
WebhookEventSyncType.CHECKOUT_FILTER_SHIPPING_METHODS
)
if not webhooks:
return Promise.resolve([])
static_payload = _generate_excluded_shipping_methods_for_checkout_payload(
checkout,
available_shipping_methods,
)
return get_excluded_shipping_data(
webhooks=webhooks,
event_type=WebhookEventSyncType.CHECKOUT_FILTER_SHIPPING_METHODS,
static_payload=static_payload,
subscribable_object=(checkout, available_shipping_methods),
allow_replica=allow_replica,
requestor=requestor,
# Set cache to None as Checkout doesn't use cache flow anymore
# This field will be fully dropped after moving Order to new
# flow.
cache_data=None,
)
@allow_writer()
@traced_payload_generator
def _generate_excluded_shipping_methods_for_checkout_payload(
checkout: "Checkout",
available_shipping_methods: list[ShippingMethodData],
):
checkout_data = json.loads(generate_checkout_payload(checkout))[0]
payload = {
"checkout": checkout_data,
"shipping_methods": [
generate_payload_for_shipping_method(shipping_method)
for shipping_method in available_shipping_methods
],
}
return json.dumps(payload, cls=CustomJsonEncoder)
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/checkout/webhooks/exclude_shipping.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/core/search.py | import re
from typing import TYPE_CHECKING
from django.contrib.postgres.search import SearchQuery, SearchRank
from django.db.models import F, Value
from .utils.text import strip_accents
if TYPE_CHECKING:
from django.db.models import QuerySet
def _sanitize_word(word: str) -> str:
"""Remove PostgreSQL tsquery metacharacters from a word.
Replaces special characters that have meaning in tsquery syntax with spaces,
so that e.g. "22:20" becomes "22 20" (two separate tokens) rather than "2220".
Preserved: alphanumeric, underscore, hyphen, @, period
Replaced with space: parentheses, &, |, !, :, <, >, ', *
"""
return re.sub(r"[()&|!:<>\'*]", " ", word).strip()
def _tokenize(value: str) -> list[dict]:
"""Tokenize a search string into structured tokens.
Recognizes:
- Quoted phrases: "green tea"
- OR operator: coffee OR tea
- Negation: -decaf
- Regular words: coffee
"""
tokens: list[dict] = []
i = 0
while i < len(value):
# Skip whitespace
if value[i].isspace():
i += 1
continue
# Check for negation prefix
negated = False
if value[i] == "-" and i + 1 < len(value) and not value[i + 1].isspace():
negated = True
i += 1
# Quoted phrase
if i < len(value) and value[i] == '"':
end = value.find('"', i + 1)
if end == -1:
phrase = value[i + 1 :]
i = len(value)
else:
phrase = value[i + 1 : end]
i = end + 1
words = []
for w in phrase.split():
sanitized = _sanitize_word(w)
words.extend(sanitized.split())
if words:
tokens.append({"type": "phrase", "words": words, "negated": negated})
continue
# Read the next word
end = i
while end < len(value) and not value[end].isspace():
end += 1
raw_word = value[i:end]
i = end
# OR operator (must not be negated)
if raw_word == "OR" and not negated:
tokens.append({"type": "or"})
continue
# AND operator – explicit AND is a no-op (AND is implicit between terms)
if raw_word == "AND" and not negated:
continue
sanitized = _sanitize_word(raw_word)
for word in sanitized.split():
tokens.append({"type": "word", "word": word, "negated": negated})
return tokens
def parse_search_query(value: str) -> str | None:
"""Parse a search string into a raw PostgreSQL tsquery with prefix matching.
Supports websearch-compatible syntax:
- Multiple words (implicit AND): "coffee shop" -> coffee:* & shop:*
- OR operator: "coffee OR tea" -> coffee:* | tea:*
- Negation: "-decaf" -> !decaf:*
- Quoted phrases (exact match): '"green tea"' -> (green <-> tea)
Returns:
A raw tsquery string, or None if the input yields no searchable terms.
"""
value = value.strip()
if not value:
return None
tokens = _tokenize(value)
if not tokens:
return None
parts: list[str] = []
pending_connector = " & "
for token in tokens:
if token["type"] == "or":
pending_connector = " | "
continue
# Insert connector between terms
if parts:
parts.append(pending_connector)
pending_connector = " & "
neg = "!" if token["negated"] else ""
if token["type"] == "word":
parts.append(f"{neg}{token['word']}:*")
elif token["type"] == "phrase":
words = token["words"]
if len(words) == 1:
parts.append(f"{neg}{words[0]}")
else:
# Use <-> (followed-by) for phrase matching
phrase_tsquery = " <-> ".join(words)
if neg:
parts.append(f"!({phrase_tsquery})")
else:
parts.append(f"({phrase_tsquery})")
result = "".join(parts)
return result if result else None
def prefix_search(qs: "QuerySet", value: str) -> "QuerySet":
"""Apply prefix-based search to a queryset with perfect match prioritization.
Supports websearch-compatible syntax (AND, OR, negation, quoted phrases)
while adding prefix matching so partial words produce results.
Scoring: exact (websearch) matches get 2x weight, prefix matches get 1x.
The queryset must have a ``search_vector`` SearchVectorField.
"""
if not value:
# return a original queryset annotated with search_rank=0
# to allow default RANK sorting
return qs.annotate(search_rank=Value(0))
value = strip_accents(value)
parsed_query = parse_search_query(value)
if not parsed_query:
# return empty queryset as the provided value is not searchable
# annotated with search_rank=0 to allow default RANK sorting
return qs.annotate(search_rank=Value(0)).none()
# Prefix query – broadens matching via :*
prefix_query = SearchQuery(parsed_query, search_type="raw", config="simple")
# Exact (websearch) query – used only for ranking, not filtering
exact_query = SearchQuery(value, search_type="websearch", config="simple")
qs = qs.filter(search_vector=prefix_query).annotate(
prefix_rank=SearchRank(F("search_vector"), prefix_query),
exact_rank=SearchRank(F("search_vector"), exact_query),
search_rank=F("exact_rank") * 2 + F("prefix_rank"),
)
return qs
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/core/search.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 137,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
saleor/saleor:saleor/core/tests/test_search.py | import pytest
from django.db.models import Value
from ...account.models import User
from ...account.search import update_user_search_vector
from ...checkout.models import Checkout
from ...checkout.search.indexing import update_checkouts_search_vector
from ...product.models import Product, ProductChannelListing
from ..postgres import NoValidationSearchVector
from ..search import _sanitize_word, parse_search_query, prefix_search
def test_sanitize_word_removes_tsquery_metacharacters():
# given
word_with_metacharacters = "test()&|!:<>'"
# when
result = _sanitize_word(word_with_metacharacters)
# then
assert result == "test"
def test_sanitize_word_preserves_safe_characters():
# given
safe_word = "user-name_123@example.com"
# when
result = _sanitize_word(safe_word)
# then
assert result == "user-name_123@example.com"
def test_sanitize_word_handles_empty_string():
# given
empty_string = ""
# when
result = _sanitize_word(empty_string)
# then
assert result == ""
def test_sanitize_word_handles_only_metacharacters():
# given
only_metacharacters = "()&|!"
# when
result = _sanitize_word(only_metacharacters)
# then
assert result == ""
def test_parse_search_query_single_word():
assert parse_search_query("coffee") == "coffee:*"
def test_parse_search_query_multiple_words_implicit_and():
assert parse_search_query("coffee shop") == "coffee:* & shop:*"
def test_parse_search_query_or_operator():
assert parse_search_query("coffee OR tea") == "coffee:* | tea:*"
def test_parse_search_query_negation():
assert parse_search_query("coffee -decaf") == "coffee:* & !decaf:*"
def test_parse_search_query_quoted_phrase():
assert parse_search_query('"green tea"') == "(green <-> tea)"
def test_parse_search_query_quoted_phrase_with_other_terms():
assert parse_search_query('coffee "green tea"') == "coffee:* & (green <-> tea)"
def test_parse_search_query_negated_quoted_phrase():
assert parse_search_query('coffee -"green tea"') == "coffee:* & !(green <-> tea)"
def test_parse_search_query_or_with_negation():
assert parse_search_query("coffee OR tea -decaf") == "coffee:* | tea:* & !decaf:*"
def test_parse_search_query_with_email():
assert parse_search_query("user@example.com") == "user@example.com:*"
def test_parse_search_query_empty_string():
assert parse_search_query("") is None
def test_parse_search_query_whitespace_normalization():
assert parse_search_query(" multiple spaces ") == "multiple:* & spaces:*"
def test_parse_search_query_preserves_case():
assert parse_search_query("Coffee SHOP") == "Coffee:* & SHOP:*"
def test_parse_search_query_single_word_in_quotes():
# Single word in quotes should be an exact match (no prefix)
assert parse_search_query('"coffee"') == "coffee"
def test_parse_search_query_quoted_word_with_or():
assert parse_search_query('"aaron" OR aallen') == "aaron | aallen:*"
def test_parse_search_query_or_two_plain_words():
assert parse_search_query("aaron0 OR aallen") == "aaron0:* | aallen:*"
def test_parse_search_query_or_between_phrases():
assert (
parse_search_query('"green tea" OR "black coffee"')
== "(green <-> tea) | (black <-> coffee)"
)
def test_parse_search_query_negated_word_with_or():
assert parse_search_query("coffee OR tea -decaf -sugar") == (
"coffee:* | tea:* & !decaf:* & !sugar:*"
)
def test_parse_search_query_phrase_and_prefix_and_negation():
assert parse_search_query('"green tea" maker -cheap') == (
"(green <-> tea) & maker:* & !cheap:*"
)
def test_parse_search_query_multiple_or_operators():
assert parse_search_query("coffee OR tea OR juice") == (
"coffee:* | tea:* | juice:*"
)
def test_parse_search_query_or_with_negated_phrase():
assert parse_search_query('coffee OR -"green tea"') == (
"coffee:* | !(green <-> tea)"
)
def test_parse_search_query_quoted_exact_and_prefix_mixed():
assert parse_search_query('"exact" prefix') == "exact & prefix:*"
def test_parse_search_query_multiple_phrases_with_words():
assert parse_search_query('"green tea" "black coffee" sugar') == (
"(green <-> tea) & (black <-> coffee) & sugar:*"
)
def test_parse_search_query_or_chain_with_phrase_in_middle():
assert parse_search_query('coffee OR "green tea" OR juice') == (
"coffee:* | (green <-> tea) | juice:*"
)
def test_parse_search_query_negation_before_or():
# Negation applies to decaf, then OR connects to tea
assert parse_search_query("coffee -decaf OR tea") == ("coffee:* & !decaf:* | tea:*")
def test_parse_search_query_parentheses_are_stripped():
# Parentheses are not supported for grouping; they are stripped as special chars
assert parse_search_query("(green AND Tea) OR (coffee AND -black)") == (
"green:* & Tea:* | coffee:* & !black:*"
)
def test_parse_search_query_lowercase_or_is_regular_word():
# Only uppercase OR is recognized as an operator
assert parse_search_query("coffee or tea") == "coffee:* & or:* & tea:*"
def test_parse_search_query_lowercase_and_is_regular_word():
assert parse_search_query("coffee and tea") == "coffee:* & and:* & tea:*"
def test_parse_search_query_mixed_case_or_is_regular_word():
assert parse_search_query("coffee Or tea") == "coffee:* & Or:* & tea:*"
def test_parse_search_query_unclosed_quote():
# Unclosed quote should still parse the words as a phrase
assert parse_search_query('"green tea') == "(green <-> tea)"
def test_parse_search_query_whitespace_only_returns_none():
assert parse_search_query(" ") is None
def test_parse_search_query_hyphenated_words_preserved():
assert parse_search_query("my-product-slug") == "my-product-slug:*"
def test_parse_search_query_quoted_phrase_with_metacharacters():
assert parse_search_query('"test(value)"') == "(test <-> value)"
@pytest.fixture
def products_for_search(category, product_type, channel_USD):
products = []
for name, description in [
("Coffee Maker", "Best coffee maker"),
("Coffeehouse Special", "Special blend for coffeehouses"),
("Tea Kettle", "Great for brewing tea"),
("Magnésium B6", "Supplement with magnesium"),
("Crème brûlée", "Classic French dessert"),
]:
product = Product.objects.create(
name=name,
slug=name.lower().replace(" ", "-"),
description_plaintext=description,
product_type=product_type,
category=category,
search_vector=(
NoValidationSearchVector(Value(name), config="simple", weight="A")
+ NoValidationSearchVector(
Value(description), config="simple", weight="C"
)
),
)
ProductChannelListing.objects.create(
product=product, channel=channel_USD, is_published=True
)
products.append(product)
return products
def test_prefix_search_returns_prefix_matches(products_for_search):
# given
qs = Product.objects.all()
# when
results = prefix_search(qs, "coff")
# then
assert results.count() == 2
names = {p.name for p in results}
assert names == {"Coffee Maker", "Coffeehouse Special"}
def test_prefix_search_perfect_match_scores_higher(products_for_search):
# given
qs = Product.objects.all()
# when
results = list(prefix_search(qs, "coffee").order_by("-search_rank"))
# then – "Coffee Maker" has exact word match, ranks above "Coffeehouse"
assert len(results) == 2
assert results[0].name == "Coffee Maker"
def test_prefix_search_empty_value_returns_all(products_for_search):
# when
results = prefix_search(Product.objects.all(), "")
# then
assert results.count() == 5
def test_prefix_search_not_searchable_value_returns_nothing(products_for_search):
# when
results = prefix_search(Product.objects.all(), ":::")
# then
assert results.count() == 0
def test_prefix_search_no_matches(products_for_search):
# when
results = prefix_search(Product.objects.all(), "xyz")
# then
assert results.count() == 0
def test_prefix_search_case_insensitive(products_for_search):
# when
results = prefix_search(Product.objects.all(), "COFFEE")
# then
assert results.count() == 2
def test_prefix_search_multiple_terms_and(products_for_search):
# when
results = prefix_search(Product.objects.all(), "coffee mak")
# then – only "Coffee Maker" matches both terms
assert results.count() == 1
assert results.first().name == "Coffee Maker"
def test_prefix_search_or_operator(products_for_search):
# when
results = prefix_search(Product.objects.all(), "coffee OR tea")
# then – all three products match (Coffee Maker, Coffeehouse, Tea Kettle)
assert results.count() == 3
def test_prefix_search_negation(products_for_search):
# when
results = prefix_search(Product.objects.all(), "coffee -special")
# then – only "Coffee Maker" remains (Coffeehouse Special is excluded)
assert results.count() == 1
assert results.first().name == "Coffee Maker"
def test_prefix_search_accent_insensitive_query(products_for_search):
# given — product indexed as "Magnésium B6", query without accent
qs = Product.objects.all()
# when
results = prefix_search(qs, "Magnesium")
# then
assert results.count() == 1
assert results.first().name == "Magnésium B6"
def test_prefix_search_accented_query_finds_accented_product(products_for_search):
# given — accented query should also match
qs = Product.objects.all()
# when
results = prefix_search(qs, "Magnésium")
# then
assert results.count() == 1
assert results.first().name == "Magnésium B6"
def test_prefix_search_accent_insensitive_partial(products_for_search):
# given — partial unaccented prefix
qs = Product.objects.all()
# when
results = prefix_search(qs, "crem")
# then
assert results.count() == 1
assert results.first().name == "Crème brûlée"
def test_prefix_search_accent_insensitive_multiword(products_for_search):
# given — mixed accent/unaccented multi-word query
qs = Product.objects.all()
# when
results = prefix_search(qs, "creme brulee")
# then
assert results.count() == 1
assert results.first().name == "Crème brûlée"
@pytest.fixture
def users_for_search():
users = []
for email, first, last in [
("john.doe@example.com", "John", "Doe"),
("johnny.smith@example.com", "Johnny", "Smith"),
("jane.doe@example.com", "Jane", "Doe"),
("jose.garcia@example.com", "José", "García"),
]:
user = User.objects.create(email=email, first_name=first, last_name=last)
update_user_search_vector(user, attach_addresses_data=False)
users.append(user)
return users
def test_prefix_search_users(users_for_search):
# when
results = prefix_search(User.objects.all(), "joh")
# then
assert results.count() == 2
emails = {u.email for u in results}
assert emails == {"john.doe@example.com", "johnny.smith@example.com"}
def test_prefix_search_users_perfect_match_priority(users_for_search):
# when
results = list(prefix_search(User.objects.all(), "john").order_by("-search_rank"))
# then – "John" exact match should rank higher than "Johnny"
assert len(results) == 2
assert results[0].first_name == "John"
def test_prefix_search_users_accent_insensitive(users_for_search):
# given — user indexed as "José García", query without accent
qs = User.objects.all()
# when
results = prefix_search(qs, "Jose")
# then
assert results.count() == 1
assert results.first().first_name == "José"
def test_prefix_search_users_accented_query(users_for_search):
# given — accented query should also match
qs = User.objects.all()
# when
results = prefix_search(qs, "José García")
# then
assert results.count() == 1
assert results.first().email == "jose.garcia@example.com"
def test_prefix_search_users_partial_accent_insensitive(users_for_search):
# given — partial unaccented prefix on last name
qs = User.objects.all()
# when
results = prefix_search(qs, "Garci")
# then
assert results.count() == 1
assert results.first().last_name == "García"
def test_prefix_search_checkout_exact_match_priority(
channel_USD,
):
# given
checkouts = []
# Checkout 1: email starts with "aaron00"
checkout1 = Checkout.objects.create(
channel=channel_USD,
email="aaron00@example.net",
user=None,
currency="USD",
)
# Checkout 2: different email, user is "Aaron Smith" (exact match for "aaron")
user2 = User.objects.create(
email="smith.other@example.com", first_name="Aaron", last_name="Smith"
)
checkout2 = Checkout.objects.create(
channel=channel_USD,
email=user2.email,
user=user2,
currency="USD",
)
# Checkout 3: email contains "aaron" in middle, user is "Bob Wilson"
# - not found as search uses prefix match
user3 = User.objects.create(
email="bob.wilson@example.com", first_name="Bob", last_name="Wilson"
)
checkout3 = Checkout.objects.create(
channel=channel_USD,
email=user3.email,
user=user3,
currency="USD",
)
# Checkout 4: user last name is "Aaron" (exact match), and email starts with `aaron`
user4 = User.objects.create(
email="aaron.thompson@example.com", first_name="Jane", last_name="Aaron"
)
checkout4 = Checkout.objects.create(
channel=channel_USD,
email=user4.email,
user=user4,
currency="USD",
)
# Checkout 5: no match at all
user5 = User.objects.create(
email="charlie.brown@example.com", first_name="Charlie", last_name="Brown"
)
checkout5 = Checkout.objects.create(
channel=channel_USD,
email=user5.email,
user=user5,
currency="USD",
)
checkouts = [checkout1, checkout2, checkout3, checkout4, checkout5]
# Update search vectors
update_checkouts_search_vector(checkouts)
# when
results = list(
prefix_search(Checkout.objects.all(), "aaron").order_by("-search_rank")
)
# then
expected_indexes = [3, 1, 0]
assert len(results) == len(expected_indexes)
for result, expected_index in zip(results, expected_indexes, strict=False):
assert result.pk == checkouts[expected_index].pk
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/core/tests/test_search.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 352,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/app/lock_objects.py | from django.db.models import QuerySet
from .models import App, AppProblem
def app_qs_select_for_update() -> QuerySet[App]:
return App.objects.order_by("pk").select_for_update(of=["self"])
def app_problem_qs_select_for_update() -> QuerySet[AppProblem]:
return AppProblem.objects.order_by("pk").select_for_update(of=["self"])
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/app/lock_objects.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/app/tests/fixtures/app_problem.py | from datetime import datetime
import pytest
from django.utils import timezone
from ....account.models import User
from ...models import App, AppProblem
@pytest.fixture
def app_problem_generator():
def create_problem(
app: App,
key: str = "test-key",
message: str = "Test problem",
count: int = 1,
is_critical: bool = False,
dismissed: bool = False,
dismissed_by_user: User | None = None,
updated_at: datetime | None = None,
) -> AppProblem:
if updated_at is None:
updated_at = timezone.now()
dismissed_by_user_email = None
if dismissed_by_user is not None:
dismissed_by_user_email = dismissed_by_user.email
problem = AppProblem.objects.create(
app=app,
message=message,
key=key,
count=count,
is_critical=is_critical,
dismissed=dismissed,
dismissed_by_user_email=dismissed_by_user_email,
dismissed_by_user=dismissed_by_user,
)
# Use .update() to set specific updated_at for testing since auto_now=True
# ignores values passed to create/save
if updated_at is not None:
AppProblem.objects.filter(pk=problem.pk).update(updated_at=updated_at)
problem.refresh_from_db()
return problem
return create_problem
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/app/tests/fixtures/app_problem.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
saleor/saleor:saleor/graphql/app/dataloaders/app_problems.py | from collections import defaultdict
from ....app.models import AppProblem
from ...core.dataloaders import DataLoader
class AppProblemsByAppIdLoader(DataLoader[int, list[AppProblem]]):
context_key = "app_problems_by_app_id"
def batch_load(self, keys):
problems = AppProblem.objects.using(self.database_connection_name).filter(
app_id__in=keys
)
problem_map = defaultdict(list)
for problem in problems:
problem_map[problem.app_id].append(problem)
return [problem_map.get(app_id, []) for app_id in keys]
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/app/dataloaders/app_problems.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
saleor/saleor:saleor/graphql/app/mutations/app_problem_create.py | import datetime
from typing import Annotated, Any, cast
import graphene
from django.utils import timezone
from pydantic import BaseModel, ConfigDict, Field, StringConstraints, field_validator
from pydantic import ValidationError as PydanticValidationError
from ....app.error_codes import (
AppProblemCreateErrorCode as AppProblemCreateErrorCodeEnum,
)
from ....app.lock_objects import app_qs_select_for_update
from ....app.models import App as AppModel
from ....app.models import AppProblem
from ....core.tracing import traced_atomic_transaction
from ....permission.auth_filters import AuthorizationFilters
from ...core import ResolveInfo
from ...core.descriptions import ADDED_IN_322
from ...core.doc_category import DOC_CATEGORY_APPS
from ...core.mutations import BaseMutation
from ...core.scalars import Minute, PositiveInt
from ...core.types import Error
from ...error import pydantic_to_validation_error
from ..enums import AppProblemCreateErrorCode
from ..types import AppProblem as AppProblemType
class AppProblemCreateError(Error):
code = AppProblemCreateErrorCode(description="The error code.", required=True)
class AppProblemCreateValidatedInput(BaseModel):
model_config = ConfigDict(frozen=True)
message: Annotated[str, StringConstraints(min_length=3)]
key: Annotated[str, StringConstraints(min_length=3, max_length=128)]
# No threshold - will never escalate to critical if not set by App itself
critical_threshold: Annotated[int, Field(ge=1)] | None = None
# Minutes
aggregation_period: Annotated[int, Field(ge=0)] = 60
@field_validator("message", mode="after")
@classmethod
def truncate_message(cls, v: str) -> str:
"""Truncate message to 2048 characters including '...' suffix if too long."""
if len(v) > 2048:
return v[:2045] + "..."
return v
@field_validator("aggregation_period", mode="before")
@classmethod
def default_aggregation_period(cls, v: int | None) -> int:
"""Accept null from GraphQL and map to default (60)."""
if v is None:
return 60
return v
class AppProblemCreateInput(graphene.InputObjectType):
message = graphene.String(
required=True,
description=(
"The problem message to display. Must be at least 3 characters. "
"Messages longer than 2048 characters will be truncated to 2048 "
"characters with '...' suffix."
),
)
key = graphene.String(
required=True,
description=(
"Key identifying the type of problem. App can add multiple problems under "
"the same key, to merge them together or delete them in batch. "
"Must be between 3 and 128 characters."
),
)
critical_threshold = PositiveInt(
required=False,
description=(
"If set, the problem becomes critical when count reaches this value. "
"If sent again with higher value than already counted, problem can be de-escalated."
),
)
aggregation_period = Minute(
required=False,
default_value=60,
description=(
"Time window in minutes for aggregating problems with the same key. "
"Defaults to 60. If 0, a new problem is always created."
),
)
class AppProblemCreate(BaseMutation):
app_problem = graphene.Field(
AppProblemType, description="The created or updated app problem."
)
class Arguments:
input = AppProblemCreateInput(
required=True, description="Fields required to create an app problem."
)
class Meta:
description = "Add a problem to the calling app." + ADDED_IN_322
doc_category = DOC_CATEGORY_APPS
permissions = (AuthorizationFilters.AUTHENTICATED_APP,)
error_type_class = AppProblemCreateError
@classmethod
def perform_mutation(
cls, _root: None, info: ResolveInfo, /, **data: Any
) -> "AppProblemCreate":
app = cast(AppModel, info.context.app)
input_data = data["input"]
try:
validated = AppProblemCreateValidatedInput(**input_data)
except PydanticValidationError as exc:
raise pydantic_to_validation_error(
exc, default_error_code=AppProblemCreateErrorCodeEnum.INVALID.value
) from exc
now = timezone.now()
with traced_atomic_transaction():
# Lock the App row to serialize all problem operations for this app
# At this point it trades performance for correctness
# If we need to improve performance, we can skip locking entire app row and
# schedule a cleanup task for > 100 items
app_qs_select_for_update().filter(pk=app.pk).first()
existing = (
AppProblem.objects.filter(app=app, key=validated.key, dismissed=False)
.order_by("-updated_at")
.first()
)
if not existing:
problem = cls._create_new_problem(app, validated, now)
return AppProblemCreate(app_problem=problem)
# Flow for existing / update
aggregation_enabled = validated.aggregation_period > 0
cutoff = now - datetime.timedelta(minutes=validated.aggregation_period)
within_aggregation_window = (
aggregation_enabled and existing.updated_at >= cutoff
)
if not within_aggregation_window:
problem = cls._create_new_problem(app, validated, now)
return AppProblemCreate(app_problem=problem)
cls._aggregate_existing(existing, validated, now)
return AppProblemCreate(app_problem=existing)
@classmethod
def _aggregate_existing(
cls,
existing: AppProblem,
validated: AppProblemCreateValidatedInput,
now: datetime.datetime,
) -> None:
# In transaction block - we can safely modify in memory
existing.count += 1
existing.is_critical = bool(
validated.critical_threshold
and existing.count >= validated.critical_threshold
)
existing.message = validated.message
existing.updated_at = now
existing.save(update_fields=["count", "updated_at", "message", "is_critical"])
@classmethod
def _create_new_problem(
cls,
app: AppModel,
validated: AppProblemCreateValidatedInput,
now: datetime.datetime,
) -> AppProblem:
total_count = AppProblem.objects.filter(app=app).count()
# +1 accounts for the new problem we're about to create
excess_count = total_count - AppProblem.MAX_PROBLEMS_PER_APP + 1
if excess_count > 0:
oldest_pks = list(
AppProblem.objects.filter(app=app)
.order_by("updated_at")
.values_list("pk", flat=True)[:excess_count]
)
if oldest_pks:
AppProblem.objects.filter(pk__in=oldest_pks).delete()
immediately_critical = bool(
validated.critical_threshold and validated.critical_threshold <= 1
)
return AppProblem.objects.create(
app=app,
message=validated.message,
key=validated.key,
count=1,
is_critical=immediately_critical,
)
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/app/mutations/app_problem_create.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 174,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
saleor/saleor:saleor/graphql/app/mutations/app_problem_dismiss.py | from typing import Any, cast
import graphene
from django.core.exceptions import ValidationError
from ....account.models import User
from ....app.error_codes import (
AppProblemDismissErrorCode as AppProblemDismissErrorCodeEnum,
)
from ....app.lock_objects import app_problem_qs_select_for_update
from ....app.models import App as AppModel
from ....app.models import AppProblem
from ....core.tracing import traced_atomic_transaction
from ....permission.auth_filters import AuthorizationFilters
from ....permission.enums import AppPermission
from ...core import ResolveInfo
from ...core.descriptions import ADDED_IN_322
from ...core.doc_category import DOC_CATEGORY_APPS
from ...core.mutations import BaseMutation
from ...core.types import BaseInputObjectType, Error, NonNullList
from ...core.utils import from_global_id_or_error
from ...core.validators import validate_one_of_args_is_in_mutation
from ...utils import get_user_or_app_from_context
from ..enums import AppProblemDismissErrorCode
from ..types import App
MAX_ITEMS_LIMIT = 100
class AppProblemDismissError(Error):
code = AppProblemDismissErrorCode(description="The error code.", required=True)
class AppProblemDismissByAppInput(BaseInputObjectType):
"""Input for app callers to dismiss their own problems."""
ids = NonNullList(
graphene.ID,
required=False,
description=f"List of problem IDs to dismiss. Cannot be combined with keys. Max {MAX_ITEMS_LIMIT}.",
)
keys = NonNullList(
graphene.String,
required=False,
description=f"List of problem keys to dismiss. Cannot be combined with ids. Max {MAX_ITEMS_LIMIT}.",
)
class Meta:
doc_category = DOC_CATEGORY_APPS
class AppProblemDismissByStaffWithIdsInput(BaseInputObjectType):
"""Input for staff callers to dismiss problems by IDs."""
ids = NonNullList(
graphene.ID,
required=True,
description=f"List of problem IDs to dismiss. Max {MAX_ITEMS_LIMIT}.",
)
class Meta:
doc_category = DOC_CATEGORY_APPS
class AppProblemDismissByStaffWithKeysInput(BaseInputObjectType):
"""Input for staff callers to dismiss problems by keys."""
keys = NonNullList(
graphene.String,
required=True,
description=f"List of problem keys to dismiss. Max {MAX_ITEMS_LIMIT}.",
)
app = graphene.ID(
required=True,
description="ID of the app whose problems to dismiss.",
)
class Meta:
doc_category = DOC_CATEGORY_APPS
class AppProblemDismissInput(BaseInputObjectType):
"""Input for dismissing app problems. Only one can be specified."""
by_app = graphene.Field(
AppProblemDismissByAppInput,
description="For app callers only - dismiss own problems.",
)
by_staff_with_ids = graphene.Field(
AppProblemDismissByStaffWithIdsInput,
description="For staff callers - dismiss problems by IDs.",
)
by_staff_with_keys = graphene.Field(
AppProblemDismissByStaffWithKeysInput,
description="For staff callers - dismiss problems by keys for specified app.",
)
class Meta:
doc_category = DOC_CATEGORY_APPS
class AppProblemDismiss(BaseMutation):
class Arguments:
input = AppProblemDismissInput(
required=True,
description="Input for dismissing app problems.",
)
class Meta:
description = "Dismiss problems for an app." + ADDED_IN_322
doc_category = DOC_CATEGORY_APPS
permissions = (
AppPermission.MANAGE_APPS,
AuthorizationFilters.AUTHENTICATED_APP,
)
error_type_class = AppProblemDismissError
@classmethod
def perform_mutation(
cls, _root: None, info: ResolveInfo, /, **data: Any
) -> "AppProblemDismiss":
caller_app = info.context.app
input_data = data.get("input", {})
by_app = input_data.get("by_app")
by_staff_with_ids = input_data.get("by_staff_with_ids")
by_staff_with_keys = input_data.get("by_staff_with_keys")
validate_one_of_args_is_in_mutation(
"by_app",
by_app,
"by_staff_with_ids",
by_staff_with_ids,
"by_staff_with_keys",
by_staff_with_keys,
use_camel_case=True,
)
if by_app and caller_app:
cls._validate_by_app_input(by_app)
cls._dismiss_for_app_caller(by_app, caller_app)
elif by_staff_with_ids and not caller_app:
cls._validate_items_limit(by_staff_with_ids["ids"], "ids")
cls._dismiss_by_ids_for_staff(info, by_staff_with_ids["ids"])
elif by_staff_with_keys and not caller_app:
cls._validate_items_limit(by_staff_with_keys["keys"], "keys")
cls._dismiss_by_keys_for_staff(
info, by_staff_with_keys["keys"], by_staff_with_keys["app"]
)
else:
cls._raise_caller_type_mismatch(by_app, by_staff_with_ids)
return AppProblemDismiss()
@classmethod
def _raise_caller_type_mismatch(
cls,
by_app: dict | None,
by_staff_with_ids: dict | None,
) -> None:
if by_app is not None:
raise ValidationError(
{
"byApp": ValidationError(
"Only app callers can use 'byApp'.",
code=AppProblemDismissErrorCodeEnum.INVALID.value,
)
}
)
field = "byStaffWithIds" if by_staff_with_ids else "byStaffWithKeys"
raise ValidationError(
{
field: ValidationError(
"App callers cannot use this input. Use 'byApp' instead.",
code=AppProblemDismissErrorCodeEnum.INVALID.value,
)
}
)
@classmethod
def _validate_by_app_input(cls, by_app: dict) -> None:
"""Validate byApp input has exactly one of ids or keys."""
ids = by_app.get("ids")
keys = by_app.get("keys")
if ids and keys:
raise ValidationError(
{
"byApp": ValidationError(
"Cannot specify both 'ids' and 'keys'.",
code=AppProblemDismissErrorCodeEnum.INVALID.value,
)
}
)
if not ids and not keys:
raise ValidationError(
{
"byApp": ValidationError(
"Must provide either 'ids' or 'keys'.",
code=AppProblemDismissErrorCodeEnum.REQUIRED.value,
)
}
)
items = ids or keys
assert items is not None
cls._validate_items_limit(items, "ids" if ids else "keys")
@classmethod
def _dismiss_for_app_caller(
cls,
by_app: dict,
caller_app: AppModel,
) -> None:
"""Dismiss problems for an app caller (can only dismiss own problems)."""
ids = by_app.get("ids")
keys = by_app.get("keys")
if ids:
problem_pks = cls._parse_problem_ids(ids)
with traced_atomic_transaction():
# Validate that all provided IDs belong to the caller app
other_app_problems = AppProblem.objects.filter(
pk__in=problem_pks
).exclude(app=caller_app)
if other_app_problems.exists():
raise ValidationError(
{
"ids": ValidationError(
"Cannot dismiss problems belonging to other apps.",
code=AppProblemDismissErrorCodeEnum.INVALID.value,
)
}
)
pks = (
app_problem_qs_select_for_update()
.filter(pk__in=problem_pks, app=caller_app, dismissed=False)
.values_list("pk", flat=True)
)
AppProblem.objects.filter(pk__in=pks).update(dismissed=True)
else:
with traced_atomic_transaction():
pks = (
app_problem_qs_select_for_update()
.filter(key__in=keys, app=caller_app, dismissed=False)
.values_list("pk", flat=True)
)
AppProblem.objects.filter(pk__in=pks).update(dismissed=True)
@classmethod
def _validate_items_limit(cls, items: list[str], field_name: str) -> None:
"""Validate that items list does not exceed MAX_ITEMS_LIMIT."""
if len(items) > MAX_ITEMS_LIMIT:
raise ValidationError(
{
field_name: ValidationError(
f"Cannot specify more than {MAX_ITEMS_LIMIT} items.",
code=AppProblemDismissErrorCodeEnum.INVALID.value,
)
}
)
@classmethod
def _dismiss_by_ids_for_staff(
cls,
info: ResolveInfo,
ids: list[str],
) -> None:
"""Dismiss problems by IDs for a staff caller."""
requestor = cast(User, get_user_or_app_from_context(info.context))
problem_pks = cls._parse_problem_ids(ids)
with traced_atomic_transaction():
pks = (
app_problem_qs_select_for_update()
.filter(pk__in=problem_pks, dismissed=False)
.values_list("pk", flat=True)
)
AppProblem.objects.filter(pk__in=pks).update(
dismissed=True,
dismissed_by_user_email=requestor.email,
dismissed_by_user=requestor,
)
@classmethod
def _dismiss_by_keys_for_staff(
cls,
info: ResolveInfo,
keys: list[str],
app_id: str,
) -> None:
"""Dismiss problems by keys for a staff caller."""
requestor = cast(User, get_user_or_app_from_context(info.context))
target_app = cls.get_node_or_error(info, app_id, field="app", only_type=App)
with traced_atomic_transaction():
pks = (
app_problem_qs_select_for_update()
.filter(app=target_app, key__in=keys, dismissed=False)
.values_list("pk", flat=True)
)
AppProblem.objects.filter(pk__in=pks).update(
dismissed=True,
dismissed_by_user_email=requestor.email,
dismissed_by_user=requestor,
)
@classmethod
def _parse_problem_ids(cls, global_ids: list[str]) -> list[int]:
"""Convert global IDs to database PKs."""
problem_pks = []
for global_id in global_ids:
_, pk = from_global_id_or_error(global_id, "AppProblem")
try:
problem_pks.append(int(pk))
except (ValueError, TypeError) as err:
raise ValidationError(
{
"ids": ValidationError(
f"Invalid ID: {global_id}.",
code=AppProblemDismissErrorCodeEnum.INVALID.value,
)
}
) from err
return problem_pks
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/app/mutations/app_problem_dismiss.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 284,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
saleor/saleor:saleor/graphql/app/tests/mutations/test_app_problem_create.py | from .....app.models import AppProblem
from ....tests.utils import assert_no_permission, get_graphql_content
APP_PROBLEM_CREATE_MUTATION = """
mutation AppProblemCreate($input: AppProblemCreateInput!) {
appProblemCreate(input: $input) {
appProblem {
id
message
key
count
isCritical
updatedAt
}
errors {
field
code
message
}
}
}
"""
def test_app_problem_create(app_api_client, app):
# given
variables = {"input": {"message": "Something went wrong", "key": "error-1"}}
# when
response = app_api_client.post_graphql(APP_PROBLEM_CREATE_MUTATION, variables)
content = get_graphql_content(response)
# then
data = content["data"]["appProblemCreate"]
assert not data["errors"]
problem_data = data["appProblem"]
assert problem_data["message"] == variables["input"]["message"]
assert problem_data["key"] == variables["input"]["key"]
assert problem_data["count"] == 1
assert problem_data["isCritical"] is False
db_problem = AppProblem.objects.get(app=app)
assert db_problem.message == variables["input"]["message"]
assert db_problem.key == variables["input"]["key"]
assert db_problem.count == 1
def test_app_problem_create_by_staff_user_fails(
staff_api_client, permission_manage_apps
):
# given
staff_api_client.user.user_permissions.add(permission_manage_apps)
variables = {"input": {"message": "Something went wrong", "key": "err"}}
# when
response = staff_api_client.post_graphql(APP_PROBLEM_CREATE_MUTATION, variables)
# then
assert_no_permission(response)
| {
"repo_id": "saleor/saleor",
"file_path": "saleor/graphql/app/tests/mutations/test_app_problem_create.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.