sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
langflow-ai/langflow:src/backend/tests/integration/components/bundles/cometapi/test_cometapi_integration.py | import os
from unittest.mock import MagicMock, patch
import pytest
from lfx.components.cometapi.cometapi import CometAPIComponent
class TestCometAPIIntegration:
    """Integration tests for CometAPI component."""

    @pytest.fixture
    def component(self):
        """Create a CometAPI component instance for testing."""
        return CometAPIComponent()

    @pytest.fixture
    def mock_api_key(self):
        """Mock API key for testing."""
        return "test-cometapi-key"

    def test_component_import(self):
        """Test that the CometAPI component can be imported."""
        from lfx.components.cometapi.cometapi import CometAPIComponent

        assert CometAPIComponent is not None

    def test_component_instantiation(self, component):
        """Test that the component can be instantiated."""
        assert component is not None
        assert component.display_name == "CometAPI"
        assert component.name == "CometAPIModel"

    def test_component_inputs_present(self, component):
        """Test that all expected inputs are present."""
        input_names = [input_.name for input_ in component.inputs]
        expected_inputs = [
            "api_key",
            "app_name",
            "model_name",
            "model_kwargs",
            "temperature",
            "max_tokens",
            "seed",
            "json_mode",
        ]
        for expected_input in expected_inputs:
            assert expected_input in input_names

    @patch("requests.get")
    def test_model_fetching_integration(self, mock_get, component, mock_api_key):
        """Test the complete model fetching flow."""
        # Mock successful API response
        mock_response = MagicMock()
        mock_response.json.return_value = {
            "data": [{"id": "gpt-4o-mini"}, {"id": "claude-3-5-haiku-latest"}, {"id": "gemini-2.5-flash"}]
        }
        mock_response.raise_for_status.return_value = None
        mock_get.return_value = mock_response
        # Set API key
        component.set_attributes({"api_key": mock_api_key})
        # Fetch models
        models = component.get_models()
        # Verify results
        assert models == ["gpt-4o-mini", "claude-3-5-haiku-latest", "gemini-2.5-flash"]
        mock_get.assert_called_once()

    @patch("lfx.components.cometapi.cometapi.ChatOpenAI")
    def test_model_building_integration(self, mock_chat_openai, component, mock_api_key):
        """Test the complete model building flow."""
        # Mock ChatOpenAI
        mock_instance = MagicMock()
        mock_chat_openai.return_value = mock_instance
        # Configure component
        component.set_attributes(
            {
                "api_key": mock_api_key,
                "model_name": "gpt-4o-mini",
                "temperature": 0.7,
                "max_tokens": 1000,
                "seed": 42,
                "json_mode": False,
            }
        )
        # Build model
        model = component.build_model()
        # Verify ChatOpenAI was called correctly
        assert mock_chat_openai.call_count == 1
        _args, kwargs = mock_chat_openai.call_args
        assert kwargs["model"] == "gpt-4o-mini"
        assert kwargs["api_key"] == "test-cometapi-key"
        assert kwargs["max_tokens"] == 1000
        assert kwargs["temperature"] == 0.7
        assert kwargs["model_kwargs"] == {}
        # streaming defaults to None when not explicitly set
        assert kwargs.get("streaming") in (None, False)
        assert kwargs["seed"] == 42
        assert kwargs["base_url"] == "https://api.cometapi.com/v1"
        assert model == mock_instance

    @patch("lfx.components.cometapi.cometapi.ChatOpenAI")
    def test_json_mode_integration(self, mock_chat_openai, component, mock_api_key):
        """Test JSON mode integration."""
        # Mock ChatOpenAI and bind method
        mock_instance = MagicMock()
        mock_bound_instance = MagicMock()
        mock_instance.bind.return_value = mock_bound_instance
        mock_chat_openai.return_value = mock_instance
        # Configure component with JSON mode
        component.set_attributes({"api_key": mock_api_key, "model_name": "gpt-4o-mini", "json_mode": True})
        # Build model
        model = component.build_model()
        # Verify JSON mode binding
        mock_instance.bind.assert_called_once_with(response_format={"type": "json_object"})
        assert model == mock_bound_instance

    def test_error_handling_integration(self, component):
        """Test error handling in integration scenarios."""
        # Test with invalid model name
        component.set_attributes({"model_name": "Select a model"})
        with pytest.raises(ValueError, match="Please select a valid CometAPI model"):
            component.build_model()

    @patch("requests.get")
    def test_fallback_behavior_integration(self, mock_get, component):
        """Test fallback behavior when API is unavailable."""
        # Mock API failure
        import requests

        mock_get.side_effect = requests.RequestException("Network error")
        # Should fallback to constants
        models = component.get_models()
        # Should return fallback models
        from lfx.base.models.cometapi_constants import MODEL_NAMES

        assert models == MODEL_NAMES
        assert "Error fetching models" in component.status

    def test_update_build_config_integration(self, component):
        """Test update_build_config integration."""
        build_config = {"model_name": {"value": "current-model", "placeholder": "Select a model"}}
        with patch.object(component, "get_models", return_value=["gpt-4o-mini", "claude-3-5-haiku-latest"]):
            updated_config = component.update_build_config(build_config, "new-key", "api_key")
        # Verify config was updated
        assert "model_name" in updated_config
        model_config = updated_config["model_name"]
        assert "gpt-4o-mini" in model_config["options"]
        assert "claude-3-5-haiku-latest" in model_config["options"]

    @pytest.mark.skipif(os.getenv("COMETAPI_KEY") is None, reason="COMETAPI_KEY not set")
    def test_real_api_integration(self, component):
        """Test with real API key if available."""
        component.set_attributes(
            {"api_key": os.getenv("COMETAPI_KEY"), "model_name": "gpt-4o-mini", "temperature": 0.1, "max_tokens": 50}
        )
        # Test model building
        model = component.build_model()
        assert model is not None
        # Test model fetching
        models = component.get_models()
        assert isinstance(models, list)
        assert len(models) > 0

    def test_component_serialization(self, component):
        """Test that component can be serialized/deserialized."""
        # Set some values
        component.set_attributes({"api_key": "test-key", "model_name": "gpt-4o-mini", "temperature": 0.5})
        # Test that component attributes are accessible
        assert component.api_key == "test-key"
        assert component.model_name == "gpt-4o-mini"
        assert component.temperature == 0.5

    def test_component_validation(self, component):
        """Test component input validation."""
        # Test with valid inputs
        component.set_attributes(
            {"api_key": "valid-key", "model_name": "gpt-4o-mini", "temperature": 0.7, "max_tokens": 1000}
        )
        # Should not raise any validation errors
        assert component.api_key is not None
        assert component.model_name is not None

    def test_component_defaults(self, component):
        """Test component default values."""
        # Check that component has reasonable defaults
        assert component.temperature == 0.7
        assert component.seed == 1
        assert component.json_mode is False
        assert component.model_kwargs == {}
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/integration/components/bundles/cometapi/test_cometapi_integration.py",
"license": "MIT License",
"lines": 167,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/components/bundles/cometapi/test_cometapi_component.py | import os
from unittest.mock import MagicMock, patch
import pytest
from langchain_openai import ChatOpenAI
from lfx.components.cometapi.cometapi import CometAPIComponent
from pydantic.v1 import SecretStr
from tests.base import ComponentTestBaseWithoutClient
class TestCometAPIComponent(ComponentTestBaseWithoutClient):
    """Unit tests for the CometAPI component (mock-driven plus optional live tests)."""

    @pytest.fixture
    def component_class(self):
        """Return the component class under test."""
        return CometAPIComponent

    @pytest.fixture
    def default_kwargs(self):
        """Return default attribute values used to configure the component."""
        return {
            "api_key": "test-cometapi-key",
            "model_name": "gpt-4o-mini",
            "temperature": 0.7,
            "max_tokens": 1000,
            "seed": 1,
            "json_mode": False,
            "model_kwargs": {},
            "app_name": "test-app",
        }

    @pytest.fixture
    def file_names_mapping(self):
        """No version-specific component files for this component."""
        return []

    def test_basic_setup(self, component_class, default_kwargs):
        """Test basic component initialization."""
        component = component_class()
        component.set_attributes(default_kwargs)
        assert component.display_name == "CometAPI"
        assert component.description == "All AI Models in One API 500+ AI Models"
        assert component.icon == "CometAPI"
        assert component.name == "CometAPIModel"
        assert component.api_key == "test-cometapi-key"
        assert component.model_name == "gpt-4o-mini"
        assert component.temperature == 0.7
        assert component.max_tokens == 1000
        assert component.seed == 1
        assert component.json_mode is False
        assert component.app_name == "test-app"

    @patch("lfx.components.cometapi.cometapi.ChatOpenAI")
    def test_build_model_success(self, mock_chat_openai, component_class, default_kwargs):
        """Test successful model building."""
        mock_instance = MagicMock()
        mock_chat_openai.return_value = mock_instance
        component = component_class()
        component.set_attributes(default_kwargs)
        model = component.build_model()
        mock_chat_openai.assert_called_once_with(
            model="gpt-4o-mini",
            api_key="test-cometapi-key",
            max_tokens=1000,
            temperature=0.7,
            model_kwargs={},
            streaming=False,
            seed=1,
            base_url="https://api.cometapi.com/v1",
        )
        assert model == mock_instance

    @patch("lfx.components.cometapi.cometapi.ChatOpenAI")
    def test_build_model_with_json_mode(self, mock_chat_openai, component_class, default_kwargs):
        """Test model building with JSON mode enabled."""
        mock_instance = MagicMock()
        mock_bound_instance = MagicMock()
        mock_instance.bind.return_value = mock_bound_instance
        mock_chat_openai.return_value = mock_instance
        default_kwargs["json_mode"] = True
        component = component_class()
        component.set_attributes(default_kwargs)
        model = component.build_model()
        mock_chat_openai.assert_called_once()
        mock_instance.bind.assert_called_once_with(response_format={"type": "json_object"})
        assert model == mock_bound_instance

    @patch("lfx.components.cometapi.cometapi.ChatOpenAI")
    def test_build_model_with_streaming(self, mock_chat_openai, component_class, default_kwargs):
        """Test model building with streaming enabled."""
        mock_instance = MagicMock()
        mock_chat_openai.return_value = mock_instance
        component = component_class()
        component.set_attributes(default_kwargs)
        component.stream = True
        component.build_model()
        _args, kwargs = mock_chat_openai.call_args
        assert kwargs["streaming"] is True

    def test_build_model_invalid_model_selection(self, component_class, default_kwargs):
        """Test that invalid model selection raises ValueError."""
        default_kwargs["model_name"] = "Select a model"
        component = component_class()
        component.set_attributes(default_kwargs)
        with pytest.raises(ValueError, match="Please select a valid CometAPI model"):
            component.build_model()

    @patch("lfx.components.cometapi.cometapi.ChatOpenAI")
    def test_build_model_exception_handling(self, mock_chat_openai, component_class, default_kwargs):
        """Test that build_model handles exceptions properly."""
        mock_chat_openai.side_effect = ValueError("Invalid API key")
        component = component_class()
        component.set_attributes(default_kwargs)
        with pytest.raises(ValueError, match="Could not connect to CometAPI"):
            component.build_model()

    @patch("requests.get")
    def test_get_models_success(self, mock_get, component_class, default_kwargs):
        """Test successful model fetching from API."""
        mock_response = MagicMock()
        mock_response.json.return_value = {
            "data": [{"id": "gpt-4o-mini"}, {"id": "claude-3-5-haiku-latest"}, {"id": "gemini-2.5-flash"}]
        }
        mock_response.raise_for_status.return_value = None
        mock_get.return_value = mock_response
        component = component_class()
        component.set_attributes(default_kwargs)
        models = component.get_models()
        assert models == ["gpt-4o-mini", "claude-3-5-haiku-latest", "gemini-2.5-flash"]
        mock_get.assert_called_once_with(
            "https://api.cometapi.com/v1/models",
            headers={"Content-Type": "application/json", "Authorization": "Bearer test-cometapi-key"},
            timeout=10,
        )

    @patch("requests.get")
    def test_get_models_json_decode_error(self, mock_get, component_class, default_kwargs):
        """Test model fetching with JSON decode error."""
        mock_response = MagicMock()
        mock_response.json.side_effect = ValueError("Invalid JSON")
        mock_response.raise_for_status.return_value = None
        mock_get.return_value = mock_response
        component = component_class()
        component.set_attributes(default_kwargs)
        models = component.get_models()
        # Should return fallback models
        from lfx.base.models.cometapi_constants import MODEL_NAMES

        assert models == MODEL_NAMES
        assert "Error decoding models response" in component.status

    @pytest.mark.skipif(os.getenv("COMETAPI_KEY") is None, reason="COMETAPI_KEY is not set")
    def test_build_model_integration(self):
        """Integration test with real API key (if available)."""
        component = CometAPIComponent()
        component.api_key = SecretStr(os.getenv("COMETAPI_KEY"))
        component.model_name = "gpt-4o-mini"
        component.temperature = 0.2
        component.max_tokens = 100
        component.seed = 42
        model = component.build_model()
        assert isinstance(model, ChatOpenAI)
        assert model.model_name == "gpt-4o-mini"
        assert model.openai_api_base == "https://api.cometapi.com/v1"

    @pytest.mark.skipif(os.getenv("COMETAPI_KEY") is None, reason="COMETAPI_KEY is not set")
    def test_get_models_integration(self):
        """Integration test for get_models with real API key (if available)."""
        component = CometAPIComponent()
        component.api_key = SecretStr(os.getenv("COMETAPI_KEY"))
        models = component.get_models()
        assert isinstance(models, list)
        assert len(models) > 0
        # Should contain some expected models
        expected_models = ["gpt-4o-mini", "claude-3-5-haiku-latest", "gemini-2.5-flash"]
        assert any(model in models for model in expected_models)

    def test_component_inputs_structure(self, component_class):
        """Test that component has all required inputs."""
        component = component_class()
        input_names = [input_.name for input_ in component.inputs]
        expected_inputs = [
            "api_key",
            "app_name",
            "model_name",
            "model_kwargs",
            "temperature",
            "max_tokens",
            "seed",
            "json_mode",
        ]
        for input_name in expected_inputs:
            assert input_name in input_names

    def test_component_input_requirements(self, component_class):
        """Test that required inputs are properly marked."""
        component = component_class()
        # Find required inputs
        required_inputs = [input_ for input_ in component.inputs if input_.required]
        required_names = [input_.name for input_ in required_inputs]
        assert "api_key" in required_names
        assert "model_name" in required_names

    def test_component_input_types(self, component_class):
        """Test that inputs have correct types."""
        component = component_class()
        # Find specific inputs by name
        api_key_input = next(input_ for input_ in component.inputs if input_.name == "api_key")
        model_name_input = next(input_ for input_ in component.inputs if input_.name == "model_name")
        temperature_input = next(input_ for input_ in component.inputs if input_.name == "temperature")
        assert api_key_input.field_type.value == "str"  # SecretStrInput
        assert model_name_input.field_type.value == "str"  # DropdownInput (actually returns "str")
        assert temperature_input.field_type.value == "slider"  # SliderInput
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/components/bundles/cometapi/test_cometapi_component.py",
"license": "MIT License",
"lines": 190,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/components/bundles/cometapi/test_cometapi_constants.py | import pytest
from lfx.base.models.cometapi_constants import COMETAPI_MODELS, MODEL_NAMES
class TestCometAPIConstants:
    """Sanity checks on the CometAPI model constants."""

    def test_cometapi_models_not_empty(self):
        """The fallback model list must be a non-empty list."""
        assert len(COMETAPI_MODELS) > 0
        assert isinstance(COMETAPI_MODELS, list)

    def test_model_names_alias(self):
        """MODEL_NAMES must be the very same object as COMETAPI_MODELS."""
        assert MODEL_NAMES == COMETAPI_MODELS
        assert MODEL_NAMES is COMETAPI_MODELS  # Should be the same object

    def test_models_are_strings(self):
        """Every entry must be a non-empty string."""
        assert all(isinstance(entry, str) for entry in COMETAPI_MODELS)
        assert all(len(entry) > 0 for entry in COMETAPI_MODELS)

    def test_specific_models_present(self):
        """A handful of well-known model ids must be listed."""
        required = ["gpt-4o-mini", "claude-3-5-haiku-latest", "gemini-2.5-flash", "deepseek-chat"]
        missing = [name for name in required if name not in COMETAPI_MODELS]
        assert not missing

    def test_no_duplicate_models(self):
        """No model id may appear twice."""
        assert len(COMETAPI_MODELS) == len(set(COMETAPI_MODELS))

    @pytest.mark.parametrize(
        "model_name", ["gpt-4o-mini", "claude-3-5-haiku-latest", "gemini-2.5-flash", "deepseek-chat", "grok-3"]
    )
    def test_specific_model_in_list(self, model_name):
        """Each parametrized model id must be present in the list."""
        assert model_name in COMETAPI_MODELS
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/components/bundles/cometapi/test_cometapi_constants.py",
"license": "MIT License",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/lfx/src/lfx/base/models/cometapi_constants.py | """CometAPI model constants and configuration.
This module contains the default model names available through CometAPI.
These models are used as fallbacks when the API is unavailable or when
no API key is provided.
"""
from typing import Final
# CometAPI available model list based on actual API offerings
COMETAPI_MODELS: Final[list[str]] = [
# GPT series
"gpt-5-chat-latest",
"chatgpt-4o-latest",
"gpt-5-mini",
"gpt-5-nano",
"gpt-5",
"gpt-4.1-mini",
"gpt-4.1-nano",
"gpt-4.1",
"gpt-4o-mini",
"o4-mini-2025-04-16",
"o3-pro-2025-06-10",
# Claude series
"claude-sonnet-4-5-20250929",
"claude-opus-4-1-20250805",
"claude-opus-4-1-20250805-thinking",
"claude-sonnet-4-20250514",
"claude-sonnet-4-20250514-thinking",
"claude-3-7-sonnet-latest",
"claude-3-5-haiku-latest",
# Gemini series
"gemini-2.5-pro",
"gemini-2.5-flash",
"gemini-2.5-flash-lite",
"gemini-2.0-flash",
# Grok series
"grok-4-0709",
"grok-3",
"grok-3-mini",
"grok-2-image-1212",
# DeepSeek series
"deepseek-v3.1",
"deepseek-v3",
"deepseek-r1-0528",
"deepseek-chat",
"deepseek-reasoner",
# Qwen series
"qwen3-30b-a3b",
"qwen3-coder-plus-2025-07-22",
]
# Backward compatibility alias
MODEL_NAMES: Final[list[str]] = COMETAPI_MODELS
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/base/models/cometapi_constants.py",
"license": "MIT License",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/cometapi/cometapi.py | import json
import requests
from langchain_openai import ChatOpenAI
from pydantic.v1 import SecretStr
from typing_extensions import override
from lfx.base.models.cometapi_constants import MODEL_NAMES
from lfx.base.models.model import LCModelComponent
from lfx.field_typing import LanguageModel
from lfx.field_typing.range_spec import RangeSpec
from lfx.inputs.inputs import (
BoolInput,
DictInput,
DropdownInput,
IntInput,
SecretStrInput,
SliderInput,
StrInput,
)
class CometAPIComponent(LCModelComponent):
    """CometAPI component for language models.

    Wraps the OpenAI-compatible CometAPI endpoint via ``ChatOpenAI``,
    exposing model selection, sampling parameters, and JSON mode.
    """

    display_name = "CometAPI"
    description = "All AI Models in One API 500+ AI Models"
    icon = "CometAPI"
    name = "CometAPIModel"

    # Single source of truth for the CometAPI endpoint; previously this URL
    # was hard-coded in three places (model listing and model building).
    API_BASE_URL = "https://api.cometapi.com/v1"

    inputs = [
        *LCModelComponent.get_base_inputs(),
        SecretStrInput(
            name="api_key",
            display_name="CometAPI Key",
            required=True,
            info="Your CometAPI key",
            real_time_refresh=True,
        ),
        StrInput(
            name="app_name",
            display_name="App Name",
            info="Your app name for CometAPI rankings",
            advanced=True,
        ),
        DropdownInput(
            name="model_name",
            display_name="Model",
            info="The model to use for chat completion",
            options=["Select a model"],
            value="Select a model",
            real_time_refresh=True,
            required=True,
        ),
        DictInput(
            name="model_kwargs",
            display_name="Model Kwargs",
            info="Additional keyword arguments to pass to the model.",
            advanced=True,
        ),
        SliderInput(
            name="temperature",
            display_name="Temperature",
            value=0.7,
            range_spec=RangeSpec(min=0, max=2, step=0.01),
            info="Controls randomness. Lower values are more deterministic, higher values are more creative.",
            advanced=True,
        ),
        IntInput(
            name="max_tokens",
            display_name="Max Tokens",
            info="Maximum number of tokens to generate",
            advanced=True,
        ),
        IntInput(
            name="seed",
            display_name="Seed",
            info="Seed for reproducible outputs.",
            value=1,
            advanced=True,
        ),
        BoolInput(
            name="json_mode",
            display_name="JSON Mode",
            info="If enabled, the model will be asked to return a JSON object.",
            advanced=True,
        ),
    ]

    def get_models(self, token_override: str | None = None) -> list[str]:
        """Fetch available model ids from the CometAPI ``/models`` endpoint.

        Args:
            token_override: Optional API key to use instead of ``self.api_key``
                (used when the key changes in the UI before it is committed).

        Returns:
            List of model id strings; falls back to the static ``MODEL_NAMES``
            list on any network or decoding error (recording the error in
            ``self.status``).
        """
        url = f"{self.API_BASE_URL}/models"
        headers = {"Content-Type": "application/json"}
        # Add Bearer Authorization when API key is available
        api_key_source = token_override if token_override else getattr(self, "api_key", None)
        if api_key_source:
            token = api_key_source.get_secret_value() if isinstance(api_key_source, SecretStr) else str(api_key_source)
            headers["Authorization"] = f"Bearer {token}"
        try:
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()
            # Safely parse JSON; fallback to defaults on failure
            try:
                model_list = response.json()
            except (json.JSONDecodeError, ValueError) as e:
                self.status = f"Error decoding models response: {e}"
                return MODEL_NAMES
            return [model["id"] for model in model_list.get("data", [])]
        except requests.RequestException as e:
            self.status = f"Error fetching models: {e}"
            return MODEL_NAMES

    @override
    def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):
        """Refresh the model dropdown options when the API key changes."""
        if field_name == "api_key":
            models = self.get_models(field_value)
            model_cfg = build_config.get("model_name", {})
            # Preserve placeholder (fallback to existing value or a generic prompt)
            placeholder = model_cfg.get("placeholder", model_cfg.get("value", "Select a model"))
            current_value = model_cfg.get("value")
            options = list(models) if models else []
            # Ensure current value stays visible even if not present in fetched options
            if current_value and current_value not in options:
                options = [current_value, *options]
            model_cfg["options"] = options
            model_cfg["placeholder"] = placeholder
            build_config["model_name"] = model_cfg
        return build_config

    def build_model(self) -> LanguageModel:  # type: ignore[type-var]
        """Build a ``ChatOpenAI`` instance pointed at the CometAPI endpoint.

        Raises:
            ValueError: If no valid model is selected, or if the client
                cannot be constructed (wrapped as "Could not connect").
        """
        api_key = self.api_key
        temperature = self.temperature
        model_name: str = self.model_name
        max_tokens = self.max_tokens
        model_kwargs = getattr(self, "model_kwargs", {}) or {}
        json_mode = self.json_mode
        seed = self.seed
        # Ensure a valid model was selected (the dropdown's default is a prompt)
        if not model_name or model_name == "Select a model":
            msg = "Please select a valid CometAPI model."
            raise ValueError(msg)
        try:
            # Extract raw API key safely
            _api_key = api_key.get_secret_value() if isinstance(api_key, SecretStr) else api_key
            output = ChatOpenAI(
                model=model_name,
                api_key=_api_key or None,
                max_tokens=max_tokens or None,
                temperature=temperature,
                model_kwargs=model_kwargs,
                streaming=bool(self.stream),
                seed=seed,
                base_url=self.API_BASE_URL,
            )
        except (TypeError, ValueError) as e:
            msg = "Could not connect to CometAPI."
            raise ValueError(msg) from e
        if json_mode:
            # Ask the model to emit a JSON object via the OpenAI response_format API
            output = output.bind(response_format={"type": "json_object"})
        return output
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/cometapi/cometapi.py",
"license": "MIT License",
"lines": 151,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langflow-ai/langflow:src/backend/tests/unit/components/tools/test_python_code_structured_tool.py | from unittest.mock import patch
import pytest
from langflow.components.tools.python_code_structured_tool import PythonCodeStructuredTool
from tests.base import ComponentTestBaseWithoutClient
class TestPythonCodeStructuredTool(ComponentTestBaseWithoutClient):
    """Test cases for PythonCodeStructuredTool component."""

    @pytest.fixture
    def component_class(self):
        """Return the component class to test."""
        return PythonCodeStructuredTool

    @pytest.fixture
    def default_kwargs(self):
        """Return the default kwargs for the component."""
        return {
            "tool_code": "def test_func(): return 42",
            "tool_name": "test_tool",
            "tool_description": "Test description",
            "return_direct": False,
            "tool_function": "test_func",
            "global_variables": [],
        }

    @pytest.fixture
    def file_names_mapping(self):
        """Return an empty list since this component doesn't have version-specific files."""
        return []

    async def test_initialization(self, component_class, default_kwargs):
        """Test proper initialization of component."""
        component = await self.component_setup(component_class, default_kwargs)
        assert component.display_name == "Python Code Structured"
        assert component.description == "structuredtool dataclass code to tool"
        assert component.name == "PythonCodeStructuredTool"
        assert component.icon == "Python"
        assert component.legacy is True
        # Check DEFAULT_KEYS
        expected_keys = [
            "code",
            "_type",
            "text_key",
            "tool_code",
            "tool_name",
            "tool_description",
            "return_direct",
            "tool_function",
            "global_variables",
            "_classes",
            "_functions",
        ]
        assert expected_keys == component.DEFAULT_KEYS

    async def test_field_order(self, component_class, default_kwargs):
        """Test field order configuration."""
        component = await self.component_setup(component_class, default_kwargs)
        # field_order can be None if not explicitly set at instance level
        assert hasattr(component, "field_order")
        # The class-level field_order is what we expect
        expected_order = ["name", "description", "tool_code", "return_direct", "tool_function"]
        assert PythonCodeStructuredTool.field_order == expected_order

    async def test_inputs_configuration(self, component_class, default_kwargs):
        """Test that inputs are properly configured."""
        component = await self.component_setup(component_class, default_kwargs)
        # The component should have inputs defined
        assert hasattr(component, "inputs")
        assert len(component.inputs) > 0
        # Check for expected input names
        input_names = [input_field.name for input_field in component.inputs]
        expected_inputs = ["tool_code", "tool_name", "tool_description", "return_direct", "tool_function"]
        for expected_input in expected_inputs:
            assert expected_input in input_names

    async def test_outputs_configuration(self, component_class, default_kwargs):
        """Test that outputs are properly configured."""
        component = await self.component_setup(component_class, default_kwargs)
        assert hasattr(component, "outputs")
        assert len(component.outputs) > 0

    @pytest.mark.asyncio
    async def test_build_tool_method_exists(self, component_class, default_kwargs):
        """Test that build_tool method exists and can be called."""
        component = await self.component_setup(component_class, default_kwargs)
        # Set up required attributes for build_tool
        component.tool_code = "def test_func(): return 42"
        component.tool_name = "test_tool"
        component.tool_description = "Test description"
        component.return_direct = False
        component.tool_function = "test_func"
        component.global_variables = []
        # Mock the internal parsing methods
        with patch.object(component, "_parse_code") as mock_parse:
            mock_parse.return_value = ({}, [{"name": "test_func", "args": []}])
            # The method should exist and be callable
            assert hasattr(component, "build_tool")
            assert callable(component.build_tool)
            # We won't test the full execution due to complex setup requirements
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/components/tools/test_python_code_structured_tool.py",
"license": "MIT License",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/core/test_celery_app.py | """Unit tests for langflow.core.celery_app module."""
from unittest.mock import MagicMock, patch
import pytest
@pytest.mark.skip(reason="Skipping celery app tests as they cover unused code")
class TestMakeCelery:
"""Unit tests for the make_celery function."""
def test_make_celery_creates_celery_instance(self):
"""Test that make_celery creates a functional Celery instance."""
from langflow.core.celery_app import make_celery
# Create a mock config module
mock_config = MagicMock()
mock_config.broker_url = "memory://"
mock_config.result_backend = "cache+memory://"
mock_config.accept_content = ["json"]
# Mock the import of the config module
with patch.dict("sys.modules", {"langflow.core.celeryconfig": mock_config}):
# Act - Create a real Celery instance
celery_app = make_celery("test_app", "langflow.core.celeryconfig")
# Assert - Test actual functionality
assert celery_app.main == "test_app"
assert hasattr(celery_app, "config_from_object")
assert hasattr(celery_app, "send_task")
assert hasattr(celery_app, "control")
# Verify the app can be used (basic functionality)
assert celery_app.conf.broker_url == "memory://"
assert celery_app.conf.result_backend == "cache+memory://"
def test_make_celery_configures_from_object(self):
"""Test that configuration is actually applied to the Celery instance."""
from langflow.core.celery_app import make_celery
# Create a mock config module with specific values
mock_config = MagicMock()
mock_config.broker_url = "redis://test:6379/0"
mock_config.result_backend = "redis://test:6379/0"
mock_config.accept_content = ["json", "pickle"]
mock_config.task_serializer = "json"
with patch.dict("sys.modules", {"langflow.core.celeryconfig": mock_config}):
celery_app = make_celery("test_app", "langflow.core.celeryconfig")
# Verify configuration was actually applied
assert celery_app.conf.broker_url == "redis://test:6379/0"
assert celery_app.conf.result_backend == "redis://test:6379/0"
assert celery_app.conf.accept_content == ["json", "pickle"]
def test_make_celery_sets_task_routes(self):
"""Test that different app names create different Celery instances."""
from langflow.core.celery_app import make_celery
mock_config = MagicMock()
mock_config.broker_url = "memory://"
mock_config.result_backend = "cache+memory://"
mock_config.accept_content = ["json"]
with patch.dict("sys.modules", {"langflow.core.celeryconfig": mock_config}):
app1 = make_celery("app1", "langflow.core.celeryconfig")
app2 = make_celery("app2", "langflow.core.celeryconfig")
# Different apps should have different main names
assert app1.main == "app1"
assert app2.main == "app2"
assert app1 is not app2
def test_make_celery_returns_celery_instance(self):
"""Test that the returned Celery app can perform basic operations."""
from langflow.core.celery_app import make_celery
mock_config = MagicMock()
mock_config.broker_url = "memory://"
mock_config.result_backend = "cache+memory://"
mock_config.accept_content = ["json"]
with patch.dict("sys.modules", {"langflow.core.celeryconfig": mock_config}):
celery_app = make_celery("test_app", "langflow.core.celeryconfig")
# Test that the app can be inspected
assert celery_app.main == "test_app"
# Test that it has the expected Celery interface
assert hasattr(celery_app, "send_task")
assert hasattr(celery_app, "control")
assert hasattr(celery_app, "conf")
# Test that configuration is accessible
assert hasattr(celery_app.conf, "broker_url")
assert hasattr(celery_app.conf, "result_backend")
def test_make_celery_with_different_app_names(self):
"""Test that the Celery app can work with actual task definitions."""
from langflow.core.celery_app import make_celery
mock_config = MagicMock()
mock_config.broker_url = "memory://"
mock_config.result_backend = "cache+memory://"
mock_config.accept_content = ["json"]
with patch.dict("sys.modules", {"langflow.core.celeryconfig": mock_config}):
celery_app = make_celery("test_app", "langflow.core.celeryconfig")
# Define a simple task
@celery_app.task
def test_task(x, y):
return x + y
# Test that the task is registered (Celery uses full module path)
# The task will be registered with the full module path
task_found = False
for task_name in celery_app.tasks:
if task_name.endswith(".test_task"):
task_found = True
task_info = celery_app.tasks[task_name]
assert task_info.name == task_name
break
assert task_found, "test_task should be registered in celery_app.tasks"
def test_make_celery_with_different_configs(self):
"""Test that make_celery works with different configuration strings."""
from langflow.core.celery_app import make_celery
# Test with different config modules
mock_config1 = MagicMock()
mock_config1.broker_url = "redis://test1:6379/0"
mock_config1.result_backend = "redis://test1:6379/0"
mock_config1.accept_content = ["json"]
mock_config2 = MagicMock()
mock_config2.broker_url = "redis://test2:6379/0"
mock_config2.result_backend = "redis://test2:6379/0"
mock_config2.accept_content = ["pickle"]
with patch.dict("sys.modules", {"langflow.core.celeryconfig": mock_config1}):
app1 = make_celery("test_app", "langflow.core.celeryconfig")
assert app1.conf.broker_url == "redis://test1:6379/0"
with patch.dict("sys.modules", {"langflow.core.celeryconfig": mock_config2}):
app2 = make_celery("test_app", "langflow.core.celeryconfig")
assert app2.conf.broker_url == "redis://test2:6379/0"
def test_make_celery_function_signature(self):
"""Test that make_celery function has the expected signature."""
import inspect
from langflow.core.celery_app import make_celery
# Assert
sig = inspect.signature(make_celery)
params = list(sig.parameters.keys())
assert len(params) == 2
assert "app_name" in params
assert "config" in params
def test_make_celery_docstring(self):
"""Test that make_celery function exists and is callable."""
from langflow.core.celery_app import make_celery
# Assert
# The function exists and is callable
assert callable(make_celery)
assert make_celery.__name__ == "make_celery"
def test_make_celery_error_handling(self):
"""Test that make_celery function handles errors appropriately."""
from langflow.core.celery_app import make_celery
# Test with invalid config that causes Celery to fail
with patch("langflow.core.celery_app.Celery") as mock_celery_class:
mock_celery_class.side_effect = Exception("Celery creation failed")
with pytest.raises(Exception, match="Celery creation failed"):
make_celery("test_app", "test.config")
def test_make_celery_configuration_application(self):
"""Test that the module-level celery_app instance is created correctly."""
# This tests the actual instance created at module level
from langflow.core.celery_app import celery_app
# Should be a Celery instance
assert hasattr(celery_app, "main")
assert hasattr(celery_app, "conf")
assert hasattr(celery_app, "send_task")
# Should have the expected app name
assert celery_app.main == "langflow"
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/core/test_celery_app.py",
"license": "MIT License",
"lines": 149,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/core/test_celeryconfig.py | """Unit tests for langflow.core.celeryconfig module."""
# Import the module to test
from langflow.core import celeryconfig
class TestCeleryConfigAcceptContent:
    """Unit tests for the accept_content configuration value."""

    def test_accept_content_configuration(self):
        """accept_content must be exactly ["json", "pickle"]."""
        assert celeryconfig.accept_content == ["json", "pickle"]

    def test_accept_content_types(self):
        """Both expected serializers are present and nothing else is."""
        content = celeryconfig.accept_content
        assert len(content) == 2
        for serializer in ("json", "pickle"):
            assert serializer in content

    def test_accept_content_is_list(self):
        """accept_content must be a plain list."""
        assert isinstance(celeryconfig.accept_content, list)

    def test_accept_content_contains_strings(self):
        """Every entry in accept_content must be a string."""
        assert all(isinstance(entry, str) for entry in celeryconfig.accept_content)
class TestCeleryConfigVariables:
    """Unit tests for the top-level configuration variables."""

    def test_required_config_variables_exist(self):
        """broker_url, result_backend and accept_content must all be defined."""
        for name in ("broker_url", "result_backend", "accept_content"):
            assert hasattr(celeryconfig, name), f"Missing required config variable: {name}"

    def test_config_variables_have_expected_types(self):
        """The URLs are strings; accept_content is a list."""
        assert isinstance(celeryconfig.broker_url, str)
        assert isinstance(celeryconfig.result_backend, str)
        assert isinstance(celeryconfig.accept_content, list)

    def test_broker_url_format(self):
        """broker_url must be a Redis or AMQP (RabbitMQ) URL."""
        url = celeryconfig.broker_url
        assert url.startswith(("redis://", "amqp://")), f"Unexpected broker_url format: {url}"

    def test_result_backend_format(self):
        """result_backend must be a Redis URL."""
        backend = celeryconfig.result_backend
        assert backend.startswith("redis://"), f"Unexpected result_backend format: {backend}"

    def test_broker_url_not_empty(self):
        """broker_url must not be an empty string."""
        assert len(celeryconfig.broker_url) > 0

    def test_result_backend_not_empty(self):
        """result_backend must not be an empty string."""
        assert len(celeryconfig.result_backend) > 0
class TestCeleryConfigStructure:
    """Unit tests for the structural shape of the configured URLs."""

    def test_broker_url_contains_protocol(self):
        """broker_url must include a protocol separator."""
        assert "://" in celeryconfig.broker_url

    def test_result_backend_contains_protocol(self):
        """result_backend must include a protocol separator."""
        assert "://" in celeryconfig.result_backend

    def test_broker_url_contains_host(self):
        """broker_url must carry a non-empty host portion after the protocol."""
        url = celeryconfig.broker_url
        if "://" in url:
            # Everything after the first "://" — host, port, path.
            _, _, remainder = url.partition("://")
            assert len(remainder) > 0

    def test_result_backend_contains_host(self):
        """result_backend must carry a non-empty host portion after the protocol."""
        url = celeryconfig.result_backend
        if "://" in url:
            _, _, remainder = url.partition("://")
            assert len(remainder) > 0
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/core/test_celeryconfig.py",
"license": "MIT License",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/utils/test_async_helpers.py | from unittest.mock import Mock, patch
import pytest
from lfx.utils.async_helpers import run_until_complete, timeout_context
class TestTimeoutContext:
    """Test cases for the timeout_context helper."""

    def test_timeout_context_exists(self):
        """timeout_context must be importable and callable."""
        assert callable(timeout_context)

    def test_timeout_context_is_async_context_manager(self):
        """timeout_context(...) must expose the async context-manager protocol."""
        manager = timeout_context(1.0)
        # Both async CM hooks must exist and be callable.
        for hook in ("__aenter__", "__aexit__"):
            assert hasattr(manager, hook)
            assert callable(getattr(manager, hook))
class TestRunUntilComplete:
    """Test cases for run_until_complete function.

    run_until_complete is expected to run a coroutine to completion whether
    or not an event loop is already running: via asyncio.run when no loop is
    active, or via a ThreadPoolExecutor worker when one is. The mock-based
    tests below pin both code paths without actually running coroutines.
    NOTE(review): decorator order matters — @patch decorators are applied
    bottom-up, so mock arguments arrive innermost-first.
    """

    def test_run_until_complete_basic(self):
        """Test basic functionality of run_until_complete."""

        async def simple_coro():
            return 42

        result = run_until_complete(simple_coro())
        assert result == 42

    def test_run_until_complete_with_exception(self):
        """Test run_until_complete with exception."""

        async def failing_coro():
            msg = "Test error"
            raise ValueError(msg)

        with pytest.raises(ValueError, match="Test error"):
            run_until_complete(failing_coro())

    def test_run_until_complete_different_return_types(self):
        """Test run_until_complete with different return types."""
        test_cases = [
            (42, int),
            ("hello", str),
            ([1, 2, 3], list),
            ({"key": "value"}, dict),
            (None, type(None)),
        ]
        for expected_value, expected_type in test_cases:
            # Bind the loop variable as a default argument to avoid the
            # classic late-binding-closure pitfall.
            async def typed_coro(val=expected_value):
                return val

            result = run_until_complete(typed_coro())
            assert result == expected_value
            assert isinstance(result, expected_type)

    @patch("asyncio.get_running_loop")
    def test_run_until_complete_no_running_loop(self, mock_get_running_loop):
        """Test run_until_complete when no event loop is running."""
        # Mock no running loop (raises RuntimeError)
        mock_get_running_loop.side_effect = RuntimeError("No running event loop")

        async def simple_coro():
            return "no_loop"

        with patch("asyncio.run") as mock_run:
            mock_run.return_value = "no_loop"
            coro = simple_coro()
            try:
                result = run_until_complete(coro)
            finally:
                # Close the coroutine to avoid "coroutine was never awaited" warning
                # since the mocked asyncio.run doesn't actually run it
                coro.close()
            mock_run.assert_called_once()
            assert result == "no_loop"

    @patch("asyncio.get_running_loop")
    @patch("concurrent.futures.ThreadPoolExecutor")
    def test_run_until_complete_with_running_loop_path(self, mock_executor_class, mock_get_running_loop):
        """Test run_until_complete when event loop is already running."""
        # Mock that there is a running loop
        mock_get_running_loop.return_value = Mock()

        # Mock the ThreadPoolExecutor and future; __enter__ is mocked because
        # run_until_complete uses the executor as a context manager.
        mock_executor = Mock()
        mock_future = Mock()
        mock_future.result.return_value = "thread_result"
        mock_executor.submit.return_value = mock_future
        mock_executor_class.return_value.__enter__.return_value = mock_executor

        async def simple_coro():
            return "thread_result"

        coro = simple_coro()
        try:
            result = run_until_complete(coro)
        finally:
            # Close the coroutine to avoid "coroutine was never awaited" warning
            # since the mocked executor doesn't actually run it
            coro.close()

        # Verify ThreadPoolExecutor was used
        mock_executor_class.assert_called_once()
        mock_executor.submit.assert_called_once()
        assert result == "thread_result"

    @patch("asyncio.get_running_loop")
    @patch("concurrent.futures.ThreadPoolExecutor")
    def test_run_until_complete_thread_pool_exception(self, mock_executor_class, mock_get_running_loop):
        """Test run_until_complete handles thread pool exceptions."""
        mock_get_running_loop.return_value = Mock()

        async def failing_coro():
            msg = "Thread pool test error"
            raise ValueError(msg)

        # Mock executor to raise exception when the future's result is read.
        mock_executor = Mock()
        mock_future = Mock()
        mock_future.result.side_effect = ValueError("Thread pool test error")
        mock_executor.submit.return_value = mock_future
        mock_executor_class.return_value.__enter__.return_value = mock_executor

        coro = failing_coro()
        try:
            with pytest.raises(ValueError, match="Thread pool test error"):
                run_until_complete(coro)
        finally:
            # Close the coroutine to avoid "coroutine was never awaited" warning
            # since the mocked executor doesn't actually run it
            coro.close()

    @patch("asyncio.get_running_loop")
    @patch("concurrent.futures.ThreadPoolExecutor")
    @patch("asyncio.new_event_loop")
    def test_run_until_complete_new_loop_cleanup(self, mock_new_loop, mock_executor_class, mock_get_running_loop):
        """Test that new event loop is properly cleaned up."""
        mock_get_running_loop.return_value = Mock()

        # Event loop setup for thread execution
        mock_loop_instance = Mock()
        mock_loop_instance.run_until_complete.return_value = "cleanup_test"
        mock_new_loop.return_value = mock_loop_instance

        mock_executor = Mock()
        mock_future = Mock()
        mock_future.result.return_value = "cleanup_test"
        mock_executor.submit.return_value = mock_future
        mock_executor_class.return_value.__enter__.return_value = mock_executor

        async def simple_coro():
            return "cleanup_test"

        coro = simple_coro()
        try:
            result = run_until_complete(coro)
        finally:
            # Close the coroutine to avoid "coroutine was never awaited" warning
            # since the mocked executor doesn't actually run it
            coro.close()

        # Verify the loop operations happened in the thread
        assert result == "cleanup_test"
        mock_executor.submit.assert_called_once()

    @patch("asyncio.get_running_loop")
    @patch("concurrent.futures.ThreadPoolExecutor")
    @patch("asyncio.new_event_loop")
    def test_run_until_complete_new_loop_exception_cleanup(
        self, mock_new_loop, mock_executor_class, mock_get_running_loop
    ):
        """Test that event loop is cleaned up even when exception occurs."""
        mock_get_running_loop.return_value = Mock()

        # Event loop setup for thread execution
        async def failing_coro():
            msg = "Loop exception test"
            raise RuntimeError(msg)

        mock_loop_instance = Mock()
        mock_loop_instance.run_until_complete.side_effect = RuntimeError("Loop exception test")
        mock_new_loop.return_value = mock_loop_instance

        mock_executor = Mock()
        mock_future = Mock()
        mock_future.result.side_effect = RuntimeError("Loop exception test")
        mock_executor.submit.return_value = mock_future
        mock_executor_class.return_value.__enter__.return_value = mock_executor

        coro = failing_coro()
        try:
            with pytest.raises(RuntimeError, match="Loop exception test"):
                run_until_complete(coro)
        finally:
            # Close the coroutine to avoid "coroutine was never awaited" warning
            # since the mocked executor doesn't actually run it
            coro.close()

        # Verify executor was still called
        mock_executor.submit.assert_called_once()
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/utils/test_async_helpers.py",
"license": "MIT License",
"lines": 169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/utils/test_component_utils.py | from unittest.mock import Mock
from langflow.schema.dotdict import dotdict
from langflow.utils.component_utils import (
DEFAULT_FIELDS,
add_fields,
delete_fields,
get_fields,
merge_build_configs,
set_current_fields,
set_field_advanced,
set_field_display,
set_multiple_field_advanced,
set_multiple_field_display,
update_fields,
update_input_types,
)
class TestUpdateFields:
    """Test cases for update_fields function."""

    def test_update_existing_fields(self):
        """Existing keys are overwritten with the new values."""
        config = dotdict({"field1": "old_value", "field2": "old_value2"})
        updated = update_fields(config, {"field1": "new_value", "field2": "new_value2"})
        assert updated["field1"] == "new_value"
        assert updated["field2"] == "new_value2"

    def test_update_non_existing_fields_ignored(self):
        """Keys absent from the config are silently skipped."""
        config = dotdict({"field1": "value1"})
        updated = update_fields(config, {"field1": "new_value", "non_existing": "ignored"})
        assert updated["field1"] == "new_value"
        assert "non_existing" not in updated

    def test_update_empty_fields(self):
        """An empty update mapping leaves the config untouched."""
        config = dotdict({"field1": "value1"})
        assert update_fields(config, {})["field1"] == "value1"

    def test_update_fields_returns_same_object(self):
        """update_fields mutates in place and returns the same mapping."""
        config = dotdict({"field1": "value1"})
        updated = update_fields(config, {"field1": "new_value"})
        assert updated is config
        assert config["field1"] == "new_value"
class TestAddFields:
    """Test cases for add_fields function."""

    def test_add_new_fields(self):
        """New keys are inserted alongside existing ones."""
        config = dotdict({"existing": "value"})
        merged = add_fields(config, {"new_field1": "value1", "new_field2": "value2"})
        assert merged["existing"] == "value"
        assert merged["new_field1"] == "value1"
        assert merged["new_field2"] == "value2"

    def test_add_fields_overwrites_existing(self):
        """Adding a key that already exists replaces its value."""
        config = dotdict({"field1": "old_value"})
        merged = add_fields(config, {"field1": "new_value", "field2": "value2"})
        assert merged["field1"] == "new_value"
        assert merged["field2"] == "value2"

    def test_add_empty_fields(self):
        """Adding an empty mapping is a no-op."""
        config = dotdict({"field1": "value1"})
        merged = add_fields(config, {})
        assert merged["field1"] == "value1"
        assert len(merged) == 1

    def test_add_fields_returns_same_object(self):
        """add_fields mutates in place and returns the same mapping."""
        config = dotdict({"field1": "value1"})
        assert add_fields(config, {"field2": "value2"}) is config
class TestDeleteFields:
    """Test cases for delete_fields function."""

    def test_delete_fields_with_dict(self):
        """When given a dict, only its keys matter; values are ignored."""
        config = dotdict({"field1": "value1", "field2": "value2", "field3": "value3"})
        pruned = delete_fields(config, {"field1": "ignored_value", "field2": "also_ignored"})
        assert "field1" not in pruned
        assert "field2" not in pruned
        assert pruned["field3"] == "value3"

    def test_delete_fields_with_list(self):
        """A list of names removes exactly those keys."""
        config = dotdict({"field1": "value1", "field2": "value2", "field3": "value3"})
        pruned = delete_fields(config, ["field1", "field3"])
        assert "field1" not in pruned
        assert pruned["field2"] == "value2"
        assert "field3" not in pruned

    def test_delete_non_existing_fields(self):
        """Unknown names are ignored rather than raising."""
        config = dotdict({"field1": "value1"})
        pruned = delete_fields(config, ["field1", "non_existing"])
        assert "field1" not in pruned
        assert len(pruned) == 0

    def test_delete_fields_returns_same_object(self):
        """delete_fields mutates in place and returns the same mapping."""
        config = dotdict({"field1": "value1", "field2": "value2"})
        assert delete_fields(config, ["field1"]) is config
class TestGetFields:
    """Test cases for get_fields function."""

    def test_get_specific_fields(self):
        """Only the requested keys are returned."""
        config = dotdict({"field1": "value1", "field2": "value2", "field3": "value3"})
        assert get_fields(config, ["field1", "field3"]) == {"field1": "value1", "field3": "value3"}

    def test_get_all_fields(self):
        """Passing None returns every key."""
        config = dotdict({"field1": "value1", "field2": "value2"})
        assert get_fields(config, None) == {"field1": "value1", "field2": "value2"}

    def test_get_non_existing_fields(self):
        """Requesting only unknown keys yields an empty dict."""
        config = dotdict({"field1": "value1"})
        assert get_fields(config, ["non_existing"]) == {}

    def test_get_mixed_existing_and_non_existing(self):
        """Unknown keys are dropped; known keys are kept."""
        config = dotdict({"field1": "value1", "field2": "value2"})
        selected = get_fields(config, ["field1", "non_existing", "field2"])
        assert selected == {"field1": "value1", "field2": "value2"}

    def test_get_fields_returns_new_dict(self):
        """The result is a copy; mutating it leaves the config untouched."""
        config = dotdict({"field1": "value1"})
        selected = get_fields(config, ["field1"])
        selected["field1"] = "modified"
        assert config["field1"] == "value1"  # original unchanged
class TestUpdateInputTypes:
    """Test cases for update_input_types function."""

    def test_update_input_types_dict_format(self):
        """Dict-shaped fields get an empty input_types list when missing or None."""
        config = dotdict(
            {
                "field1": {"input_types": None},
                "field2": {"input_types": ["existing"]},
                "field3": {"other_prop": "value"},
            }
        )
        updated = update_input_types(config)
        assert updated["field1"]["input_types"] == []
        # Pre-populated lists are left alone.
        assert updated["field2"]["input_types"] == ["existing"]
        # Dict fields without the key have it added as an empty list.
        assert updated["field3"]["input_types"] == []

    def test_update_input_types_object_format(self):
        """Object-shaped fields have a None input_types replaced in place."""
        obj_without_types = Mock()
        obj_without_types.input_types = None
        obj_with_types = Mock()
        obj_with_types.input_types = ["existing"]
        config = dotdict({"field1": obj_without_types, "field2": obj_with_types, "field3": "string_value"})
        update_input_types(config)
        assert obj_without_types.input_types == []
        assert obj_with_types.input_types == ["existing"]  # unchanged

    def test_update_input_types_mixed_formats(self):
        """Dict and object fields can be mixed within one config."""
        obj_field = Mock()
        obj_field.input_types = None
        config = dotdict(
            {"dict_field": {"input_types": None, "other": "value"}, "obj_field": obj_field, "string_field": "value"}
        )
        updated = update_input_types(config)
        assert updated["dict_field"]["input_types"] == []
        assert obj_field.input_types == []

    def test_update_input_types_no_input_types_attr(self):
        """Objects lacking the attribute entirely are skipped without error."""
        bare_obj = Mock()
        del bare_obj.input_types  # Mock raises AttributeError for it afterwards
        config = dotdict({"field1": bare_obj})
        updated = update_input_types(config)
        assert updated is config
class TestSetFieldDisplay:
    """Test cases for set_field_display function."""

    def test_set_field_display_existing_field(self):
        """The show flag is updated; other properties survive."""
        config = dotdict({"field1": {"show": True, "other": "value"}})
        updated = set_field_display(config, "field1", value=False)
        assert updated["field1"]["show"] is False
        assert updated["field1"]["other"] == "value"

    def test_set_field_display_non_existing_field(self):
        """An unknown field name leaves the config unchanged."""
        config = dotdict({"field1": {"show": True}})
        updated = set_field_display(config, "non_existing", value=False)
        assert updated["field1"]["show"] is True

    def test_set_field_display_field_without_show(self):
        """Fields without a show property are not given one."""
        config = dotdict({"field1": {"other": "value"}})
        updated = set_field_display(config, "field1", value=False)
        assert "show" not in updated["field1"]

    def test_set_field_display_none_value(self):
        """A None value is stored as-is."""
        config = dotdict({"field1": {"show": True}})
        updated = set_field_display(config, "field1", None)
        assert updated["field1"]["show"] is None
class TestSetMultipleFieldDisplay:
    """Test cases for set_multiple_field_display function."""

    def test_set_multiple_field_display_with_fields_dict(self):
        """A fields dict maps each name to its own show value."""
        config = dotdict({"field1": {"show": True}, "field2": {"show": True}, "field3": {"show": True}})
        updated = set_multiple_field_display(config, fields={"field1": False, "field2": True})
        assert updated["field1"]["show"] is False
        assert updated["field2"]["show"] is True
        assert updated["field3"]["show"] is True  # untouched

    def test_set_multiple_field_display_with_field_list(self):
        """A field list applies one shared value to every listed name."""
        config = dotdict({"field1": {"show": True}, "field2": {"show": True}, "field3": {"show": True}})
        updated = set_multiple_field_display(config, field_list=["field1", "field2"], value=False)
        assert updated["field1"]["show"] is False
        assert updated["field2"]["show"] is False
        assert updated["field3"]["show"] is True  # untouched

    def test_set_multiple_field_display_no_params(self):
        """Calling with neither fields nor field_list changes nothing."""
        config = dotdict({"field1": {"show": True}})
        updated = set_multiple_field_display(config)
        assert updated["field1"]["show"] is True
class TestSetFieldAdvanced:
    """Test cases for set_field_advanced function."""

    def test_set_field_advanced_existing_field(self):
        """The advanced flag is updated; other properties survive."""
        config = dotdict({"field1": {"advanced": False, "other": "value"}})
        updated = set_field_advanced(config, "field1", value=True)
        assert updated["field1"]["advanced"] is True
        assert updated["field1"]["other"] == "value"

    def test_set_field_advanced_default_value(self):
        """A None value falls back to False."""
        config = dotdict({"field1": {"other": "value"}})
        updated = set_field_advanced(config, "field1", None)
        assert updated["field1"]["advanced"] is False

    def test_set_field_advanced_non_dict_field(self):
        """Non-dict field values are left alone."""
        config = dotdict({"field1": "string_value"})
        updated = set_field_advanced(config, "field1", value=True)
        assert updated["field1"] == "string_value"

    def test_set_field_advanced_creates_advanced_property(self):
        """The advanced key is created when absent."""
        config = dotdict({"field1": {"other": "value"}})
        updated = set_field_advanced(config, "field1", value=True)
        assert updated["field1"]["advanced"] is True
class TestSetMultipleFieldAdvanced:
    """Test cases for set_multiple_field_advanced function."""

    def test_set_multiple_field_advanced_with_fields_dict(self):
        """A fields dict maps each name to its own advanced value."""
        config = dotdict({"field1": {"advanced": False}, "field2": {"advanced": False}})
        updated = set_multiple_field_advanced(config, fields={"field1": True, "field2": False})
        assert updated["field1"]["advanced"] is True
        assert updated["field2"]["advanced"] is False

    def test_set_multiple_field_advanced_with_field_list(self):
        """A field list applies one shared value to every listed name."""
        config = dotdict({"field1": {"advanced": False}, "field2": {"advanced": False}})
        updated = set_multiple_field_advanced(config, field_list=["field1", "field2"], value=True)
        assert updated["field1"]["advanced"] is True
        assert updated["field2"]["advanced"] is True
class TestMergeBuildConfigs:
    """Test cases for merge_build_configs function."""

    def test_merge_build_configs_simple(self):
        """Override wins on conflicts; both sides contribute keys."""
        base = dotdict({"field1": "base_value", "field2": "base_value2"})
        override = dotdict({"field1": "override_value", "field3": "new_value"})
        merged = merge_build_configs(base, override)
        assert merged["field1"] == "override_value"  # overridden
        assert merged["field2"] == "base_value2"  # from base
        assert merged["field3"] == "new_value"  # added

    def test_merge_build_configs_nested_dicts(self):
        """Nested dicts are merged recursively, not replaced wholesale."""
        base = dotdict({"field1": {"nested1": "base_value", "nested2": "base_value2"}})
        override = dotdict({"field1": {"nested1": "override_value", "nested3": "new_value"}})
        merged = merge_build_configs(base, override)
        assert merged["field1"]["nested1"] == "override_value"
        assert merged["field1"]["nested2"] == "base_value2"
        assert merged["field1"]["nested3"] == "new_value"

    def test_merge_build_configs_returns_new_object(self):
        """Merging produces a fresh dotdict rather than mutating the base."""
        base = dotdict({"field1": "base_value"})
        override = dotdict({"field2": "override_value"})
        merged = merge_build_configs(base, override)
        assert merged is not base
        assert isinstance(merged, dotdict)

    def test_merge_build_configs_override_dict_with_non_dict(self):
        """A non-dict override value replaces a dict value entirely."""
        base = dotdict({"field1": {"nested": "value"}})
        override = dotdict({"field1": "string_value"})
        assert merge_build_configs(base, override)["field1"] == "string_value"
class TestSetCurrentFields:
    """Test cases for set_current_fields function.

    NOTE(review): these assertions encode an inverted-default contract —
    fields of the selected action and the default fields receive
    ``not default_value`` while all other action fields receive
    ``default_value``. Confirm against set_current_fields before changing
    any expected value below.
    """

    def test_set_current_fields_with_selected_action(self):
        """Test setting current fields with a selected action."""
        build_config = dotdict(
            {
                "field1": {"show": False},
                "field2": {"show": False},
                "field3": {"show": False},
                "code": {"show": False},
                "_type": {"show": False},
            }
        )
        action_fields = {"action1": ["field1", "field2"], "action2": ["field3"]}
        result = set_current_fields(build_config, action_fields, "action1")
        # Selected action fields should be shown
        assert result["field1"]["show"] is True
        assert result["field2"]["show"] is True
        # Other action fields should be hidden
        assert result["field3"]["show"] is False
        # Default fields should be shown
        assert result["code"]["show"] is True
        assert result["_type"]["show"] is True

    def test_set_current_fields_no_selected_action(self):
        """Test setting current fields with no selected action."""
        build_config = dotdict({"field1": {"show": True}, "field2": {"show": True}, "code": {"show": False}})
        action_fields = {"action1": ["field1"], "action2": ["field2"]}
        result = set_current_fields(build_config, action_fields, None)
        # All action fields should be hidden
        assert result["field1"]["show"] is False
        assert result["field2"]["show"] is False
        # Default fields should be shown
        assert result["code"]["show"] is True

    def test_set_current_fields_custom_function(self):
        """Test setting current fields with custom function."""
        # Here set_field_advanced is applied instead of set_field_display.
        build_config = dotdict(
            {"field1": {"advanced": False}, "field2": {"advanced": False}, "code": {"advanced": False}}
        )
        action_fields = {"action1": ["field1"], "action2": ["field2"]}
        result = set_current_fields(build_config, action_fields, "action1", func=set_field_advanced)
        # Selected field should be True (not default_value)
        assert result["field1"]["advanced"] is True  # Selected field gets not default_value (True)
        assert result["field2"]["advanced"] is False  # Non-selected field gets default_value (False)
        assert result["code"]["advanced"] is True  # Default field gets not default_value (True)

    def test_set_current_fields_custom_default_value(self):
        """Test setting current fields with custom default value."""
        build_config = dotdict({"field1": {"show": False}, "field2": {"show": False}, "code": {"show": False}})
        action_fields = {"action1": ["field1"], "action2": ["field2"]}
        result = set_current_fields(
            build_config,
            action_fields,
            "action1",
            default_value=True,  # Inverted logic
        )
        # With default_value=True:
        # - Selected field gets not default_value = False
        # - Non-selected fields get default_value = True
        # - Default fields get not default_value = False
        assert result["field1"]["show"] is False  # Selected field gets not default_value
        assert result["field2"]["show"] is True  # Non-selected field gets default_value
        assert result["code"]["show"] is False  # Default field gets not default_value

    def test_set_current_fields_no_default_fields(self):
        """Test setting current fields with no default fields."""
        build_config = dotdict({"field1": {"show": False}, "code": {"show": False}})
        action_fields = {"action1": ["field1"]}
        result = set_current_fields(build_config, action_fields, "action1", default_fields=None)
        # Only action field should be affected
        assert result["field1"]["show"] is True
        assert result["code"]["show"] is False  # Unchanged

    def test_default_fields_constant(self):
        """Test that DEFAULT_FIELDS constant has expected value."""
        assert DEFAULT_FIELDS == ["code", "_type"]
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/utils/test_component_utils.py",
"license": "MIT License",
"lines": 369,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/utils/test_compression.py | import gzip
import json
from datetime import date, datetime, timezone
from unittest.mock import patch
from fastapi import Response
from langflow.utils.compression import compress_response
class TestCompressResponse:
    """Test cases for compress_response function.

    Most tests follow the same round-trip pattern: call compress_response,
    gunzip the response body, parse it as JSON, and compare to the input.
    """

    def test_compress_response_simple_dict(self):
        """Test compressing a simple dictionary."""
        data = {"message": "hello", "status": "success"}
        response = compress_response(data)
        assert isinstance(response, Response)
        assert response.media_type == "application/json"
        assert response.headers["Content-Encoding"] == "gzip"
        assert response.headers["Vary"] == "Accept-Encoding"
        assert "Content-Length" in response.headers
        # Decompress and verify content
        decompressed = gzip.decompress(response.body)
        parsed_data = json.loads(decompressed.decode("utf-8"))
        assert parsed_data == data

    def test_compress_response_simple_list(self):
        """Test compressing a simple list."""
        data = ["item1", "item2", "item3"]
        response = compress_response(data)
        assert isinstance(response, Response)
        assert response.media_type == "application/json"
        # Decompress and verify content
        decompressed = gzip.decompress(response.body)
        parsed_data = json.loads(decompressed.decode("utf-8"))
        assert parsed_data == data

    def test_compress_response_string(self):
        """Test compressing a string."""
        data = "simple string message"
        response = compress_response(data)
        assert isinstance(response, Response)
        # Decompress and verify content
        decompressed = gzip.decompress(response.body)
        parsed_data = json.loads(decompressed.decode("utf-8"))
        assert parsed_data == data

    def test_compress_response_number(self):
        """Test compressing numeric data."""
        data = 42
        response = compress_response(data)
        assert isinstance(response, Response)
        # Decompress and verify content
        decompressed = gzip.decompress(response.body)
        parsed_data = json.loads(decompressed.decode("utf-8"))
        assert parsed_data == data

    def test_compress_response_boolean(self):
        """Test compressing boolean data."""
        data = True
        response = compress_response(data)
        assert isinstance(response, Response)
        # Decompress and verify content
        decompressed = gzip.decompress(response.body)
        parsed_data = json.loads(decompressed.decode("utf-8"))
        assert parsed_data == data

    def test_compress_response_none(self):
        """Test compressing None value."""
        data = None
        response = compress_response(data)
        assert isinstance(response, Response)
        # Decompress and verify content (JSON null round-trips to None)
        decompressed = gzip.decompress(response.body)
        parsed_data = json.loads(decompressed.decode("utf-8"))
        assert parsed_data is None

    def test_compress_response_nested_data(self):
        """Test compressing nested data structures."""
        data = {
            "users": [{"id": 1, "name": "Alice", "active": True}, {"id": 2, "name": "Bob", "active": False}],
            "metadata": {"total": 2, "page": 1, "has_more": False},
            "settings": None,
        }
        response = compress_response(data)
        assert isinstance(response, Response)
        # Decompress and verify content
        decompressed = gzip.decompress(response.body)
        parsed_data = json.loads(decompressed.decode("utf-8"))
        assert parsed_data == data

    def test_compress_response_large_data(self):
        """Test compressing large data to verify compression effectiveness."""
        # Create large data that should compress well (repeated patterns)
        data = {"items": ["test_item"] * 1000, "metadata": {"repeated_value": "x" * 500}}
        response = compress_response(data)
        # Original JSON size
        original_json = json.dumps(data).encode("utf-8")
        original_size = len(original_json)
        compressed_size = len(response.body)
        # Verify compression occurred (should be significantly smaller)
        assert compressed_size < original_size
        assert compressed_size < original_size * 0.5  # At least 50% compression
        # Verify content integrity
        decompressed = gzip.decompress(response.body)
        parsed_data = json.loads(decompressed.decode("utf-8"))
        assert parsed_data == data

    def test_compress_response_unicode_data(self):
        """Test compressing data with unicode characters."""
        data = {
            "message": "Hello 世界! 🌍 Émojis and accénts",
            "unicode_string": "テスト データ",
            "special_chars": "àáâãäåæçèéêë",
        }
        response = compress_response(data)
        assert isinstance(response, Response)
        # Decompress and verify content (UTF-8 must survive the round trip)
        decompressed = gzip.decompress(response.body)
        parsed_data = json.loads(decompressed.decode("utf-8"))
        assert parsed_data == data

    def test_compress_response_empty_dict(self):
        """Test compressing empty dictionary."""
        data = {}
        response = compress_response(data)
        assert isinstance(response, Response)
        # Decompress and verify content
        decompressed = gzip.decompress(response.body)
        parsed_data = json.loads(decompressed.decode("utf-8"))
        assert parsed_data == data

    def test_compress_response_empty_list(self):
        """Test compressing empty list."""
        data = []
        response = compress_response(data)
        assert isinstance(response, Response)
        # Decompress and verify content
        decompressed = gzip.decompress(response.body)
        parsed_data = json.loads(decompressed.decode("utf-8"))
        assert parsed_data == data

    def test_compress_response_headers(self):
        """Test that response has correct headers."""
        data = {"test": "data"}
        response = compress_response(data)
        # Check required headers
        assert response.headers["Content-Encoding"] == "gzip"
        assert response.headers["Vary"] == "Accept-Encoding"
        assert response.headers["Content-Length"] == str(len(response.body))
        assert response.media_type == "application/json"

    def test_compress_response_content_length_accuracy(self):
        """Test that Content-Length header matches actual body length."""
        data = {"message": "test", "numbers": [1, 2, 3, 4, 5]}
        response = compress_response(data)
        content_length = int(response.headers["Content-Length"])
        actual_length = len(response.body)
        assert content_length == actual_length

    @patch("langflow.utils.compression.jsonable_encoder")
    def test_compress_response_jsonable_encoder_called(self, mock_encoder):
        """Test that jsonable_encoder is called on the data."""
        data = {"test": "data"}
        mock_encoder.return_value = data
        compress_response(data)
        mock_encoder.assert_called_once_with(data)

    @patch("langflow.utils.compression.gzip.compress")
    def test_compress_response_gzip_compression_level(self, mock_compress):
        """Test that gzip.compress is called with correct compression level."""
        data = {"test": "data"}
        mock_compress.return_value = b"compressed_data"
        compress_response(data)
        # Verify gzip.compress was called with compresslevel=6
        mock_compress.assert_called_once()
        call_args = mock_compress.call_args
        assert call_args[1]["compresslevel"] == 6

    def test_compress_response_with_custom_objects(self):
        """Test compressing data with objects that need JSON encoding."""
        data = {
            "timestamp": datetime(2023, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
            "date": date(2023, 1, 1),
            "message": "test with custom objects",
        }
        response = compress_response(data)
        assert isinstance(response, Response)
        # Decompress and verify content (datetime should be converted to string)
        decompressed = gzip.decompress(response.body)
        parsed_data = json.loads(decompressed.decode("utf-8"))
        # Check that custom objects were properly serialized
        assert "timestamp" in parsed_data
        assert "date" in parsed_data
        assert parsed_data["message"] == "test with custom objects"

    def test_compress_response_compression_ratio(self):
        """Test compression ratio with different types of data."""
        # Highly compressible data (lots of repetition)
        repetitive_data = {"data": "a" * 1000}
        # Less compressible data (more random)
        import random
        import string

        random_data = {"data": ["".join(random.choices(string.ascii_letters, k=10)) for _ in range(100)]}  # noqa: S311
        rep_response = compress_response(repetitive_data)
        rand_response = compress_response(random_data)
        rep_original = len(json.dumps(repetitive_data).encode("utf-8"))
        rand_original = len(json.dumps(random_data).encode("utf-8"))
        rep_compressed = len(rep_response.body)
        rand_compressed = len(rand_response.body)
        # Repetitive data should have better compression ratio
        rep_ratio = rep_compressed / rep_original
        rand_ratio = rand_compressed / rand_original
        assert rep_ratio < rand_ratio  # Better compression for repetitive data
        assert rep_ratio < 0.1  # Very good compression for repetitive data

    def test_compress_response_error_handling_invalid_json(self):
        """Test error handling when data cannot be JSON serialized."""

        # Create an object that cannot be JSON serialized
        class NonSerializable:
            def __init__(self):
                self.func = lambda x: x

        data = {"object": NonSerializable()}
        # jsonable_encoder should handle this, but if it doesn't, test the behavior
        try:
            response = compress_response(data)
            # If no exception, verify the response is still valid
            assert isinstance(response, Response)
        except (TypeError, ValueError):
            # Expected behavior if jsonable_encoder can't handle the object
            pass
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/utils/test_compression.py",
"license": "MIT License",
"lines": 210,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/utils/test_concurrency.py | import threading
import time
from unittest.mock import MagicMock, patch
import pytest
from lfx.utils.concurrency import KeyedMemoryLockManager, KeyedWorkerLockManager
class TestKeyedMemoryLockManager:
    """Test cases for KeyedMemoryLockManager class."""

    def test_initialization(self):
        """Test proper initialization of KeyedMemoryLockManager."""
        manager = KeyedMemoryLockManager()
        assert isinstance(manager.locks, dict)
        assert len(manager.locks) == 0
        # Duck-typed check: global_lock must expose the lock protocol.
        assert hasattr(manager, "global_lock")
        assert hasattr(manager.global_lock, "acquire")
        assert hasattr(manager.global_lock, "release")

    def test_get_lock_creates_new_lock(self):
        """Test that _get_lock creates a new lock for new keys."""
        manager = KeyedMemoryLockManager()
        lock1 = manager._get_lock("key1")
        assert hasattr(lock1, "acquire")
        assert hasattr(lock1, "release")
        assert "key1" in manager.locks
        assert manager.locks["key1"] is lock1

    def test_get_lock_returns_existing_lock(self):
        """Test that _get_lock returns existing lock for known keys."""
        manager = KeyedMemoryLockManager()
        lock1 = manager._get_lock("key1")
        lock2 = manager._get_lock("key1")
        # Identity, not equality: the same lock object must be reused.
        assert lock1 is lock2
        assert len(manager.locks) == 1

    def test_get_lock_different_keys(self):
        """Test that different keys get different locks."""
        manager = KeyedMemoryLockManager()
        lock1 = manager._get_lock("key1")
        lock2 = manager._get_lock("key2")
        assert lock1 is not lock2
        assert len(manager.locks) == 2
        assert manager.locks["key1"] is lock1
        assert manager.locks["key2"] is lock2

    def test_context_manager_basic_functionality(self):
        """Test that the context manager works with basic functionality."""
        manager = KeyedMemoryLockManager()
        # Test that context manager doesn't raise exceptions
        with manager.lock("test_key"):
            assert "test_key" in manager.locks
        # Lock should still exist after context
        assert "test_key" in manager.locks

    def test_context_manager_exception_handling(self):
        """Test that context manager handles exceptions properly."""
        manager = KeyedMemoryLockManager()
        # Test that exceptions don't break the manager
        try:
            with manager.lock("test_key"):
                msg = "Test exception"
                raise ValueError(msg)
        except ValueError:
            pass
        # Manager should still be functional
        assert "test_key" in manager.locks
        # Should be able to use the same lock again
        with manager.lock("test_key"):
            pass

    def test_concurrent_access_same_key(self):
        """Test concurrent access to the same key is serialized."""
        manager = KeyedMemoryLockManager()
        results = []

        def worker(worker_id):
            with manager.lock("shared_key"):
                results.append(f"start_{worker_id}")
                time.sleep(0.01)  # Small delay to ensure serialization
                results.append(f"end_{worker_id}")

        threads = []
        for i in range(3):
            thread = threading.Thread(target=worker, args=(i,))
            threads.append(thread)
            thread.start()
        for thread in threads:
            thread.join()
        # Results should be properly serialized (start/end pairs together)
        assert len(results) == 6
        # Each worker should complete fully before next one starts
        for i in range(3):
            start_idx = results.index(f"start_{i}")
            end_idx = results.index(f"end_{i}")
            assert end_idx == start_idx + 1

    def test_concurrent_access_different_keys(self):
        """Test concurrent access to different keys can proceed in parallel."""
        manager = KeyedMemoryLockManager()
        results = []
        # Results list is shared across threads, so guard appends with a lock.
        result_lock = threading.Lock()

        def worker(worker_id, key):
            with manager.lock(key):
                with result_lock:
                    results.append(f"start_{worker_id}")
                time.sleep(0.01)
                with result_lock:
                    results.append(f"end_{worker_id}")

        threads = []
        for i in range(3):
            thread = threading.Thread(target=worker, args=(i, f"key_{i}"))
            threads.append(thread)
            thread.start()
        for thread in threads:
            thread.join()
        # All threads should have completed
        assert len(results) == 6
        assert len([r for r in results if r.startswith("start_")]) == 3
        assert len([r for r in results if r.startswith("end_")]) == 3

    def test_lock_manager_thread_safety(self):
        """Test that the lock manager itself is thread-safe."""
        manager = KeyedMemoryLockManager()
        created_locks = []

        def create_locks():
            # Same 10 keys in every thread: thread-safety means no duplicates
            # are created even under concurrent _get_lock calls.
            for i in range(10):
                lock = manager._get_lock(f"key_{i}")
                created_locks.append(lock)

        threads = []
        for _ in range(3):
            thread = threading.Thread(target=create_locks)
            threads.append(thread)
            thread.start()
        for thread in threads:
            thread.join()
        # Should have created exactly 10 unique locks
        assert len(manager.locks) == 10
        assert len(created_locks) == 30  # 3 threads * 10 locks each
class TestKeyedWorkerLockManager:
    """Test cases for KeyedWorkerLockManager class."""

    def test_initialization(self):
        """Test proper initialization of KeyedWorkerLockManager."""
        with (
            patch("lfx.utils.concurrency.user_cache_dir") as mock_cache_dir,
            patch("lfx.utils.concurrency.Path") as mock_path,
        ):
            mock_cache_dir.return_value = "/cache/dir"
            mock_path_instance = MagicMock()
            mock_path.return_value = mock_path_instance
            manager = KeyedWorkerLockManager()
            mock_cache_dir.assert_called_once_with("langflow")
            # locks_dir is built via Path(...) / <subdir>, i.e. __truediv__.
            assert manager.locks_dir == mock_path_instance.__truediv__.return_value

    def test_validate_key_valid_keys(self):
        """Test that _validate_key accepts valid keys."""
        manager = KeyedWorkerLockManager()
        # Valid keys: letters, digits and underscores only.
        valid_keys = [
            "valid_key",
            "key123",
            "KEY_WITH_CAPS",
            "mix3d_K3y",
            "_underscore_start",
            "underscore_end_",
            "123numbers",
            "a",
            "A",
            "_",
            "key_123_test",
        ]
        for key in valid_keys:
            assert manager._validate_key(key) is True

    def test_validate_key_invalid_keys(self):
        """Test that _validate_key rejects invalid keys."""
        manager = KeyedWorkerLockManager()
        # Anything outside [A-Za-z0-9_] (and the empty string) must be rejected,
        # since the key becomes a file name on disk.
        invalid_keys = [
            "key-with-dashes",
            "key with spaces",
            "key.with.dots",
            "key@symbol",
            "key#hash",
            "key$dollar",
            "key%percent",
            "key^caret",
            "key&ersand",
            "key*asterisk",
            "key+plus",
            "key=equals",
            "key[bracket",
            "key{brace",
            "key/slash",
            "key\\backslash",
            "key|pipe",
            "key:colon",
            "key;semicolon",
            "key'quote",
            'key"doublequote',
            "key<less",
            "key>greater",
            "key?question",
            "",  # empty string
        ]
        for key in invalid_keys:
            assert manager._validate_key(key) is False

    def test_lock_with_valid_key(self):
        """Test lock context manager with valid key."""
        manager = KeyedWorkerLockManager()
        with patch("lfx.utils.concurrency.FileLock") as mock_filelock:
            mock_lock_instance = MagicMock()
            mock_filelock.return_value = mock_lock_instance
            with manager.lock("valid_key"):
                pass
            # Verify FileLock was created with correct path
            mock_filelock.assert_called_once()
            # Verify context manager was used
            mock_lock_instance.__enter__.assert_called_once()
            mock_lock_instance.__exit__.assert_called_once()

    def test_lock_with_invalid_key_raises_error(self):
        """Test that lock raises ValueError for invalid keys."""
        manager = KeyedWorkerLockManager()
        with (
            pytest.raises(ValueError, match="Invalid key: invalid-key"),
            manager.lock("invalid-key"),
        ):
            pass

    def test_lock_file_path_construction(self):
        """Test that lock file path is constructed correctly."""
        with (
            patch("lfx.utils.concurrency.user_cache_dir") as mock_cache_dir,
            patch("lfx.utils.concurrency.Path") as mock_path,
            patch("lfx.utils.concurrency.FileLock") as mock_filelock,
        ):
            mock_cache_dir.return_value = "/cache"
            mock_locks_dir = MagicMock()
            mock_path.return_value.__truediv__.return_value = mock_locks_dir
            manager = KeyedWorkerLockManager()
            with manager.lock("test_key"):
                pass
            # Verify FileLock was called with correct path (locks_dir / key)
            mock_filelock.assert_called_once_with(mock_locks_dir.__truediv__.return_value)
            mock_locks_dir.__truediv__.assert_called_once_with("test_key")

    def test_lock_context_manager_exception_handling(self):
        """Test that lock is properly released even when exception occurs."""
        manager = KeyedWorkerLockManager()
        with patch("lfx.utils.concurrency.FileLock") as mock_filelock:
            mock_lock_instance = MagicMock()
            mock_filelock.return_value = mock_lock_instance
            try:
                with manager.lock("valid_key"):
                    msg = "Test exception"
                    raise ValueError(msg)
            except ValueError:
                pass
            # Verify context manager was properly exited
            mock_lock_instance.__exit__.assert_called_once()

    def test_multiple_keys_create_different_locks(self):
        """Test that different keys create different file locks."""
        manager = KeyedWorkerLockManager()
        with patch("lfx.utils.concurrency.FileLock") as mock_filelock:
            mock_filelock.return_value = MagicMock()
            with manager.lock("key1"):
                pass
            with manager.lock("key2"):
                pass
            # Should have been called twice with different paths
            assert mock_filelock.call_count == 2
            call_args = [call[0][0] for call in mock_filelock.call_args_list]
            assert call_args[0] != call_args[1]  # Different paths

    def test_validate_key_regex_pattern(self):
        """Test the regex pattern used for key validation."""
        manager = KeyedWorkerLockManager()
        # Test edge cases for the regex pattern
        assert manager._validate_key("_") is True  # Single underscore
        assert manager._validate_key("1") is True  # Single digit
        assert manager._validate_key("a") is True  # Single letter
        assert manager._validate_key("Z") is True  # Single capital letter
        assert manager._validate_key("") is False  # Empty string
        assert manager._validate_key(" ") is False  # Single space

    def test_concurrent_worker_locks_same_key(self):
        """Test that multiple workers with same key are serialized."""
        manager = KeyedWorkerLockManager()
        results = []

        def worker(worker_id):
            try:
                with manager.lock("shared_worker_key"):
                    results.append(f"worker_{worker_id}_start")
                    time.sleep(0.01)
                    results.append(f"worker_{worker_id}_end")
            except Exception:
                # Skip the test if file locking is not available in test environment
                results.append(f"worker_{worker_id}_skipped")

        threads = []
        for i in range(2):
            thread = threading.Thread(target=worker, args=(i,))
            threads.append(thread)
            thread.start()
        for thread in threads:
            thread.join()
        # If file locking worked, should be serialized
        # If not available, workers would be skipped
        if "skipped" not in "".join(results):
            assert len(results) == 4
            # Each worker should complete before the next starts
            assert results[0].endswith("_start")
            assert results[1].endswith("_end")
            assert results[2].endswith("_start")
            assert results[3].endswith("_end")

    @patch("lfx.utils.concurrency.user_cache_dir")
    def test_locks_directory_creation(self, mock_cache_dir):
        """Test that locks directory is created properly."""
        mock_cache_dir.return_value = "/test/cache"
        with patch("lfx.utils.concurrency.Path") as mock_path:
            mock_path_instance = MagicMock()
            mock_path.return_value = mock_path_instance
            KeyedWorkerLockManager()
            # Verify Path was called correctly
            mock_path.assert_called_once_with("/test/cache", ensure_exists=True)
            mock_path_instance.__truediv__.assert_called_once_with("worker_locks")
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/utils/test_concurrency.py",
"license": "MIT License",
"lines": 304,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/utils/test_data_structure.py | from unittest.mock import Mock
from langflow.schema.data import Data
from langflow.utils.data_structure import (
analyze_value,
get_data_structure,
get_sample_values,
get_type_str,
infer_list_type,
)
class TestInferListType:
    """Tests for the infer_list_type helper."""

    def test_empty_list(self):
        """An empty list reports an unknown element type."""
        assert infer_list_type([]) == "list(unknown)"

    def test_single_type_list(self):
        """Homogeneous lists report their single element type."""
        assert infer_list_type([1, 2, 3, 4]) == "list(int)"
        assert infer_list_type(["a", "b", "c"]) == "list(str)"

    def test_mixed_type_list(self):
        """Heterogeneous lists report every observed type, pipe-separated."""
        label = infer_list_type([1, "hello", 3.14])
        assert "list(" in label
        assert "|" in label  # Mixed types are joined with a pipe
        for type_name in ("int", "str", "float"):
            assert type_name in label

    def test_max_samples_limit(self):
        """Only the first max_samples items need to be inspected."""
        huge = list(range(100))  # 100 integers
        assert infer_list_type(huge, max_samples=3) == "list(int)"

    def test_boolean_list(self):
        """Booleans are reported as bool rather than int."""
        assert infer_list_type([True, False, True]) == "list(bool)"

    def test_none_values_list(self):
        """None values map to the null type label."""
        assert infer_list_type([None, None, None]) == "list(null)"

    def test_mixed_with_none(self):
        """None mixed with other values contributes a null entry."""
        label = infer_list_type([1, None, "test"])
        for type_name in ("null", "int", "str"):
            assert type_name in label
class TestGetTypeStr:
    """Tests for the get_type_str helper."""

    def test_basic_types(self):
        """Scalar Python values map to their simple type labels."""
        cases = [
            (None, "null"),
            (True, "bool"),
            (False, "bool"),
            (42, "int"),
            (3.14, "float"),
            ("hello", "str"),
        ]
        for value, expected in cases:
            assert get_type_str(value) == expected

    def test_collection_types(self):
        """Lists, tuples and sets collapse to list(...); dicts stay dict."""
        assert get_type_str([1, 2, 3]) == "list(int)"
        assert get_type_str((1, 2, 3)) == "list(int)"
        assert get_type_str({1, 2, 3}) == "list(int)"
        assert get_type_str({"key": "value"}) == "dict"

    def test_date_string_detection(self):
        """Strings containing date-like tokens are flagged as possible dates."""
        flagged = [
            "this has a date in it",
            "time stamp here",
            "yyyy format string",
            "mm/dd format",
            "dd/mm format",
            "yyyy-mm format",
        ]
        for candidate in flagged:
            assert get_type_str(candidate) == "str(possible_date)"
        # Actual formatted dates lack the trigger tokens and stay plain strings.
        for candidate in ["2023-01-01", "January 1, 2023", "01/01/2023"]:
            assert get_type_str(candidate) == "str"

    def test_json_string_detection(self):
        """Parseable JSON strings are flagged; invalid JSON stays a plain str."""
        assert get_type_str('{"key": "value", "number": 42}') == "str(json)"
        assert get_type_str('{"key": invalid}') == "str"

    def test_custom_objects(self):
        """Unknown objects report their class name."""

        class CustomClass:
            pass

        assert get_type_str(CustomClass()) == "CustomClass"

    def test_regular_string(self):
        """Ordinary strings with no special patterns map to str."""
        assert get_type_str("just a regular string") == "str"
        assert get_type_str("no patterns here") == "str"
class TestAnalyzeValue:
    """Tests for the analyze_value helper."""

    def test_simple_values(self):
        """Scalars map directly to their type labels."""
        for value, expected in [(42, "int"), ("hello", "str"), (True, "bool"), (None, "null")]:
            assert analyze_value(value) == expected

    def test_simple_list(self):
        """A homogeneous list yields its element type plus a size hint."""
        described = analyze_value([1, 2, 3])
        assert "list(int)" in described
        assert "[size=3]" in described

    def test_empty_list(self):
        """An empty list has an unknown element type and no size hint."""
        assert analyze_value([]) == "list(unknown)"

    def test_nested_dict(self):
        """Dicts are analyzed per key, recursively."""
        described = analyze_value({"name": "John", "age": 30, "scores": [95, 87, 92]})
        assert isinstance(described, dict)
        assert described["name"] == "str"
        assert described["age"] == "int"
        assert "list(int)" in described["scores"]

    def test_max_depth_limit(self):
        """Recursion stops with a marker once max_depth is exceeded."""
        deep = {"level1": {"level2": {"level3": {"level4": "deep"}}}}
        assert "max_depth_reached" in str(analyze_value(deep, max_depth=2))

    def test_size_hints_disabled(self):
        """Size annotations are suppressed when size_hints is False."""
        described = analyze_value([1, 2, 3], size_hints=False)
        assert "list(int)" in described
        assert "[size=" not in described

    def test_include_samples_for_complex_list(self):
        """Lists of dicts carry an inline sample when requested."""
        rows = [{"name": "John", "age": 30}, {"name": "Jane", "age": 25}]
        described = analyze_value(rows, include_samples=True)
        assert "sample:" in described
        assert "list(dict)" in described

    def test_include_samples_disabled(self):
        """No inline sample appears when include_samples is False."""
        rows = [{"name": "John", "age": 30}, {"name": "Jane", "age": 25}]
        assert "sample:" not in analyze_value(rows, include_samples=False)

    def test_error_handling(self):
        """Objects that misbehave on access still produce a description."""

        class ProblematicClass:
            def __getitem__(self, key):
                msg = "Access error"
                raise RuntimeError(msg)

        described = analyze_value(ProblematicClass())
        assert "ProblematicClass" in described or "error(" in described

    def test_tuple_and_set_handling(self):
        """Tuples and sets are normalized to the list(...) label."""
        assert "list(int)" in analyze_value((1, 2, 3))
        assert "list(int)" in analyze_value({1, 2, 3})
class TestGetDataStructure:
    """Tests for the get_data_structure helper."""

    def test_data_object_input(self):
        """A Data object's .data payload is what gets analyzed."""
        payload = Mock(spec=Data)
        payload.data = {"field": "value"}
        outcome = get_data_structure(payload)
        assert "structure" in outcome
        assert isinstance(outcome["structure"], dict)
        assert outcome["structure"]["field"] == "str"

    def test_dict_input(self):
        """A plain dict is analyzed recursively, key by key."""
        sample = {
            "name": "Test",
            "count": 42,
            "items": [1, 2, 3],
            "metadata": {
                "created": "date created on server",  # Contains "date" pattern
                "tags": ["tag1", "tag2"],
            },
        }
        outcome = get_data_structure(sample)
        assert "structure" in outcome
        shape = outcome["structure"]
        assert shape["name"] == "str"
        assert shape["count"] == "int"
        assert "list(int)" in shape["items"]
        assert isinstance(shape["metadata"], dict)
        assert "str(possible_date)" in shape["metadata"]["created"]
        assert "list(str)" in shape["metadata"]["tags"]

    def test_with_sample_values(self):
        """Sample values are returned alongside the structure when requested."""
        sample = {"numbers": [1, 2, 3, 4, 5], "nested": {"key": "value"}}
        outcome = get_data_structure(sample, include_sample_values=True)
        assert "structure" in outcome
        assert "samples" in outcome
        assert "numbers" in outcome["samples"]
        assert "nested" in outcome["samples"]

    def test_max_depth_parameter(self):
        """Analysis stops descending once max_depth is reached."""
        deep = {"level1": {"level2": {"level3": {"level4": "deep_value"}}}}
        shape = get_data_structure(deep, max_depth=2)["structure"]
        assert "level1" in shape
        # The truncation marker must appear somewhere in the result.
        assert "max_depth_reached" in str(shape)

    def test_size_hints_disabled(self):
        """Size annotations are omitted when size_hints is False."""
        shape = get_data_structure({"items": [1, 2, 3, 4, 5]}, size_hints=False)["structure"]
        assert "list(int)" in shape["items"]
        assert "[size=" not in shape["items"]

    def test_sample_structure_disabled(self):
        """No inline samples appear when include_sample_structure is False."""
        shape = get_data_structure({"complex_list": [{"a": 1}, {"b": 2}]}, include_sample_structure=False)["structure"]
        assert "sample:" not in shape["complex_list"]

    def test_max_sample_size(self):
        """Returned sample lists are truncated to max_sample_size entries."""
        outcome = get_data_structure({"long_list": list(range(10))}, include_sample_values=True, max_sample_size=3)
        assert len(outcome["samples"]["long_list"]) == 3
class TestGetSampleValues:
    """Tests for the get_sample_values helper."""

    def test_simple_values(self):
        """Scalars pass through unchanged."""
        assert get_sample_values(42) == 42
        assert get_sample_values("hello") == "hello"
        assert get_sample_values(True) is True  # noqa: FBT003

    def test_list_sampling(self):
        """Lists are truncated to the first max_items entries."""
        sampled = get_sample_values(list(range(10)), max_items=3)
        assert len(sampled) == 3
        assert sampled == [0, 1, 2]

    def test_tuple_sampling(self):
        """Tuples are sampled like lists, returning a list."""
        sampled = get_sample_values(tuple(range(5)), max_items=2)
        assert len(sampled) == 2
        assert sampled == [0, 1]

    def test_set_sampling(self):
        """Sets yield max_items members; iteration order is unspecified."""
        source = {1, 2, 3, 4, 5}
        sampled = get_sample_values(source, max_items=3)
        assert len(sampled) == 3
        assert all(member in source for member in sampled)

    def test_dict_sampling(self):
        """Dict values are sampled recursively; the cap applies at every level."""
        source = {"list_field": [1, 2, 3, 4, 5], "simple_field": "value", "nested_dict": {"inner": [10, 20, 30]}}
        sampled = get_sample_values(source, max_items=2)
        assert isinstance(sampled, dict)
        assert "simple_field" in sampled
        assert sampled["simple_field"] == "value"
        assert len(sampled["list_field"]) == 2  # Limited by max_items
        assert len(sampled["nested_dict"]["inner"]) == 2  # Nested sampling

    def test_nested_structure_sampling(self):
        """Nested list-of-dict structures keep their shape after sampling."""
        source = {"data": [{"items": [1, 2, 3, 4, 5]}, {"items": [6, 7, 8, 9, 10]}]}
        sampled = get_sample_values(source, max_items=2)
        assert len(sampled["data"]) == 2
        # Element lengths are not pinned here; just verify the shape survives.
        for entry in sampled["data"]:
            assert "items" in entry
            assert isinstance(entry["items"], list)

    def test_empty_collections(self):
        """Empty collections sample to empty results."""
        assert get_sample_values([]) == []
        assert get_sample_values({}) == {}
        assert get_sample_values(set()) == []

    def test_max_items_larger_than_collection(self):
        """A cap larger than the collection simply returns everything."""
        assert get_sample_values([1, 2], max_items=10) == [1, 2]
class TestIntegrationScenarios:
    """Integration test cases for complex data structures."""

    def test_real_world_api_response(self):
        """Test analysis of realistic API response structure."""
        api_response = {
            "status": "success",
            "data": {
                "users": [
                    {
                        "id": 1,
                        "name": "John Doe",
                        "email": "john@example.com",
                        "created_at": "2023-01-15T10:30:00Z",
                        "metadata": {"login_count": 42, "preferences": {"theme": "dark", "notifications": True}},
                    },
                    {
                        "id": 2,
                        "name": "Jane Smith",
                        "email": "jane@example.com",
                        "created_at": "2023-02-01T14:20:00Z",
                        "metadata": {"login_count": 15, "preferences": {"theme": "light", "notifications": False}},
                    },
                ],
                "pagination": {"page": 1, "per_page": 10, "total": 2},
            },
        }
        result = get_data_structure(api_response, include_sample_values=True, include_sample_structure=True)
        assert "structure" in result
        assert "samples" in result
        structure = result["structure"]
        assert structure["status"] == "str"
        assert "data" in structure
        assert "users" in structure["data"]
        assert "list(dict)" in structure["data"]["users"]
        # Check that pagination structure is captured
        pagination = structure["data"]["pagination"]
        assert pagination["page"] == "int"
        assert pagination["total"] == "int"

    def test_mixed_data_types_analysis(self):
        """Test analysis of mixed data types."""
        mixed_data = {
            "strings": ["hello", "world"],
            "numbers": [1, 2, 3.14, 5],
            "booleans": [True, False, True],
            "mixed_list": [1, "hello", True, None, {"nested": "value"}],
            "json_strings": ['{"key": "value"}', '{"another": "json"}'],
            "dates": ["has date in string", "another time value"],
            "empty_structures": {"empty_list": [], "empty_dict": {}},
        }
        result = get_data_structure(mixed_data)
        structure = result["structure"]
        assert "list(str)" in structure["strings"]
        # Mixed numeric types may be reported in either order.
        assert "int|float" in structure["numbers"] or "float|int" in structure["numbers"]
        assert "list(bool)" in structure["booleans"]
        # The mixed list types may be in any order, so just check it contains list() and multiple types
        mixed_list_str = structure["mixed_list"]
        assert "list(" in mixed_list_str
        assert "|" in mixed_list_str  # Multiple types indicated by pipe
        assert "str(json)" in structure["json_strings"]
        assert "str(possible_date)" in structure["dates"]
        assert structure["empty_structures"]["empty_list"] == "list(unknown)"
        assert structure["empty_structures"]["empty_dict"] == {}

    def test_deep_nesting_with_max_depth(self):
        """Test deeply nested structure with depth limits."""

        def create_deep_structure(depth):
            # Build a chain of dicts `depth` levels deep ending in a leaf string.
            if depth == 0:
                return "leaf_value"
            return {"level": depth, "nested": create_deep_structure(depth - 1)}

        deep_data = create_deep_structure(10)
        result = get_data_structure(deep_data, max_depth=5)
        # Should handle deep nesting gracefully
        assert "structure" in result
        assert "max_depth_reached" in str(result["structure"])
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/utils/test_data_structure.py",
"license": "MIT License",
"lines": 368,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/utils/test_lazy_load.py | import pytest
from langflow.utils.lazy_load import LazyLoadDictBase
class TestLazyLoadDictBase:
    """Test cases for LazyLoadDictBase class."""

    class ConcreteLazyLoad(LazyLoadDictBase):
        """Concrete implementation for testing."""

        def __init__(self, build_dict_return="__default__", get_type_dict_return=None):
            super().__init__()
            # "__default__" is a sentinel string so callers can still pass
            # None (or {}) explicitly as the value _build_dict should return.
            if build_dict_return == "__default__":
                self.build_dict_return = {"test": "value"}
            else:
                self.build_dict_return = build_dict_return
            self.get_type_dict_return = get_type_dict_return or {"type": "dict"}
            # Counts _build_dict invocations so tests can verify laziness/caching.
            self.build_dict_call_count = 0

        def _build_dict(self):
            self.build_dict_call_count += 1
            return self.build_dict_return

        def get_type_dict(self):
            return self.get_type_dict_return

    def test_lazy_load_dict_base_initialization(self):
        """Test proper initialization of LazyLoadDictBase."""
        lazy_load = self.ConcreteLazyLoad()
        # Nothing is built until the property is first accessed.
        assert lazy_load._all_types_dict is None
        assert lazy_load.build_dict_call_count == 0

    def test_all_types_dict_lazy_loading(self):
        """Test that all_types_dict is loaded lazily."""
        test_dict = {"key1": "value1", "key2": "value2"}
        lazy_load = self.ConcreteLazyLoad(build_dict_return=test_dict)
        # First access should trigger _build_dict
        result = lazy_load.all_types_dict
        assert result == test_dict
        assert lazy_load.build_dict_call_count == 1
        assert lazy_load._all_types_dict == test_dict

    def test_all_types_dict_caching(self):
        """Test that all_types_dict is cached after first access."""
        test_dict = {"cached": "value"}
        lazy_load = self.ConcreteLazyLoad(build_dict_return=test_dict)
        # First access
        result1 = lazy_load.all_types_dict
        # Second access
        result2 = lazy_load.all_types_dict
        assert result1 == test_dict
        assert result2 == test_dict
        assert result1 is result2  # Same object reference
        assert lazy_load.build_dict_call_count == 1  # Only called once

    def test_all_types_dict_multiple_accesses(self):
        """Test multiple accesses to all_types_dict property."""
        test_dict = {"multi": "access", "test": "value"}
        lazy_load = self.ConcreteLazyLoad(build_dict_return=test_dict)
        # Multiple accesses
        for _ in range(5):
            result = lazy_load.all_types_dict
            assert result == test_dict
        # _build_dict should only be called once
        assert lazy_load.build_dict_call_count == 1

    def test_build_dict_not_implemented_error(self):
        """Test that _build_dict raises NotImplementedError in base class."""
        base = LazyLoadDictBase()
        with pytest.raises(NotImplementedError):
            base._build_dict()

    def test_get_type_dict_not_implemented_error(self):
        """Test that get_type_dict raises NotImplementedError in base class."""
        base = LazyLoadDictBase()
        with pytest.raises(NotImplementedError):
            base.get_type_dict()

    def test_get_type_dict_implementation(self):
        """Test that get_type_dict works correctly in concrete implementation."""
        type_dict = {"component": "type", "data": "structure"}
        lazy_load = self.ConcreteLazyLoad(get_type_dict_return=type_dict)
        result = lazy_load.get_type_dict()
        assert result == type_dict

    def test_all_types_dict_with_empty_dict(self):
        """Test all_types_dict with empty dictionary from _build_dict."""
        # An empty dict is falsy, but the property must still cache and
        # return it rather than treating it as "not built yet".
        lazy_load = self.ConcreteLazyLoad(build_dict_return={})
        result = lazy_load.all_types_dict
        assert result == {}
        assert lazy_load.build_dict_call_count == 1

    def test_all_types_dict_with_none_from_build_dict(self):
        """Test all_types_dict when _build_dict returns None."""
        lazy_load = self.ConcreteLazyLoad(build_dict_return=None)
        result = lazy_load.all_types_dict
        assert result is None
        assert lazy_load.build_dict_call_count == 1

    def test_all_types_dict_with_complex_data(self):
        """Test all_types_dict with complex nested data structures."""
        complex_dict = {
            "components": {"llm": ["OpenAI", "Anthropic"], "tools": {"python": "PythonTool", "api": "APITool"}},
            "metadata": {"version": "1.0", "updated": "2023-01-01"},
        }
        lazy_load = self.ConcreteLazyLoad(build_dict_return=complex_dict)
        result = lazy_load.all_types_dict
        assert result == complex_dict
        assert result["components"]["llm"] == ["OpenAI", "Anthropic"]
        assert result["components"]["tools"]["python"] == "PythonTool"
        assert lazy_load.build_dict_call_count == 1

    def test_inheritance_behavior(self):
        """Test that inheritance works properly."""

        class CustomLazyLoad(LazyLoadDictBase):
            def _build_dict(self):
                return {"custom": "implementation"}

            def get_type_dict(self):
                return {"custom": "type_dict"}

        custom = CustomLazyLoad()
        assert custom.all_types_dict == {"custom": "implementation"}
        assert custom.get_type_dict() == {"custom": "type_dict"}

    def test_all_types_dict_property_behavior(self):
        """Test that all_types_dict behaves as a proper property."""
        lazy_load = self.ConcreteLazyLoad(build_dict_return={"prop": "test"})
        # Check that it's a property and not a method
        assert hasattr(type(lazy_load), "all_types_dict")
        assert isinstance(type(lazy_load).all_types_dict, property)
        # Accessing it should return the value, not a method
        result = lazy_load.all_types_dict
        assert callable(result) is False
        assert result == {"prop": "test"}

    def test_state_consistency(self):
        """Test that internal state remains consistent."""
        lazy_load = self.ConcreteLazyLoad(build_dict_return={"state": "test"})
        # Initially _all_types_dict should be None
        assert lazy_load._all_types_dict is None
        # After first access, it should be set
        result = lazy_load.all_types_dict
        assert lazy_load._all_types_dict is not None
        assert lazy_load._all_types_dict == {"state": "test"}
        # The returned value should be the same as internal state
        assert result is lazy_load._all_types_dict

    def test_manual_dict_assignment(self):
        """Test behavior when _all_types_dict is manually assigned."""
        lazy_load = self.ConcreteLazyLoad(build_dict_return={"build": "dict"})
        # Manually assign the dict
        manual_dict = {"manual": "assignment"}
        lazy_load._all_types_dict = manual_dict
        # Should return the manually assigned dict, not call _build_dict
        result = lazy_load.all_types_dict
        assert result == manual_dict
        assert lazy_load.build_dict_call_count == 0  # _build_dict not called
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/utils/test_lazy_load.py",
"license": "MIT License",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/utils/test_migration.py | from unittest.mock import Mock, patch
import pytest
import sqlalchemy as sa
from langflow.utils.migration import column_exists, constraint_exists, foreign_key_exists, table_exists
class TestTableExists:
    """Test cases for table_exists function."""

    @patch("sqlalchemy.inspect")
    def test_table_exists_true(self, mock_inspect):
        """Test when table exists."""
        inspector = Mock()
        inspector.get_table_names.return_value = ["users", "posts", "comments"]
        mock_inspect.return_value = inspector
        conn = Mock()
        # Check the exact boolean, not mere truthiness.
        assert table_exists("users", conn) is True
        mock_inspect.assert_called_once_with(conn)
        inspector.get_table_names.assert_called_once()

    @patch("sqlalchemy.inspect")
    def test_table_exists_false(self, mock_inspect):
        """Test when table does not exist."""
        inspector = Mock()
        inspector.get_table_names.return_value = ["users", "posts", "comments"]
        mock_inspect.return_value = inspector
        conn = Mock()
        assert table_exists("nonexistent_table", conn) is False
        mock_inspect.assert_called_once_with(conn)
        inspector.get_table_names.assert_called_once()

    @patch("sqlalchemy.inspect")
    def test_table_exists_empty_database(self, mock_inspect):
        """Test when database has no tables."""
        inspector = Mock()
        inspector.get_table_names.return_value = []
        mock_inspect.return_value = inspector
        conn = Mock()
        assert table_exists("any_table", conn) is False
        mock_inspect.assert_called_once_with(conn)
        inspector.get_table_names.assert_called_once()

    @patch("sqlalchemy.inspect")
    def test_table_exists_case_sensitive(self, mock_inspect):
        """Test case sensitivity of table name matching."""
        inspector = Mock()
        inspector.get_table_names.return_value = ["Users", "posts"]
        mock_inspect.return_value = inspector
        conn = Mock()
        # Exact case match should work
        assert table_exists("Users", conn) is True
        # Different case should not match (depends on database implementation)
        assert table_exists("users", conn) is False

    @patch("sqlalchemy.inspect")
    def test_table_exists_with_engine(self, mock_inspect):
        """Test function works with both engine and connection objects."""
        inspector = Mock()
        inspector.get_table_names.return_value = ["test_table"]
        mock_inspect.return_value = inspector
        engine = Mock(spec=sa.engine.Engine)
        assert table_exists("test_table", engine) is True
        mock_inspect.assert_called_once_with(engine)
class TestColumnExists:
    """Test cases for column_exists function."""

    @patch("sqlalchemy.inspect")
    def test_column_exists_true(self, mock_inspect):
        """Test when column exists in table."""
        mock_inspector = Mock()
        # Inspector.get_columns returns a list of column-description dicts.
        mock_inspector.get_columns.return_value = [
            {"name": "id", "type": "INTEGER"},
            {"name": "username", "type": "VARCHAR"},
            {"name": "email", "type": "VARCHAR"},
        ]
        mock_inspect.return_value = mock_inspector
        mock_conn = Mock()
        result = column_exists("users", "username", mock_conn)
        # `is True` verifies the exact boolean return, not mere truthiness.
        assert result is True
        mock_inspect.assert_called_once_with(mock_conn)
        mock_inspector.get_columns.assert_called_once_with("users")

    @patch("sqlalchemy.inspect")
    def test_column_exists_false(self, mock_inspect):
        """Test when column does not exist in table."""
        mock_inspector = Mock()
        mock_inspector.get_columns.return_value = [
            {"name": "id", "type": "INTEGER"},
            {"name": "username", "type": "VARCHAR"},
            {"name": "email", "type": "VARCHAR"},
        ]
        mock_inspect.return_value = mock_inspector
        mock_conn = Mock()
        result = column_exists("users", "nonexistent_column", mock_conn)
        assert result is False
        mock_inspect.assert_called_once_with(mock_conn)
        mock_inspector.get_columns.assert_called_once_with("users")

    @patch("sqlalchemy.inspect")
    def test_column_exists_empty_table(self, mock_inspect):
        """Test when table has no columns."""
        mock_inspector = Mock()
        mock_inspector.get_columns.return_value = []
        mock_inspect.return_value = mock_inspector
        mock_conn = Mock()
        result = column_exists("empty_table", "any_column", mock_conn)
        assert result is False
        mock_inspect.assert_called_once_with(mock_conn)
        mock_inspector.get_columns.assert_called_once_with("empty_table")

    @patch("sqlalchemy.inspect")
    def test_column_exists_case_sensitive(self, mock_inspect):
        """Test case sensitivity of column name matching."""
        mock_inspector = Mock()
        mock_inspector.get_columns.return_value = [
            {"name": "UserName", "type": "VARCHAR"},
            {"name": "email", "type": "VARCHAR"},
        ]
        mock_inspect.return_value = mock_inspector
        mock_conn = Mock()
        # Exact case match should work
        result = column_exists("users", "UserName", mock_conn)
        assert result is True
        # Different case should not match
        result = column_exists("users", "username", mock_conn)
        assert result is False

    @patch("sqlalchemy.inspect")
    def test_column_exists_multiple_calls(self, mock_inspect):
        """Test multiple column existence checks on same table."""
        mock_inspector = Mock()
        mock_inspector.get_columns.return_value = [
            {"name": "id", "type": "INTEGER"},
            {"name": "name", "type": "VARCHAR"},
            {"name": "created_at", "type": "TIMESTAMP"},
        ]
        mock_inspect.return_value = mock_inspector
        mock_conn = Mock()
        # Multiple checks should all work
        assert column_exists("posts", "id", mock_conn) is True
        assert column_exists("posts", "name", mock_conn) is True
        assert column_exists("posts", "created_at", mock_conn) is True
        assert column_exists("posts", "updated_at", mock_conn) is False
        # Inspector should be called for each check — documents that
        # column_exists performs no caching between calls.
        assert mock_inspect.call_count == 4
        assert mock_inspector.get_columns.call_count == 4
class TestForeignKeyExists:
    """Test cases for foreign_key_exists function."""

    @patch("sqlalchemy.inspect")
    def test_foreign_key_exists_true(self, mock_inspect):
        """Test when foreign key exists."""
        mock_inspector = Mock()
        # Inspector.get_foreign_keys returns a list of FK-description dicts.
        mock_inspector.get_foreign_keys.return_value = [
            {"name": "fk_user_id", "constrained_columns": ["user_id"]},
            {"name": "fk_category_id", "constrained_columns": ["category_id"]},
            {"name": "fk_author_id", "constrained_columns": ["author_id"]},
        ]
        mock_inspect.return_value = mock_inspector
        mock_conn = Mock()
        result = foreign_key_exists("posts", "fk_user_id", mock_conn)
        assert result is True
        mock_inspect.assert_called_once_with(mock_conn)
        mock_inspector.get_foreign_keys.assert_called_once_with("posts")

    @patch("sqlalchemy.inspect")
    def test_foreign_key_exists_false(self, mock_inspect):
        """Test when foreign key does not exist."""
        mock_inspector = Mock()
        mock_inspector.get_foreign_keys.return_value = [
            {"name": "fk_user_id", "constrained_columns": ["user_id"]},
            {"name": "fk_category_id", "constrained_columns": ["category_id"]},
        ]
        mock_inspect.return_value = mock_inspector
        mock_conn = Mock()
        result = foreign_key_exists("posts", "fk_nonexistent", mock_conn)
        assert result is False
        mock_inspect.assert_called_once_with(mock_conn)
        mock_inspector.get_foreign_keys.assert_called_once_with("posts")

    @patch("sqlalchemy.inspect")
    def test_foreign_key_exists_no_foreign_keys(self, mock_inspect):
        """Test when table has no foreign keys."""
        mock_inspector = Mock()
        mock_inspector.get_foreign_keys.return_value = []
        mock_inspect.return_value = mock_inspector
        mock_conn = Mock()
        result = foreign_key_exists("simple_table", "any_fk", mock_conn)
        assert result is False
        mock_inspect.assert_called_once_with(mock_conn)
        mock_inspector.get_foreign_keys.assert_called_once_with("simple_table")

    @patch("sqlalchemy.inspect")
    def test_foreign_key_exists_none_names(self, mock_inspect):
        """Test when foreign keys have None names."""
        # Unnamed (auto-generated) FKs are reported with name=None by
        # SQLAlchemy; the lookup must not blow up on them.
        mock_inspector = Mock()
        mock_inspector.get_foreign_keys.return_value = [
            {"name": None, "constrained_columns": ["user_id"]},
            {"name": "fk_valid", "constrained_columns": ["category_id"]},
        ]
        mock_inspect.return_value = mock_inspector
        mock_conn = Mock()
        result = foreign_key_exists("posts", "fk_valid", mock_conn)
        assert result is True
        # Should handle None names gracefully
        result = foreign_key_exists("posts", None, mock_conn)
        assert result is True  # None should match None

    @patch("sqlalchemy.inspect")
    def test_foreign_key_exists_case_sensitive(self, mock_inspect):
        """Test case sensitivity of foreign key name matching."""
        mock_inspector = Mock()
        mock_inspector.get_foreign_keys.return_value = [{"name": "FK_User_ID", "constrained_columns": ["user_id"]}]
        mock_inspect.return_value = mock_inspector
        mock_conn = Mock()
        # Exact case match should work
        result = foreign_key_exists("posts", "FK_User_ID", mock_conn)
        assert result is True
        # Different case should not match
        result = foreign_key_exists("posts", "fk_user_id", mock_conn)
        assert result is False
class TestConstraintExists:
    """Test cases for constraint_exists function."""

    @patch("sqlalchemy.inspect")
    def test_constraint_exists_true(self, mock_inspect):
        """Test when constraint exists."""
        mock_inspector = Mock()
        # Inspector.get_unique_constraints returns constraint-description dicts.
        mock_inspector.get_unique_constraints.return_value = [
            {"name": "uq_username", "column_names": ["username"]},
            {"name": "uq_email", "column_names": ["email"]},
            {"name": "uq_composite", "column_names": ["first_name", "last_name"]},
        ]
        mock_inspect.return_value = mock_inspector
        mock_conn = Mock()
        result = constraint_exists("users", "uq_username", mock_conn)
        assert result is True
        mock_inspect.assert_called_once_with(mock_conn)
        mock_inspector.get_unique_constraints.assert_called_once_with("users")

    @patch("sqlalchemy.inspect")
    def test_constraint_exists_false(self, mock_inspect):
        """Test when constraint does not exist."""
        mock_inspector = Mock()
        mock_inspector.get_unique_constraints.return_value = [
            {"name": "uq_username", "column_names": ["username"]},
            {"name": "uq_email", "column_names": ["email"]},
        ]
        mock_inspect.return_value = mock_inspector
        mock_conn = Mock()
        result = constraint_exists("users", "uq_nonexistent", mock_conn)
        assert result is False
        mock_inspect.assert_called_once_with(mock_conn)
        mock_inspector.get_unique_constraints.assert_called_once_with("users")

    @patch("sqlalchemy.inspect")
    def test_constraint_exists_no_constraints(self, mock_inspect):
        """Test when table has no unique constraints."""
        mock_inspector = Mock()
        mock_inspector.get_unique_constraints.return_value = []
        mock_inspect.return_value = mock_inspector
        mock_conn = Mock()
        result = constraint_exists("simple_table", "any_constraint", mock_conn)
        assert result is False
        mock_inspect.assert_called_once_with(mock_conn)
        mock_inspector.get_unique_constraints.assert_called_once_with("simple_table")

    @patch("sqlalchemy.inspect")
    def test_constraint_exists_none_names(self, mock_inspect):
        """Test when constraints have None names."""
        # Unnamed constraints are reported with name=None; lookups must
        # tolerate them.
        mock_inspector = Mock()
        mock_inspector.get_unique_constraints.return_value = [
            {"name": None, "column_names": ["id"]},
            {"name": "uq_valid", "column_names": ["username"]},
        ]
        mock_inspect.return_value = mock_inspector
        mock_conn = Mock()
        result = constraint_exists("users", "uq_valid", mock_conn)
        assert result is True
        # Should handle None names gracefully
        result = constraint_exists("users", None, mock_conn)
        assert result is True  # None should match None

    @patch("sqlalchemy.inspect")
    def test_constraint_exists_case_sensitive(self, mock_inspect):
        """Test case sensitivity of constraint name matching."""
        mock_inspector = Mock()
        mock_inspector.get_unique_constraints.return_value = [{"name": "UQ_Username", "column_names": ["username"]}]
        mock_inspect.return_value = mock_inspector
        mock_conn = Mock()
        # Exact case match should work
        result = constraint_exists("users", "UQ_Username", mock_conn)
        assert result is True
        # Different case should not match
        result = constraint_exists("users", "uq_username", mock_conn)
        assert result is False

    @patch("sqlalchemy.inspect")
    def test_constraint_exists_multiple_constraints(self, mock_inspect):
        """Test with multiple unique constraints."""
        mock_inspector = Mock()
        mock_inspector.get_unique_constraints.return_value = [
            {"name": "uq_single_column", "column_names": ["email"]},
            {"name": "uq_composite", "column_names": ["first_name", "last_name", "birth_date"]},
            {"name": "uq_another", "column_names": ["phone_number"]},
        ]
        mock_inspect.return_value = mock_inspector
        mock_conn = Mock()
        # All existing constraints should be found
        assert constraint_exists("users", "uq_single_column", mock_conn) is True
        assert constraint_exists("users", "uq_composite", mock_conn) is True
        assert constraint_exists("users", "uq_another", mock_conn) is True
        # Non-existing constraint should return False
        assert constraint_exists("users", "uq_missing", mock_conn) is False
class TestIntegrationScenarios:
    """Integration test scenarios for migration utilities."""

    @patch("sqlalchemy.inspect")
    def test_complete_migration_check_workflow(self, mock_inspect):
        """Test a complete migration check workflow."""
        mock_inspector = Mock()

        # Setup mock responses for different calls; each side_effect
        # simulates a small fixed schema keyed by table name.
        def get_table_names_side_effect():
            return ["users", "posts", "comments", "categories"]

        def get_columns_side_effect(table_name):
            # Tables not in the map (comments, categories) report no columns.
            columns_map = {
                "users": [
                    {"name": "id", "type": "INTEGER"},
                    {"name": "username", "type": "VARCHAR"},
                    {"name": "email", "type": "VARCHAR"},
                    {"name": "created_at", "type": "TIMESTAMP"},
                ],
                "posts": [
                    {"name": "id", "type": "INTEGER"},
                    {"name": "title", "type": "VARCHAR"},
                    {"name": "content", "type": "TEXT"},
                    {"name": "user_id", "type": "INTEGER"},
                ],
            }
            return columns_map.get(table_name, [])

        def get_foreign_keys_side_effect(table_name):
            fk_map = {"posts": [{"name": "fk_posts_user_id", "constrained_columns": ["user_id"]}]}
            return fk_map.get(table_name, [])

        def get_unique_constraints_side_effect(table_name):
            constraint_map = {
                "users": [
                    {"name": "uq_users_username", "column_names": ["username"]},
                    {"name": "uq_users_email", "column_names": ["email"]},
                ]
            }
            return constraint_map.get(table_name, [])

        mock_inspector.get_table_names.side_effect = get_table_names_side_effect
        mock_inspector.get_columns.side_effect = get_columns_side_effect
        mock_inspector.get_foreign_keys.side_effect = get_foreign_keys_side_effect
        mock_inspector.get_unique_constraints.side_effect = get_unique_constraints_side_effect
        mock_inspect.return_value = mock_inspector
        mock_conn = Mock()
        # Test complete migration check scenario
        # Check if tables exist
        assert table_exists("users", mock_conn) is True
        assert table_exists("posts", mock_conn) is True
        assert table_exists("nonexistent_table", mock_conn) is False
        # Check if required columns exist
        assert column_exists("users", "username", mock_conn) is True
        assert column_exists("users", "email", mock_conn) is True
        assert column_exists("posts", "user_id", mock_conn) is True
        assert column_exists("posts", "nonexistent_column", mock_conn) is False
        # Check if foreign keys exist
        assert foreign_key_exists("posts", "fk_posts_user_id", mock_conn) is True
        assert foreign_key_exists("posts", "nonexistent_fk", mock_conn) is False
        # Check if constraints exist
        assert constraint_exists("users", "uq_users_username", mock_conn) is True
        assert constraint_exists("users", "uq_users_email", mock_conn) is True
        assert constraint_exists("users", "nonexistent_constraint", mock_conn) is False

    @patch("sqlalchemy.inspect")
    def test_error_handling_in_inspection(self, mock_inspect):
        """Test error handling when SQLAlchemy inspection fails."""
        mock_inspector = Mock()
        mock_inspector.get_table_names.side_effect = Exception("Database connection error")
        mock_inspect.return_value = mock_inspector
        mock_conn = Mock()
        # Should propagate the exception — table_exists does not swallow
        # inspection errors.
        with pytest.raises(Exception, match="Database connection error"):
            table_exists("any_table", mock_conn)

    @patch("sqlalchemy.inspect")
    def test_with_different_connection_types(self, mock_inspect):
        """Test functions work with different SQLAlchemy connection types."""
        mock_inspector = Mock()
        mock_inspector.get_table_names.return_value = ["test_table"]
        mock_inspect.return_value = mock_inspector
        # Test with mock engine
        mock_engine = Mock(spec=sa.engine.Engine)
        result = table_exists("test_table", mock_engine)
        assert result is True
        # Test with mock connection
        mock_connection = Mock(spec=sa.engine.Connection)
        result = table_exists("test_table", mock_connection)
        assert result is True
        # Verify inspect was called with the connection objects
        assert mock_inspect.call_count == 2
        mock_inspect.assert_any_call(mock_engine)
        mock_inspect.assert_any_call(mock_connection)
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/utils/test_migration.py",
"license": "MIT License",
"lines": 384,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/utils/test_payload.py | from unittest.mock import MagicMock
from langflow.utils.payload import extract_input_variables, get_root_vertex
class TestExtractInputVariables:
    """Test cases for extract_input_variables function."""

    def test_extract_input_variables_prompt_type(self):
        """Test extracting input variables from prompt type node."""
        # Node shape mirrors the frontend flow payload:
        # data.node.template holds the typed fields of a component.
        nodes = [
            {
                "data": {
                    "node": {
                        "template": {
                            "input_variables": {"value": []},
                            "_type": "prompt",
                            "template": {"value": "Hello {name}, welcome to {place}!"},
                        }
                    }
                }
            }
        ]
        result = extract_input_variables(nodes)
        assert result[0]["data"]["node"]["template"]["input_variables"]["value"] == ["name", "place"]

    def test_extract_input_variables_few_shot_type(self):
        """Test extracting input variables from few_shot type node."""
        # few_shot nodes pull variables from both prefix and suffix fields.
        nodes = [
            {
                "data": {
                    "node": {
                        "template": {
                            "input_variables": {"value": []},
                            "_type": "few_shot",
                            "prefix": {"value": "This is {prefix_var}"},
                            "suffix": {"value": "And this is {suffix_var}"},
                        }
                    }
                }
            }
        ]
        result = extract_input_variables(nodes)
        assert result[0]["data"]["node"]["template"]["input_variables"]["value"] == ["prefix_var", "suffix_var"]

    def test_extract_input_variables_other_type(self):
        """Test extracting input variables from other type nodes."""
        # Unknown _type values are left untouched (no variables extracted).
        nodes = [{"data": {"node": {"template": {"input_variables": {"value": []}, "_type": "other"}}}}]
        result = extract_input_variables(nodes)
        assert result[0]["data"]["node"]["template"]["input_variables"]["value"] == []

    def test_extract_input_variables_no_input_variables_field(self):
        """Test handling nodes without input_variables field."""
        nodes = [{"data": {"node": {"template": {"_type": "prompt", "template": {"value": "Hello {name}!"}}}}}]
        # Should not raise exception due to contextlib.suppress
        result = extract_input_variables(nodes)
        assert result == nodes  # Should return unchanged

    def test_extract_input_variables_multiple_nodes(self):
        """Test extracting input variables from multiple nodes."""
        nodes = [
            {
                "data": {
                    "node": {
                        "template": {
                            "input_variables": {"value": []},
                            "_type": "prompt",
                            "template": {"value": "Hello {user}!"},
                        }
                    }
                }
            },
            {
                "data": {
                    "node": {
                        "template": {
                            "input_variables": {"value": []},
                            "_type": "few_shot",
                            "prefix": {"value": "Prefix with {var1}"},
                            "suffix": {"value": "Suffix with {var2}"},
                        }
                    }
                }
            },
        ]
        result = extract_input_variables(nodes)
        assert result[0]["data"]["node"]["template"]["input_variables"]["value"] == ["user"]
        assert result[1]["data"]["node"]["template"]["input_variables"]["value"] == ["var1", "var2"]

    def test_extract_input_variables_nested_brackets(self):
        """Test extracting variables with nested brackets."""
        nodes = [
            {
                "data": {
                    "node": {
                        "template": {
                            "input_variables": {"value": []},
                            "_type": "prompt",
                            "template": {"value": "Process {data.field} and {other_var}"},
                        }
                    }
                }
            }
        ]
        result = extract_input_variables(nodes)
        # Dotted names inside braces are captured as-is.
        assert "data.field" in result[0]["data"]["node"]["template"]["input_variables"]["value"]
        assert "other_var" in result[0]["data"]["node"]["template"]["input_variables"]["value"]

    def test_extract_input_variables_no_variables(self):
        """Test extracting from template with no variables."""
        nodes = [
            {
                "data": {
                    "node": {
                        "template": {
                            "input_variables": {"value": []},
                            "_type": "prompt",
                            "template": {"value": "Hello World! No variables here."},
                        }
                    }
                }
            }
        ]
        result = extract_input_variables(nodes)
        assert result[0]["data"]["node"]["template"]["input_variables"]["value"] == []

    def test_extract_input_variables_duplicate_variables(self):
        """Test extracting duplicate variables."""
        nodes = [
            {
                "data": {
                    "node": {
                        "template": {
                            "input_variables": {"value": []},
                            "_type": "prompt",
                            "template": {"value": "Hello {name}, how are you {name}?"},
                        }
                    }
                }
            }
        ]
        result = extract_input_variables(nodes)
        # Should contain duplicates as found by regex
        assert result[0]["data"]["node"]["template"]["input_variables"]["value"] == ["name", "name"]

    def test_extract_input_variables_malformed_node(self):
        """Test handling malformed node structure."""
        nodes = [
            {"malformed": "data"},
            {
                "data": {
                    "node": {
                        "template": {
                            "input_variables": {"value": []},
                            "_type": "prompt",
                            "template": {"value": "Hello {valid}!"},
                        }
                    }
                }
            },
        ]
        # Should not raise exception and process valid nodes
        result = extract_input_variables(nodes)
        assert len(result) == 2
        # Second node should be processed correctly
        assert result[1]["data"]["node"]["template"]["input_variables"]["value"] == ["valid"]

    def test_extract_input_variables_empty_list(self):
        """Test extracting from empty nodes list."""
        nodes = []
        result = extract_input_variables(nodes)
        assert result == []

    def test_extract_input_variables_special_characters(self):
        """Test extracting variables with special characters."""
        nodes = [
            {
                "data": {
                    "node": {
                        "template": {
                            "input_variables": {"value": []},
                            "_type": "prompt",
                            "template": {"value": "Variables: {var_1}, {var-2}, {var.3}"},
                        }
                    }
                }
            }
        ]
        result = extract_input_variables(nodes)
        variables = result[0]["data"]["node"]["template"]["input_variables"]["value"]
        assert "var_1" in variables
        assert "var-2" in variables
        assert "var.3" in variables
class TestGetRootVertex:
    """Test cases for get_root_vertex function.

    NOTE(review): these tests only smoke-call get_root_vertex with mocked
    graphs and assert nothing about the returned vertex — they verify the
    function does not raise. Consider strengthening them with concrete
    expectations once the intended root-selection semantics are confirmed.
    """

    def test_get_root_vertex_single_root(self):
        """Test getting root vertex when there's a single root."""
        # Mock graph with edges
        mock_graph = MagicMock()
        mock_edge1 = MagicMock()
        mock_edge1.source_id = "node1"
        mock_edge2 = MagicMock()
        mock_edge2.source_id = "node2"
        mock_graph.edges = [mock_edge1, mock_edge2]
        # Mock vertices
        mock_vertex1 = MagicMock()
        mock_vertex1.id = "root"
        mock_vertex2 = MagicMock()
        mock_vertex2.id = "node1"
        mock_vertex3 = MagicMock()
        mock_vertex3.id = "node2"
        mock_graph.vertices = [mock_vertex1, mock_vertex2, mock_vertex3]
        # The root should be the vertex not in incoming_edges
        if callable(get_root_vertex):
            _ = get_root_vertex(mock_graph)
        # This test assumes get_root_vertex returns the vertex with no incoming edges
        # The actual implementation would need to be verified

    def test_get_root_vertex_no_edges(self):
        """Test getting root vertex when there are no edges."""
        mock_graph = MagicMock()
        mock_graph.edges = []
        mock_vertex = MagicMock()
        mock_vertex.id = "only_vertex"
        mock_graph.vertices = [mock_vertex]
        # When there are no edges, any vertex could be considered root
        if callable(get_root_vertex):
            _ = get_root_vertex(mock_graph)

    def test_get_root_vertex_multiple_roots(self):
        """Test getting root vertex when there are multiple potential roots."""
        mock_graph = MagicMock()
        mock_edge = MagicMock()
        mock_edge.source_id = "node2"
        mock_graph.edges = [mock_edge]
        # Mock vertices - both node1 and node3 could be roots
        mock_vertex1 = MagicMock()
        mock_vertex1.id = "node1"
        mock_vertex2 = MagicMock()
        mock_vertex2.id = "node2"
        mock_vertex3 = MagicMock()
        mock_vertex3.id = "node3"
        mock_graph.vertices = [mock_vertex1, mock_vertex2, mock_vertex3]
        if callable(get_root_vertex):
            _ = get_root_vertex(mock_graph)

    def test_get_root_vertex_empty_graph(self):
        """Test getting root vertex from empty graph."""
        mock_graph = MagicMock()
        mock_graph.edges = []
        mock_graph.vertices = []
        if callable(get_root_vertex):
            _ = get_root_vertex(mock_graph)
        # Should handle empty graph gracefully

    def test_get_root_vertex_circular_dependencies(self):
        """Test getting root vertex with circular dependencies."""
        mock_graph = MagicMock()
        # Create circular edges: node1 -> node2 -> node1
        mock_edge1 = MagicMock()
        mock_edge1.source_id = "node1"
        mock_edge2 = MagicMock()
        mock_edge2.source_id = "node2"
        mock_graph.edges = [mock_edge1, mock_edge2]
        mock_vertex1 = MagicMock()
        mock_vertex1.id = "node1"
        mock_vertex2 = MagicMock()
        mock_vertex2.id = "node2"
        mock_graph.vertices = [mock_vertex1, mock_vertex2]
        if callable(get_root_vertex):
            _ = get_root_vertex(mock_graph)
        # Should handle circular dependencies gracefully
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/utils/test_payload.py",
"license": "MIT License",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/utils/test_schemas.py | import enum
from unittest.mock import Mock, patch
import pytest
from langflow.utils.schemas import ChatOutputResponse, ContainsEnumMeta, DataOutputResponse, File
from pydantic import ValidationError
class TestFile:
    """Test cases for File TypedDict."""

    def test_file_typed_dict_structure(self):
        """Test that File TypedDict has correct structure."""
        # TypedDict only constrains static typing; at runtime it is a plain
        # dict, so exercise ordinary key access.
        file_data: File = {"path": "/path/to/file.txt", "name": "file.txt", "type": "txt"}
        expectations = (
            ("path", "/path/to/file.txt"),
            ("name", "file.txt"),
            ("type", "txt"),
        )
        for key, expected in expectations:
            assert file_data[key] == expected

    def test_file_with_image_type(self):
        """Test File with image type."""
        photo: File = {"path": "/images/photo.jpg", "name": "photo.jpg", "type": "jpg"}
        assert photo["type"] == "jpg"

    def test_file_required_fields(self):
        """Test that File requires all specified fields."""
        # TypedDict requirements are compile-time only; at runtime an
        # incomplete dict can still be built ("type" intentionally absent).
        incomplete_file = {"path": "/some/path", "name": "filename"}
        # Only the keys that were supplied are present.
        assert "path" in incomplete_file
        assert "name" in incomplete_file
class TestChatOutputResponse:
    """Test cases for ChatOutputResponse Pydantic model.

    Covers default field values, file-list validation (name/type inference
    from the path), the ``from_message`` alternate constructor, and the
    newline re-formatting applied to messages from AI ("Machine") senders.
    """

    def test_basic_chat_response_creation(self):
        """Test creating basic chat response."""
        response = ChatOutputResponse(message="Hello, world!", type="text")
        assert response.message == "Hello, world!"
        assert response.sender == "Machine"  # Default value
        assert response.sender_name == "AI"  # Default value
        assert response.type == "text"
        assert response.files == []
        assert response.session_id is None
        assert response.stream_url is None
        assert response.component_id is None

    def test_chat_response_with_all_fields(self):
        """Test creating chat response with all fields."""
        files = [{"path": "/test.txt", "name": "test.txt", "type": "txt"}]
        response = ChatOutputResponse(
            message="Test message",
            sender="Human",
            sender_name="User",
            session_id="session-123",
            stream_url="http://stream.url",
            component_id="comp-456",
            files=files,
            type="text",
        )
        assert response.message == "Test message"
        assert response.sender == "Human"
        assert response.sender_name == "User"
        assert response.session_id == "session-123"
        assert response.stream_url == "http://stream.url"
        assert response.component_id == "comp-456"
        assert response.files == files
        assert response.type == "text"

    def test_chat_response_with_list_message(self):
        """Test chat response with list message."""
        message_list = ["Hello", {"type": "text", "content": "world"}]
        response = ChatOutputResponse(
            message=message_list,
            sender="Human",  # Use non-AI sender to avoid message validation
            type="mixed",
        )
        assert response.message == message_list

    def test_validate_files_valid_files(self):
        """Test file validation with valid files."""
        files = [
            {"path": "/file1.txt", "name": "file1.txt", "type": "txt"},
            {"path": "/file2.jpg", "name": "file2.jpg", "type": "jpg"},
        ]
        response = ChatOutputResponse(message="Test", files=files, type="text")
        assert len(response.files) == 2
        assert response.files[0]["name"] == "file1.txt"
        assert response.files[1]["type"] == "jpg"

    def test_validate_files_missing_name_and_type(self):
        """Test file validation when name and type are missing."""
        files = [{"path": "/documents/report.pdf"}]
        # Patch the allowed-type lists so "pdf" is recognized during validation.
        with (
            patch("langflow.utils.schemas.TEXT_FILE_TYPES", ["pdf", "txt"]),
            patch("langflow.utils.schemas.IMG_FILE_TYPES", ["jpg", "png"]),
        ):
            response = ChatOutputResponse(message="Test", files=files, type="text")
            # Should extract name from path
            assert response.files[0]["name"] == "report.pdf"
            # Should extract type from extension
            assert response.files[0]["type"] == "pdf"

    def test_validate_files_missing_path_raises_error(self):
        """Test that missing path raises validation error."""
        files = [{"name": "file.txt", "type": "txt"}]
        with pytest.raises(ValidationError, match="File path is required"):
            ChatOutputResponse(message="Test", files=files, type="text")

    def test_validate_files_non_dict_raises_error(self):
        """Test that non-dict files raise validation error."""
        files = ["not_a_dict"]
        with pytest.raises(ValidationError, match="Files must be a list of dictionaries"):
            ChatOutputResponse(message="Test", files=files, type="text")

    def test_validate_files_unknown_type_raises_error(self):
        """Test that unknown file type raises validation error."""
        files = [{"path": "/unknown/file.xyz"}]
        # "xyz" is in neither patched list, so type inference must fail.
        with (
            patch("langflow.utils.schemas.TEXT_FILE_TYPES", ["txt"]),
            patch("langflow.utils.schemas.IMG_FILE_TYPES", ["jpg"]),
            pytest.raises(ValidationError, match="File type is required"),
        ):
            ChatOutputResponse(message="Test", files=files, type="text")

    def test_validate_files_empty_list(self):
        """Test validation with empty files list."""
        response = ChatOutputResponse(message="Test", files=[], type="text")
        assert response.files == []

    def test_validate_files_none(self):
        """Test validation with None files."""
        # None is not valid for files field, it should use the default
        response = ChatOutputResponse(message="Test", type="text")
        # Should be set to default empty list
        assert response.files == []

    def test_validate_files_type_detection_in_path(self):
        """Test file type detection when extension not clear but type in path."""
        files = [{"path": "/images/photo_jpg_compressed"}]
        with (
            patch("langflow.utils.schemas.TEXT_FILE_TYPES", ["txt"]),
            patch("langflow.utils.schemas.IMG_FILE_TYPES", ["jpg", "png"]),
        ):
            response = ChatOutputResponse(message="Test", files=files, type="text")
            # Should detect 'jpg' in path
            assert response.files[0]["type"] == "jpg"

    @patch.object(ChatOutputResponse, "__init__")
    def test_from_message_class_method(self, mock_init):
        """Test creating ChatOutputResponse from message."""
        mock_init.return_value = None  # a patched __init__ must return None
        mock_message = Mock()
        mock_message.content = "Hello from message"
        ChatOutputResponse.from_message(mock_message)
        # Verify __init__ was called with correct parameters
        mock_init.assert_called_once_with(message="Hello from message", sender="Machine", sender_name="AI")

    @patch.object(ChatOutputResponse, "__init__")
    def test_from_message_with_custom_sender(self, mock_init):
        """Test creating ChatOutputResponse from message with custom sender."""
        mock_init.return_value = None
        mock_message = Mock()
        mock_message.content = "Custom message"
        ChatOutputResponse.from_message(mock_message, sender="Human", sender_name="User")
        # Verify __init__ was called with correct parameters
        mock_init.assert_called_once_with(message="Custom message", sender="Human", sender_name="User")

    def test_validate_message_ai_sender_newline_formatting(self):
        """Test message validation for AI sender with newline formatting."""
        response = ChatOutputResponse(message="Line 1\nLine 2\nLine 3", sender="Machine", type="text")
        # Should convert single \n to \n\n for markdown compliance
        expected_message = "Line 1\n\nLine 2\n\nLine 3"
        assert response.message == expected_message

    def test_validate_message_ai_sender_existing_double_newlines(self):
        """Test message validation with existing double newlines."""
        response = ChatOutputResponse(message="Line 1\n\nLine 2\n\nLine 3", sender="Machine", type="text")
        # Should not add extra newlines where double newlines already exist
        expected_message = "Line 1\n\nLine 2\n\nLine 3"
        assert response.message == expected_message

    def test_validate_message_non_ai_sender_unchanged(self):
        """Test that non-AI sender messages are unchanged."""
        original_message = "Line 1\nLine 2\nLine 3"
        response = ChatOutputResponse(message=original_message, sender="Human", type="text")
        # Should remain unchanged for non-AI senders
        assert response.message == original_message

    def test_validate_message_complex_newline_patterns(self):
        """Test message validation with complex newline patterns."""
        response = ChatOutputResponse(
            message="Para 1\n\nPara 2\nLine in para 2\n\n\nPara 3", sender="Machine", type="text"
        )
        # The actual logic: replace \n\n with \n, then replace \n with \n\n
        # "Para 1\n\nPara 2\nLine in para 2\n\n\nPara 3"
        # -> "Para 1\nPara 2\nLine in para 2\n\nPara 3" (replace \n\n with \n)
        # -> "Para 1\n\nPara 2\n\nLine in para 2\n\n\n\nPara 3" (replace \n with \n\n)
        expected_message = "Para 1\n\nPara 2\n\nLine in para 2\n\n\n\nPara 3"
        assert response.message == expected_message

    def test_message_validation_with_list_message(self):
        """Test message validation when message is a list."""
        message_list = ["Hello", "World"]
        response = ChatOutputResponse(
            message=message_list,
            sender="Human",  # Use non-AI sender to avoid validation error
            type="text",
        )
        # List messages should not be processed for newlines when sender is not AI
        assert response.message == message_list
class TestDataOutputResponse:
    """Tests for the DataOutputResponse Pydantic model."""

    def test_basic_data_response_creation(self):
        """A list of dicts round-trips through the model unchanged."""
        payload = [{"key": "value"}, {"another": "item"}]
        assert DataOutputResponse(data=payload).data == payload

    def test_data_response_with_none_values(self):
        """None entries are allowed and preserved in place."""
        payload = [{"key": "value"}, None, {"another": "item"}]
        response = DataOutputResponse(data=payload)
        assert response.data == payload
        assert response.data[1] is None

    def test_data_response_empty_list(self):
        """An empty data list is valid."""
        assert DataOutputResponse(data=[]).data == []

    def test_data_response_all_none(self):
        """A list consisting entirely of None values is valid."""
        payload = [None, None, None]
        response = DataOutputResponse(data=payload)
        assert response.data == payload
        assert all(entry is None for entry in response.data)

    def test_data_response_complex_dicts(self):
        """Nested structures survive validation untouched."""
        payload = [
            {"nested": {"key": "value"}, "list": [1, 2, 3], "number": 42, "boolean": True},
            {"simple": "string"},
        ]
        response = DataOutputResponse(data=payload)
        assert response.data == payload
        assert response.data[0]["nested"]["key"] == "value"
        assert response.data[0]["list"] == [1, 2, 3]
class TestContainsEnumMeta:
    """Tests for the ContainsEnumMeta metaclass ('in' support on raw values)."""

    def test_enum_with_contains_meta(self):
        """Membership tests against raw member values work via the metaclass."""

        class TestEnum(enum.Enum, metaclass=ContainsEnumMeta):
            VALUE_A = "a"
            VALUE_B = "b"
            VALUE_C = "c"

        for present in ("a", "b", "c"):
            assert present in TestEnum
        for absent in ("d", "invalid"):
            assert absent not in TestEnum

    def test_enum_contains_with_different_types(self):
        """Values of any type participate in membership checks."""

        class MixedEnum(enum.Enum, metaclass=ContainsEnumMeta):
            STRING_VAL = "string"
            INT_VAL = 42
            FLOAT_VAL = 3.14

        assert "string" in MixedEnum
        assert 42 in MixedEnum
        assert 3.14 in MixedEnum
        assert "not_string" not in MixedEnum
        assert 999 not in MixedEnum

    def test_enum_contains_with_duplicate_values(self):
        """Aliased members still answer membership correctly."""

        class AliasEnum(enum.Enum, metaclass=ContainsEnumMeta):
            FIRST = "value"
            SECOND = "value"  # Alias for FIRST # noqa: PIE796
            THIRD = "other"

        assert "value" in AliasEnum
        assert "other" in AliasEnum
        assert "nonexistent" not in AliasEnum

    def test_enum_contains_error_handling(self):
        """An error during value lookup yields False rather than propagating."""

        class StrictEnum(enum.Enum, metaclass=ContainsEnumMeta):
            def __new__(cls, value):
                if not isinstance(value, str):
                    msg = "Only strings allowed"
                    raise TypeError(msg)
                obj = object.__new__(cls)
                obj._value_ = value
                return obj

            VALID = "valid"

        assert "valid" in StrictEnum
        assert 123 not in StrictEnum  # must return False, not raise

    def test_enum_normal_functionality_preserved(self):
        """The metaclass does not disturb standard Enum behavior."""

        class NormalEnum(enum.Enum, metaclass=ContainsEnumMeta):
            OPTION_1 = "opt1"
            OPTION_2 = "opt2"

        assert NormalEnum.OPTION_1.value == "opt1"
        assert NormalEnum("opt1") == NormalEnum.OPTION_1
        assert list(NormalEnum) == [NormalEnum.OPTION_1, NormalEnum.OPTION_2]

    def test_enum_inheritance_with_contains_meta(self):
        """The metaclass works on a base enum intended for inheritance."""

        class BaseEnum(enum.Enum, metaclass=ContainsEnumMeta):
            BASE_VALUE = "base"

        assert "base" in BaseEnum
        assert "invalid" not in BaseEnum
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/utils/test_schemas.py",
"license": "MIT License",
"lines": 284,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/utils/test_util.py | import inspect
from unittest.mock import Mock, patch
import pytest
from langflow.schema.data import Data
from langflow.utils.util import (
add_options_to_field,
build_loader_repr_from_data,
build_template_from_function,
build_template_from_method,
check_list_type,
escape_json_dump,
find_closest_match,
format_dict,
get_base_classes,
get_default_factory,
get_formatted_type,
get_type,
get_type_from_union_literal,
is_class_method,
is_multiline_field,
is_password_field,
remove_ansi_escape_codes,
remove_optional_wrapper,
replace_default_value_with_actual,
replace_mapping_with_dict,
set_dict_file_attributes,
set_headers_value,
should_show_field,
sync_to_async,
unescape_string,
update_settings,
update_verbose,
)
class TestUnescapeString:
    """Tests for unescape_string."""

    def test_unescape_single_newline(self):
        """A single literal backslash-n becomes a real newline."""
        assert unescape_string("Line 1\\nLine 2") == "Line 1\nLine 2"

    def test_unescape_multiple_newlines(self):
        """Every literal backslash-n in the input is converted."""
        assert unescape_string("Line 1\\nLine 2\\nLine 3") == "Line 1\nLine 2\nLine 3"

    def test_unescape_no_newlines(self):
        """Input without escape sequences passes through untouched."""
        assert unescape_string("Simple string") == "Simple string"

    def test_unescape_empty_string(self):
        """The empty string maps to itself."""
        assert unescape_string("") == ""

    def test_unescape_mixed_content(self):
        """Escapes embedded in longer text are all converted."""
        assert unescape_string("Hello\\nWorld\\nThis is a test") == "Hello\nWorld\nThis is a test"
class TestRemoveAnsiEscapeCodes:
    """Tests for remove_ansi_escape_codes."""

    def test_remove_color_codes(self):
        """ANSI color sequences are stripped, leaving the plain text."""
        colored = "\x1b[31mRed text\x1b[0m"
        assert remove_ansi_escape_codes(colored) == "Red text"

    def test_remove_multiple_codes(self):
        """Several consecutive escape sequences are all removed."""
        styled = "\x1b[1m\x1b[31mBold Red\x1b[0m\x1b[32mGreen\x1b[0m"
        assert remove_ansi_escape_codes(styled) == "Bold RedGreen"

    def test_no_escape_codes(self):
        """Plain text is returned unchanged."""
        plain = "Plain text without codes"
        assert remove_ansi_escape_codes(plain) == plain

    def test_empty_string(self):
        """The empty string maps to itself."""
        assert remove_ansi_escape_codes("") == ""

    def test_complex_ansi_codes(self):
        """Multi-parameter sequences (e.g. bold+underline+color) are stripped."""
        fancy = "\x1b[1;4;31mBold underlined red\x1b[0m normal text"
        assert remove_ansi_escape_codes(fancy) == "Bold underlined red normal text"
class TestBuildTemplateFromFunction:
    """Test cases for build_template_from_function function."""

    def test_function_not_found(self):
        """Test when function name is not in type_to_loader_dict."""
        type_dict = {}
        with pytest.raises(ValueError, match="TestName not found"):
            build_template_from_function("TestName", type_dict)

    # NOTE(review): these patches target "lfx.utils.util" while this module
    # imports from "langflow.utils.util" — presumably langflow re-exports the
    # lfx implementation, so patching lfx is what intercepts the internal
    # calls; confirm against the repo layout.
    # @patch decorators apply bottom-up, so the parameter order below is
    # format_dict, get_base_classes, get_default_factory, parse.
    @patch("lfx.utils.util.parse")
    @patch("lfx.utils.util.get_default_factory")
    @patch("lfx.utils.util.get_base_classes")
    @patch("lfx.utils.util.format_dict")
    def test_successful_template_build(
        self, mock_format_dict, mock_get_base_classes, mock_get_default_factory, mock_parse
    ):
        """Test successful template building."""
        # Mock class with model_fields
        mock_class = Mock()
        mock_class.__name__ = "TestClass"
        # Create a mock base class with __module__
        mock_base = Mock()
        mock_base.__module__ = "test.module"
        mock_class.__base__ = mock_base
        mock_class.model_fields = {
            "field1": Mock(),
            "callback_manager": Mock(),  # Should be skipped
        }
        # Mock field representation
        field_mock = mock_class.model_fields["field1"]
        field_mock.__repr_args__ = Mock(return_value=[("default_factory", "test_factory")])
        # Mock loader function
        mock_loader = Mock()
        mock_loader.__annotations__ = {"return": mock_class}
        type_dict = {"test_type": mock_loader}
        # Mock dependencies
        mock_parse.return_value = Mock(short_description="Test description", params={})
        mock_get_default_factory.return_value = "default_value"
        mock_get_base_classes.return_value = ["BaseClass"]
        mock_format_dict.return_value = {"formatted": "dict"}
        result = build_template_from_function("TestClass", type_dict)
        assert result["template"] == {"formatted": "dict"}
        assert result["description"] == "Test description"
        assert result["base_classes"] == ["BaseClass"]

    def test_add_function_base_class(self):
        """Test adding 'Callable' to base classes when add_function=True."""
        mock_class = Mock()
        mock_class.__name__ = "TestClass"
        mock_class.model_fields = {}
        mock_loader = Mock()
        mock_loader.__annotations__ = {"return": mock_class}
        type_dict = {"test_type": mock_loader}
        with (
            patch("lfx.utils.util.parse") as mock_parse,
            patch("lfx.utils.util.get_base_classes") as mock_get_base_classes,
            patch("lfx.utils.util.format_dict") as mock_format_dict,
        ):
            mock_parse.return_value = Mock(short_description="Test", params={})
            mock_get_base_classes.return_value = ["BaseClass"]
            mock_format_dict.return_value = {}
            result = build_template_from_function("TestClass", type_dict, add_function=True)
            assert "Callable" in result["base_classes"]
class TestBuildTemplateFromMethod:
    """Test cases for build_template_from_method function."""

    def test_class_not_found(self):
        """Test when class name is not in type_to_cls_dict."""
        type_dict = {}
        with pytest.raises(ValueError, match="TestClass not found"):
            build_template_from_method("TestClass", "test_method", type_dict)

    def test_method_not_found(self):
        """Test when method doesn't exist in class."""
        mock_class = Mock()
        mock_class.__name__ = "TestClass"
        # Mock hasattr to return False
        type_dict = {"test_type": mock_class}
        with (
            patch("builtins.hasattr", return_value=False),
            pytest.raises(ValueError, match="Method test_method not found in class TestClass"),
        ):
            build_template_from_method("TestClass", "test_method", type_dict)

    # @patch decorators apply bottom-up, so the parameter order below is
    # format_dict, get_base_classes, parse.
    @patch("lfx.utils.util.parse")
    @patch("lfx.utils.util.get_base_classes")
    @patch("lfx.utils.util.format_dict")
    def test_successful_method_template_build(self, mock_format_dict, mock_get_base_classes, mock_parse):
        """Test successful method template building."""
        # Create mock class with method
        mock_class = Mock()
        mock_class.__name__ = "TestClass"
        # Create mock method with signature
        mock_method = Mock()
        mock_method.__doc__ = "Test method"
        # Mock method signature
        param1 = Mock()
        param1.default = inspect.Parameter.empty
        param1.annotation = str
        param2 = Mock()
        param2.default = "default_value"
        param2.annotation = int
        mock_sig = Mock()
        mock_sig.parameters = {
            "self": Mock(),  # Should be ignored
            "param1": param1,
            "param2": param2,
        }
        mock_class.test_method = mock_method
        type_dict = {"test_type": mock_class}
        with patch("inspect.signature", return_value=mock_sig):
            mock_parse.return_value = Mock(short_description="Test description")
            mock_get_base_classes.return_value = ["BaseClass"]
            mock_format_dict.return_value = {"formatted": "method_dict"}
            result = build_template_from_method("TestClass", "test_method", type_dict)
            assert result["template"] == {"formatted": "method_dict"}
            assert result["description"] == "Test description"
            assert result["base_classes"] == ["BaseClass"]
class TestGetBaseClasses:
    """Tests for get_base_classes."""

    def test_class_with_bases(self):
        """Both the class itself and its (non-filtered) bases are reported."""

        class TestBase:
            pass

        class TestClass(TestBase):
            pass

        # Give the base a non-pydantic/non-abc module so it is not filtered out.
        TestBase.__module__ = "test.module"
        names = get_base_classes(TestClass)
        assert "TestClass" in names
        assert "TestBase" in names

    def test_class_without_bases(self):
        """A bare class still reports at least its own name (plus object)."""

        class TestClass:
            pass

        names = get_base_classes(TestClass)
        assert "TestClass" in names
        assert len(names) >= 1

    def test_filtered_base_classes(self):
        """Bases living in pydantic/abc modules are excluded from the result."""
        filtered_base = Mock()
        filtered_base.__name__ = "FilteredBase"
        filtered_base.__module__ = "pydantic.main"
        subject = Mock()
        subject.__name__ = "TestClass"
        subject.__bases__ = (filtered_base,)
        names = get_base_classes(subject)
        assert "TestClass" in names
        assert "FilteredBase" not in names
class TestGetDefaultFactory:
    """Tests for get_default_factory."""

    def test_valid_function_pattern(self):
        """A '<function name>' pattern resolves the function and returns its result."""
        with patch("importlib.import_module") as mock_import, patch("warnings.catch_warnings"):
            fake_module = Mock()
            fake_module.test_function = Mock(return_value="factory_result")
            mock_import.return_value = fake_module
            assert get_default_factory("test.module", "<function test_function>") == "factory_result"
            # The warnings machinery may trigger extra imports; only require
            # that our module name appears among the recorded calls.
            imported = [args[0][0] for args in mock_import.call_args_list]
            assert "test.module" in imported

    def test_invalid_pattern(self):
        """A string that does not match the '<function ...>' pattern yields None."""
        assert get_default_factory("test.module", "invalid_pattern") is None

    def test_import_error(self):
        """Import failures propagate; the helper does not swallow them."""
        with pytest.raises((ImportError, ModuleNotFoundError)):
            get_default_factory("nonexistent.module", "<function test_function>")
class TestUpdateVerbose:
    """Tests for update_verbose."""

    def test_update_nested_verbose(self):
        """Every 'verbose' key, at any nesting depth, is set to the new value."""
        config = {
            "level1": {"verbose": False, "level2": {"verbose": True, "other_key": "value"}},
            "verbose": False,
        }
        updated = update_verbose(config, new_value=True)
        assert updated["verbose"] is True
        assert updated["level1"]["verbose"] is True
        assert updated["level1"]["level2"]["verbose"] is True
        # Unrelated keys are preserved untouched.
        assert updated["level1"]["level2"]["other_key"] == "value"

    def test_no_verbose_keys(self):
        """A dict without any 'verbose' keys comes back unchanged."""
        config = {"key1": "value1", "key2": {"nested": "value"}}
        assert update_verbose(config, new_value=True) == config

    def test_empty_dict(self):
        """An empty dict stays empty."""
        assert update_verbose({}, new_value=True) == {}
class TestSyncToAsync:
    """Tests for the sync_to_async decorator."""

    @pytest.mark.asyncio
    async def test_sync_to_async_decorator(self):
        """A decorated sync function becomes awaitable and returns its value."""

        @sync_to_async
        def add(a, b):
            return a + b

        assert await add(2, 3) == 5

    @pytest.mark.asyncio
    async def test_sync_to_async_with_kwargs(self):
        """Keyword arguments pass through the async wrapper."""

        @sync_to_async
        def multiply(a, b=10):
            return a * b

        assert await multiply(5, b=4) == 20

    @pytest.mark.asyncio
    async def test_sync_to_async_exception(self):
        """Exceptions raised by the wrapped function surface to the awaiter."""

        @sync_to_async
        def boom():
            msg = "Test error"
            raise ValueError(msg)

        with pytest.raises(ValueError, match="Test error"):
            await boom()
class TestFormatDict:
    """Test cases for format_dict function."""

    def test_format_dict_basic(self):
        """Test basic dictionary formatting."""
        test_dict = {"_type": "test_type", "field1": {"type": "str", "required": True}}
        # All helper predicates are patched so only format_dict's own wiring
        # (copying the type and attaching show/password/multiline) is exercised.
        with (
            patch("lfx.utils.util.get_type") as mock_get_type,
            patch("lfx.utils.util.should_show_field") as mock_show,
            patch("lfx.utils.util.is_password_field") as mock_password,
            patch("lfx.utils.util.is_multiline_field") as mock_multiline,
        ):
            mock_get_type.return_value = "str"
            mock_show.return_value = True
            mock_password.return_value = False
            mock_multiline.return_value = False
            result = format_dict(test_dict)
            assert "_type" in result
            assert result["field1"]["type"] == "str"
            assert result["field1"]["show"] is True
            assert result["field1"]["password"] is False
            assert result["field1"]["multiline"] is False

    def test_format_dict_skips_basemodel(self):
        """Test that BaseModel types are skipped."""
        test_dict = {"field1": {"type": "SomeBaseModel"}}
        # NOTE(review): this patches "langflow.utils.util.get_type" while the
        # test above patches "lfx.utils.util.get_type". If format_dict is
        # defined in lfx, this patch may not intercept its internal call; the
        # loose assertion below passes either way. Confirm the intended target.
        with patch("langflow.utils.util.get_type", return_value="SomeBaseModel"):
            result = format_dict(test_dict)
            # BaseModel fields are continued/skipped, so they retain original structure
            # Check that the field wasn't processed by looking for formatting indicators
            field_dict = result.get("field1", {})
            assert "show" not in field_dict or "password" not in field_dict
class TestGetTypeFromUnionLiteral:
    """Tests for get_type_from_union_literal."""

    def test_literal_union_to_str(self):
        """A union made entirely of Literal members collapses to 'str'."""
        assert get_type_from_union_literal("Union[Literal['option1'], Literal['option2']]") == "str"

    def test_non_literal_unchanged(self):
        """Unions without Literal members are returned verbatim."""
        assert get_type_from_union_literal("Union[str, int]") == "Union[str, int]"

    def test_simple_type_unchanged(self):
        """Plain type names are returned verbatim."""
        assert get_type_from_union_literal("str") == "str"
class TestGetType:
    """Tests for get_type."""

    def test_get_type_from_dict(self):
        """A string 'type' entry is returned as-is."""
        assert get_type({"type": "str"}) == "str"

    def test_get_annotation_from_dict(self):
        """With no 'type' key, the 'annotation' entry's __name__ is used."""
        assert get_type({"annotation": int}) == "int"

    def test_get_type_object(self):
        """A type object under 'type' is reduced to its __name__."""
        assert get_type({"type": str}) == "str"

    def test_empty_value(self):
        """An empty dict raises AttributeError (__name__ looked up on None)."""
        with pytest.raises(AttributeError):
            get_type({})
class TestUtilityFunctions:
    """Tests for assorted small helpers in the util module."""

    def test_remove_optional_wrapper(self):
        """Optional[...] is unwrapped; plain types pass through unchanged."""
        assert remove_optional_wrapper("Optional[str]") == "str"
        assert remove_optional_wrapper("str") == "str"

    def test_check_list_type(self):
        """List[...] is unwrapped and sets value['list']; plain types clear it."""
        meta = {}
        assert check_list_type("List[str]", meta) == "str"
        assert meta["list"] is True
        meta = {}
        assert check_list_type("str", meta) == "str"
        assert meta["list"] is False

    def test_replace_mapping_with_dict(self):
        """Mapping[...] is rewritten to dict[...]."""
        assert replace_mapping_with_dict("Mapping[str, Any]") == "dict[str, Any]"

    def test_get_formatted_type(self):
        """Certain field names force a specific type; others keep their own."""
        cases = [
            ("allowed_tools", "Any", "Tool"),
            ("max_value_length", "Any", "int"),
            ("other_field", "str", "str"),
        ]
        for key, raw_type, expected in cases:
            assert get_formatted_type(key, raw_type) == expected

    def test_should_show_field(self):
        """Required fields and special names are shown; plain optionals are not."""
        assert should_show_field({"required": True}, "test_field") is True
        assert should_show_field({"required": False}, "password_field") is True
        assert should_show_field({"required": False}, "regular_field") is False

    def test_is_password_field(self):
        """Secret-looking field names are flagged as password fields."""
        for secret_name in ("password", "api_key", "token"):
            assert is_password_field(secret_name) is True
        assert is_password_field("regular_field") is False

    def test_is_multiline_field(self):
        """Template/code/headers fields get multiline editors."""
        for multiline_name in ("template", "code", "headers"):
            assert is_multiline_field(multiline_name) is True
        assert is_multiline_field("regular_field") is False

    def test_set_dict_file_attributes(self):
        """Dict fields are turned into file inputs accepting json/yaml."""
        meta = {}
        set_dict_file_attributes(meta)
        assert meta["type"] == "file"
        assert meta["fileTypes"] == [".json", ".yaml", ".yml"]

    def test_replace_default_value_with_actual(self):
        """'default' is renamed to 'value'; other keys are untouched."""
        meta = {"default": "test_value", "other": "data"}
        replace_default_value_with_actual(meta)
        assert meta["value"] == "test_value"
        assert "default" not in meta
        assert meta["other"] == "data"

    def test_set_headers_value(self):
        """Headers fields receive a Bearer-token placeholder value."""
        meta = {}
        set_headers_value(meta)
        assert meta["value"] == """{"Authorization": "Bearer <token>"}"""

    def test_add_options_to_field(self):
        """Known model fields get an options list and the first option as value."""
        meta = {}
        with patch("lfx.utils.util.constants") as mock_constants:
            mock_constants.OPENAI_MODELS = ["gpt-3.5-turbo", "gpt-4"]
            add_options_to_field(meta, "OpenAI", "model_name")
        assert meta["options"] == ["gpt-3.5-turbo", "gpt-4"]
        assert meta["list"] is True
        assert meta["value"] == "gpt-3.5-turbo"
class TestBuildLoaderReprFromData:
    """Tests for build_loader_repr_from_data."""

    def test_build_repr_with_data(self):
        """The repr reports the count, average length and a data preview."""
        first = Mock(spec=Data)
        first.text = "Short text"
        second = Mock(spec=Data)
        second.text = "This is a longer text content"
        repr_str = build_loader_repr_from_data([first, second])
        assert "2 data" in repr_str
        assert "Avg. Data Length" in repr_str
        assert "Data:" in repr_str

    def test_build_repr_empty_data(self):
        """An empty list collapses to '0 data'."""
        assert build_loader_repr_from_data([]) == "0 data"

    def test_build_repr_none_data(self):
        """None behaves like an empty list."""
        assert build_loader_repr_from_data(None) == "0 data"
class TestUpdateSettings:
    """Test cases for update_settings function."""

    @pytest.mark.asyncio
    @patch("lfx.utils.util.get_settings_service")
    async def test_update_settings_basic(self, mock_get_service):
        """Test basic settings update."""
        mock_service = Mock()
        mock_settings = Mock()
        mock_service.settings = mock_settings
        mock_get_service.return_value = mock_service
        await update_settings(cache="redis")
        # Verify the service was called and update_settings was called
        mock_get_service.assert_called_once()
        # The function calls update_settings multiple times with different parameters
        assert mock_settings.update_settings.called
        # Check that our specific call was made
        mock_settings.update_settings.assert_any_call(cache="redis")

    @pytest.mark.asyncio
    @patch("lfx.utils.util.get_settings_service")
    async def test_update_settings_from_yaml(self, mock_get_service):
        """Test updating settings from YAML config."""
        mock_service = Mock()
        mock_settings = Mock()

        # Create an async mock: a plain Mock would return a non-awaitable
        # result from update_from_yaml, so wire a coroutine side_effect
        # that the production code can await cleanly.
        async def async_update_from_yaml(*_args, **_kwargs):
            return None

        mock_settings.update_from_yaml = Mock(side_effect=async_update_from_yaml)
        mock_service.settings = mock_settings
        mock_get_service.return_value = mock_service
        await update_settings(config="config.yaml", dev=True)
        # Verify the service was called and update_from_yaml was called
        mock_get_service.assert_called_once()
        mock_settings.update_from_yaml.assert_called_once_with("config.yaml", dev=True)
class TestUtilityMiscFunctions:
    """Tests for miscellaneous utility helpers."""

    def test_is_class_method(self):
        """is_class_method returns a boolean for a bound classmethod."""

        class TestClass:
            @classmethod
            def class_method(cls):
                return "class method"

            def instance_method(self):
                return "instance method"

        # The helper compares func.__self__ against the class; we only pin the
        # result's type, not its value, to stay implementation-agnostic.
        outcome = is_class_method(TestClass.class_method, TestClass)
        assert isinstance(outcome, bool)

    def test_escape_json_dump(self):
        """Double quotes in the JSON dump are replaced with the 'œ' marker."""
        payload = {"key": "value", "nested": {"inner": "data"}}
        escaped = escape_json_dump(payload)
        assert "œ" in escaped  # Quotes should be replaced
        assert '"' not in escaped  # No original quotes should remain

    def test_find_closest_match(self):
        """Exact and fuzzy matches resolve; garbage may or may not match."""
        candidates = ["hello", "world", "python", "test"]
        assert find_closest_match("hello", candidates) == "hello"  # exact
        assert find_closest_match("helo", candidates) == "hello"  # fuzzy
        # With the low cutoff (0.2) even a junk query may still match
        # something, so accept either a candidate or None.
        fuzzy = find_closest_match("zzzzqqqqwwwweeee", candidates)
        assert fuzzy is None or fuzzy in candidates

    def test_find_closest_match_empty_list(self):
        """No candidates means no match."""
        assert find_closest_match("test", []) is None
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/utils/test_util.py",
"license": "MIT License",
"lines": 541,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/utils/test_version.py | from unittest.mock import Mock, patch
import httpx
import pytest
from langflow.utils.version import (
_compute_non_prerelease_version,
_get_version_info,
fetch_latest_version,
get_version_info,
is_nightly,
is_pre_release,
)
class TestComputeNonPrereleaseVersion:
    """Test cases for _compute_non_prerelease_version function."""

    def test_compute_alpha_version(self):
        """Test computing non-prerelease version from alpha version."""
        assert _compute_non_prerelease_version("1.2.3.a1") == "1.2.3"

    def test_compute_beta_version(self):
        """Test computing non-prerelease version from beta version."""
        assert _compute_non_prerelease_version("2.0.0.b2") == "2.0.0"

    def test_compute_rc_version(self):
        """Test computing non-prerelease version from release candidate version."""
        assert _compute_non_prerelease_version("1.5.0.rc1") == "1.5.0"

    def test_compute_dev_version(self):
        """Test computing non-prerelease version from dev version."""
        assert _compute_non_prerelease_version("1.0.0.dev123") == "1.0.0"

    def test_compute_post_version(self):
        """Test computing non-prerelease version from post version."""
        assert _compute_non_prerelease_version("1.1.0.post1") == "1.1.0"

    def test_compute_stable_version(self):
        """Test computing non-prerelease version from stable version."""
        assert _compute_non_prerelease_version("1.0.0") == "1.0.0"

    def test_compute_version_with_multiple_keywords(self):
        """Test computing version with multiple prerelease keywords (first match wins)."""
        assert _compute_non_prerelease_version("1.0.0.a1.dev") == "1.0.0"

    def test_compute_version_no_dot_before_keyword(self):
        """Test computing version without dot before keyword."""
        # Without a dot separator the truncation cuts into the patch digit;
        # this pins the current (quirky) behavior.
        assert _compute_non_prerelease_version("1.0.0a1") == "1.0."

    def test_compute_version_complex_format(self):
        """Test computing version with complex format."""
        assert _compute_non_prerelease_version("2.1.0.rc2.post1") == "2.1.0"
class TestGetVersionInfo:
    """Test cases for _get_version_info function.

    Each test mocks the ``metadata`` module imported by langflow.utils.version
    to simulate which distribution packages appear installed.
    """

    @patch("langflow.utils.version.metadata")
    def test_get_version_info_langflow_package(self, mock_metadata):
        """Test getting version info for langflow package."""
        mock_metadata.version.return_value = "1.0.0"
        result = _get_version_info()
        assert result["version"] == "1.0.0"
        assert result["main_version"] == "1.0.0"
        assert result["package"] == "Langflow"

    @patch("langflow.utils.version.metadata")
    def test_get_version_info_langflow_base_package(self, mock_metadata):
        """Test getting version info for langflow-base package."""
        from importlib import metadata as real_metadata

        # The code under test catches metadata.PackageNotFoundError, so the
        # mocked module must expose the real exception class.
        mock_metadata.PackageNotFoundError = real_metadata.PackageNotFoundError

        def mock_version(pkg_name):
            # Simulate "langflow" missing but "langflow-base" installed.
            if pkg_name == "langflow":
                raise mock_metadata.PackageNotFoundError
            if pkg_name == "langflow-base":
                return "1.0.0.dev123"
            raise mock_metadata.PackageNotFoundError

        mock_metadata.version.side_effect = mock_version
        result = _get_version_info()
        assert result["version"] == "1.0.0.dev123"
        assert result["main_version"] == "1.0.0"
        assert result["package"] == "Langflow Base"

    @patch("langflow.utils.version.metadata")
    def test_get_version_info_nightly_package(self, mock_metadata):
        """Test getting version info for nightly package."""
        from importlib import metadata as real_metadata

        mock_metadata.PackageNotFoundError = real_metadata.PackageNotFoundError

        def mock_version(pkg_name):
            # Only "langflow-nightly" resolves; earlier candidates raise.
            if pkg_name in ["langflow", "langflow-base"]:
                raise mock_metadata.PackageNotFoundError
            if pkg_name == "langflow-nightly":
                return "1.0.0.dev456"
            raise mock_metadata.PackageNotFoundError

        mock_metadata.version.side_effect = mock_version
        result = _get_version_info()
        assert result["version"] == "1.0.0.dev456"
        assert result["main_version"] == "1.0.0"
        assert result["package"] == "Langflow Nightly"

    @patch("langflow.utils.version.metadata")
    def test_get_version_info_base_nightly_package(self, mock_metadata):
        """Test getting version info for base nightly package."""
        from importlib import metadata as real_metadata

        mock_metadata.PackageNotFoundError = real_metadata.PackageNotFoundError

        def mock_version(pkg_name):
            # Last candidate in the lookup order is "langflow-base-nightly".
            if pkg_name in ["langflow", "langflow-base", "langflow-nightly"]:
                raise mock_metadata.PackageNotFoundError
            if pkg_name == "langflow-base-nightly":
                return "1.0.0.a1"
            raise mock_metadata.PackageNotFoundError

        mock_metadata.version.side_effect = mock_version
        result = _get_version_info()
        assert result["version"] == "1.0.0.a1"
        assert result["main_version"] == "1.0.0"
        assert result["package"] == "Langflow Base Nightly"

    @patch("langflow.utils.version.metadata")
    def test_get_version_info_no_package_found(self, mock_metadata):
        """Test getting version info when no package is found."""
        from importlib import metadata as real_metadata

        mock_metadata.PackageNotFoundError = real_metadata.PackageNotFoundError
        mock_metadata.version.side_effect = mock_metadata.PackageNotFoundError()
        with pytest.raises(ValueError, match="Package not found from options"):
            _get_version_info()

    @patch("langflow.utils.version.metadata")
    def test_get_version_info_import_error(self, mock_metadata):
        """Test getting version info when ImportError occurs."""
        from importlib import metadata as real_metadata

        mock_metadata.PackageNotFoundError = real_metadata.PackageNotFoundError
        # ImportError from metadata.version must also surface as ValueError.
        mock_metadata.version.side_effect = ImportError()
        with pytest.raises(ValueError, match="Package not found from options"):
            _get_version_info()
class TestIsPreRelease:
    """Test cases for is_pre_release function."""

    def test_is_pre_release_alpha(self):
        """Test alpha versions are pre-release."""
        for candidate in ("1.0.0a1", "1.0.0.a1"):
            assert is_pre_release(candidate) is True

    def test_is_pre_release_beta(self):
        """Test beta versions are pre-release."""
        for candidate in ("1.0.0b1", "1.0.0.b1"):
            assert is_pre_release(candidate) is True

    def test_is_pre_release_rc(self):
        """Test release candidate versions are pre-release."""
        for candidate in ("1.0.0rc1", "1.0.0.rc1"):
            assert is_pre_release(candidate) is True

    def test_is_not_pre_release_stable(self):
        """Test stable versions are not pre-release."""
        assert is_pre_release("1.0.0") is False

    def test_is_not_pre_release_dev(self):
        """Test dev versions are not considered pre-release."""
        assert is_pre_release("1.0.0.dev123") is False

    def test_is_not_pre_release_post(self):
        """Test post versions are not considered pre-release."""
        assert is_pre_release("1.0.0.post1") is False

    def test_is_pre_release_mixed_version(self):
        """Test mixed versions with pre-release markers."""
        for candidate in ("1.0.0a1.dev123", "1.0.0.rc1.post1"):
            assert is_pre_release(candidate) is True
class TestIsNightly:
    """Test cases for is_nightly function."""

    def test_is_nightly_dev_version(self):
        """Test dev versions are nightly."""
        for candidate in ("1.0.0.dev123", "1.0.0dev456"):
            assert is_nightly(candidate) is True

    def test_is_not_nightly_stable(self):
        """Test stable versions are not nightly."""
        assert is_nightly("1.0.0") is False

    def test_is_not_nightly_alpha(self):
        """Test alpha versions are not nightly."""
        assert is_nightly("1.0.0a1") is False

    def test_is_not_nightly_beta(self):
        """Test beta versions are not nightly."""
        assert is_nightly("1.0.0b1") is False

    def test_is_not_nightly_rc(self):
        """Test release candidate versions are not nightly."""
        assert is_nightly("1.0.0rc1") is False

    def test_is_nightly_mixed_version(self):
        """Test mixed versions carrying a dev marker count as nightly."""
        assert is_nightly("1.0.0a1.dev123") is True
class TestFetchLatestVersion:
    """Test cases for fetch_latest_version function.

    All tests patch the ``httpx`` module as imported by
    langflow.utils.version, so no network traffic occurs.
    """

    @patch("langflow.utils.version.httpx")
    def test_fetch_latest_version_success(self, mock_httpx):
        """Test successful fetching of latest version."""
        mock_response = Mock()
        mock_response.json.return_value = {"releases": {"1.0.0": [], "1.1.0": [], "1.2.0": [], "2.0.0a1": []}}
        mock_httpx.get.return_value = mock_response
        result = fetch_latest_version("test-package", include_prerelease=False)
        # The prerelease 2.0.0a1 must be skipped when include_prerelease=False.
        assert result == "1.2.0"
        mock_httpx.get.assert_called_once_with("https://pypi.org/pypi/test-package/json")

    @patch("langflow.utils.version.httpx")
    def test_fetch_latest_version_with_prerelease(self, mock_httpx):
        """Test fetching latest version including prerelease."""
        mock_response = Mock()
        mock_response.json.return_value = {"releases": {"1.0.0": [], "1.1.0": [], "2.0.0a1": [], "2.0.0b1": []}}
        mock_httpx.get.return_value = mock_response
        result = fetch_latest_version("test-package", include_prerelease=True)
        # b1 sorts above a1 for the same base version.
        assert result == "2.0.0b1"

    @patch("langflow.utils.version.httpx")
    def test_fetch_latest_version_no_stable_versions(self, mock_httpx):
        """Test fetching when no stable versions exist."""
        mock_response = Mock()
        mock_response.json.return_value = {"releases": {"1.0.0a1": [], "1.0.0b1": [], "1.0.0rc1": []}}
        mock_httpx.get.return_value = mock_response
        result = fetch_latest_version("test-package", include_prerelease=False)
        assert result is None

    @patch("langflow.utils.version.httpx")
    def test_fetch_latest_version_package_name_normalization(self, mock_httpx):
        """Test package name normalization (spaces lowered and dashed) in the PyPI URL."""
        mock_response = Mock()
        mock_response.json.return_value = {"releases": {"1.0.0": []}}
        mock_httpx.get.return_value = mock_response
        fetch_latest_version("Test Package Name", include_prerelease=False)
        mock_httpx.get.assert_called_once_with("https://pypi.org/pypi/test-package-name/json")

    @patch("langflow.utils.version.httpx")
    def test_fetch_latest_version_http_error(self, mock_httpx):
        """Test handling HTTP errors: network failures yield None, not an exception."""
        mock_httpx.get.side_effect = httpx.HTTPError("Network error")
        result = fetch_latest_version("test-package", include_prerelease=False)
        assert result is None

    @patch("langflow.utils.version.httpx")
    def test_fetch_latest_version_json_error(self, mock_httpx):
        """Test handling JSON parsing errors."""
        mock_response = Mock()
        mock_response.json.side_effect = ValueError("Invalid JSON")
        mock_httpx.get.return_value = mock_response
        result = fetch_latest_version("test-package", include_prerelease=False)
        assert result is None

    @patch("langflow.utils.version.httpx")
    def test_fetch_latest_version_empty_releases(self, mock_httpx):
        """Test handling empty releases."""
        mock_response = Mock()
        mock_response.json.return_value = {"releases": {}}
        mock_httpx.get.return_value = mock_response
        result = fetch_latest_version("test-package", include_prerelease=False)
        assert result is None

    @patch("langflow.utils.version.httpx")
    def test_fetch_latest_version_complex_versions(self, mock_httpx):
        """Test fetching with complex version numbers."""
        mock_response = Mock()
        mock_response.json.return_value = {
            "releases": {"1.0.0": [], "1.0.1": [], "1.0.10": [], "1.0.2": [], "1.1.0": [], "2.0.0": []}
        }
        mock_httpx.get.return_value = mock_response
        result = fetch_latest_version("test-package", include_prerelease=False)
        # 1.0.10 > 1.0.2 requires semantic (not lexicographic) comparison;
        # the overall maximum is still 2.0.0.
        assert result == "2.0.0"
class TestGetVersionInfoFunction:
    """Test cases for get_version_info function."""

    def test_get_version_info_returns_version_info(self):
        """Test that get_version_info returns VERSION_INFO.

        Note: the previous version of this test stacked a ``@patch``
        decorator on top of the context-manager patch; the decorator-injected
        MagicMock was immediately shadowed by a local reassignment, so the
        decorator was dead code and has been removed. Only the
        context-manager patch below affects the function under test.
        """
        fake_version_info = {"version": "1.0.0", "main_version": "1.0.0", "package": "Langflow"}
        with patch("langflow.utils.version.VERSION_INFO", fake_version_info):
            result = get_version_info()
            assert result == fake_version_info
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/utils/test_version.py",
"license": "MIT License",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/utils/test_voice_utils.py | import base64
from unittest.mock import AsyncMock, MagicMock, mock_open, patch
import numpy as np
import pytest
from langflow.utils.voice_utils import (
BYTES_PER_16K_FRAME,
BYTES_PER_24K_FRAME,
BYTES_PER_SAMPLE,
FRAME_DURATION_MS,
SAMPLE_RATE_24K,
VAD_SAMPLE_RATE_16K,
_write_bytes_to_file,
resample_24k_to_16k,
write_audio_to_file,
)
class TestConstants:
    """Test the audio constants."""

    def test_sample_rates(self):
        """Test sample rate constants."""
        assert SAMPLE_RATE_24K == 24000
        assert VAD_SAMPLE_RATE_16K == 16000

    def test_frame_duration(self):
        """Test frame duration constant."""
        assert FRAME_DURATION_MS == 20

    def test_bytes_per_sample(self):
        """Test bytes per sample constant."""
        assert BYTES_PER_SAMPLE == 2

    def test_bytes_per_frame_calculations(self):
        """Test frame size calculations.

        A frame is 20 ms of 16-bit mono audio: rate * 20 / 1000 samples,
        two bytes per sample.
        """
        assert BYTES_PER_24K_FRAME == int(24000 * 20 / 1000) * 2
        assert BYTES_PER_24K_FRAME == 960
        assert BYTES_PER_16K_FRAME == int(16000 * 20 / 1000) * 2
        assert BYTES_PER_16K_FRAME == 640
class TestResample24kTo16k:
    """Test cases for resample_24k_to_16k function.

    A valid input frame is 960 bytes: 480 int16 samples of 24 kHz audio
    covering 20 ms; the resampled output is 640 bytes (320 samples at 16 kHz).
    """

    def test_resample_correct_frame_size(self):
        """Test resampling with correct 960-byte frame."""
        # Create a 960-byte frame (480 samples of int16)
        rng = np.random.default_rng()
        samples_24k = rng.integers(-32768, 32767, 480, dtype=np.int16)
        frame_24k_bytes = samples_24k.tobytes()
        assert len(frame_24k_bytes) == BYTES_PER_24K_FRAME
        result = resample_24k_to_16k(frame_24k_bytes)
        # Should return 640 bytes (320 samples)
        assert len(result) == BYTES_PER_16K_FRAME
        assert isinstance(result, bytes)
        # Verify we can convert back to int16 array
        result_samples = np.frombuffer(result, dtype=np.int16)
        assert len(result_samples) == 320

    def test_resample_with_zero_audio(self):
        """Test resampling with silent audio (all zeros)."""
        # Create silent 24kHz frame
        samples_24k = np.zeros(480, dtype=np.int16)
        frame_24k_bytes = samples_24k.tobytes()
        result = resample_24k_to_16k(frame_24k_bytes)
        result_samples = np.frombuffer(result, dtype=np.int16)
        # Result should also be mostly zeros (allowing for minor resampling artifacts)
        assert np.max(np.abs(result_samples)) <= 1  # Very small values allowed

    def test_resample_with_sine_wave(self):
        """Test resampling with a known sine wave."""
        # Create a 1kHz sine wave at 24kHz sample rate
        t = np.linspace(0, 0.02, 480, endpoint=False)  # 20ms
        sine_wave = np.sin(2 * np.pi * 1000 * t)
        # Convert to int16 range; half scale (16384) leaves headroom against clipping
        samples_24k = (sine_wave * 16384).astype(np.int16)
        frame_24k_bytes = samples_24k.tobytes()
        result = resample_24k_to_16k(frame_24k_bytes)
        result_samples = np.frombuffer(result, dtype=np.int16)
        # Verify the resampled wave maintains similar characteristics
        assert len(result_samples) == 320
        # The amplitude should be preserved roughly
        assert np.max(result_samples) > 10000  # Should maintain significant amplitude
        assert np.min(result_samples) < -10000

    def test_resample_invalid_frame_size_too_small(self):
        """Test error handling for frame too small."""
        invalid_frame = b"\x00" * 959  # 959 bytes instead of 960
        with pytest.raises(ValueError, match=f"Expected exactly {BYTES_PER_24K_FRAME} bytes"):
            resample_24k_to_16k(invalid_frame)

    def test_resample_invalid_frame_size_too_large(self):
        """Test error handling for frame too large."""
        invalid_frame = b"\x00" * 961  # 961 bytes instead of 960
        with pytest.raises(ValueError, match=f"Expected exactly {BYTES_PER_24K_FRAME} bytes"):
            resample_24k_to_16k(invalid_frame)

    def test_resample_empty_frame(self):
        """Test error handling for empty frame."""
        with pytest.raises(ValueError, match=f"Expected exactly {BYTES_PER_24K_FRAME} bytes"):
            resample_24k_to_16k(b"")

    def test_resample_very_large_frame(self):
        """Test error handling for very large frame."""
        huge_frame = b"\x00" * 10000
        with pytest.raises(ValueError, match=f"Expected exactly {BYTES_PER_24K_FRAME} bytes"):
            resample_24k_to_16k(huge_frame)

    def test_resample_preserves_data_type(self):
        """Test that resampling preserves int16 data type."""
        # Create frame with extreme values at both ends of the int16 range
        samples_24k = np.array([32767, -32768] * 240, dtype=np.int16)  # 480 samples
        frame_24k_bytes = samples_24k.tobytes()
        result = resample_24k_to_16k(frame_24k_bytes)
        result_samples = np.frombuffer(result, dtype=np.int16)
        # Verify data type is preserved
        assert result_samples.dtype == np.int16
        # Values should still be in int16 range
        assert np.all(result_samples >= -32768)
        assert np.all(result_samples <= 32767)

    def test_resample_ratio_verification(self):
        """Test that the resampling ratio is approximately 2/3."""
        # Create a simple alternating pattern
        samples_24k = np.tile(np.array([1000, -1000], dtype=np.int16), 240)  # 480 samples
        frame_24k_bytes = samples_24k.tobytes()
        result = resample_24k_to_16k(frame_24k_bytes)
        result_samples = np.frombuffer(result, dtype=np.int16)
        # Input: 480 samples, Output: 320 samples
        # Ratio: 320/480 = 2/3 ≈ 0.667
        ratio = len(result_samples) / len(samples_24k)
        assert abs(ratio - 2 / 3) < 0.001

    @patch("langflow.utils.voice_utils.resample")
    def test_resample_function_called(self, mock_resample):
        """Test that scipy.signal.resample is called correctly."""
        mock_resample.return_value = np.zeros(320, dtype=np.int16)
        samples_24k = np.zeros(480, dtype=np.int16)
        frame_24k_bytes = samples_24k.tobytes()
        resample_24k_to_16k(frame_24k_bytes)
        # Verify resample was called with correct parameters
        mock_resample.assert_called_once()
        args, _ = mock_resample.call_args
        input_array, target_samples = args
        assert len(input_array) == 480
        assert target_samples == 320  # int(480 * 2 / 3)
class TestWriteAudioToFile:
    """Test cases for write_audio_to_file function.

    The function decodes base64 audio, offloads the blocking file write via
    asyncio.to_thread, and logs through the async logger methods; these tests
    mock both to keep everything in-memory.
    """

    @pytest.mark.asyncio
    async def test_write_audio_to_file_success(self):
        """Test successful audio file writing."""
        audio_data = b"\x01\x02\x03\x04"
        audio_base64 = base64.b64encode(audio_data).decode("utf-8")
        with (
            patch("langflow.utils.voice_utils.asyncio.to_thread") as mock_to_thread,
            patch("langflow.utils.voice_utils.logger") as mock_logger,
        ):
            mock_to_thread.return_value = None
            # logger.ainfo is awaited, so it must be an AsyncMock.
            mock_logger.ainfo = AsyncMock()
            await write_audio_to_file(audio_base64, "test.raw")
            # Verify asyncio.to_thread was called correctly: the sync writer,
            # the decoded bytes, and the target filename, in that order.
            mock_to_thread.assert_called_once()
            args = mock_to_thread.call_args[0]
            assert args[0] == _write_bytes_to_file
            assert args[1] == audio_data
            assert args[2] == "test.raw"
            # Verify logging
            mock_logger.ainfo.assert_called_once_with(f"Wrote {len(audio_data)} bytes to test.raw")

    @pytest.mark.asyncio
    async def test_write_audio_to_file_default_filename(self):
        """Test writing with default filename."""
        audio_data = b"\x05\x06\x07\x08"
        audio_base64 = base64.b64encode(audio_data).decode("utf-8")
        with patch("langflow.utils.voice_utils.asyncio.to_thread") as mock_to_thread:
            mock_to_thread.return_value = None
            await write_audio_to_file(audio_base64)
            # Should use default filename
            args = mock_to_thread.call_args[0]
            assert args[2] == "output_audio.raw"

    @pytest.mark.asyncio
    async def test_write_audio_to_file_base64_decode_error(self):
        """Test error handling for invalid base64."""
        invalid_base64 = "invalid base64 string!!!"
        with patch("langflow.utils.voice_utils.logger") as mock_logger:
            mock_logger.aerror = AsyncMock()
            # The function must swallow the decode error and log it rather
            # than raising.
            await write_audio_to_file(invalid_base64, "test.raw")
            # Should log error
            mock_logger.aerror.assert_called_once()
            error_msg = mock_logger.aerror.call_args[0][0]
            assert "Error writing audio to file:" in error_msg

    @pytest.mark.asyncio
    async def test_write_audio_to_file_os_error(self):
        """Test error handling for OS errors during file writing."""
        audio_data = b"\x01\x02\x03\x04"
        audio_base64 = base64.b64encode(audio_data).decode("utf-8")
        with (
            patch("langflow.utils.voice_utils.asyncio.to_thread") as mock_to_thread,
            patch("langflow.utils.voice_utils.logger") as mock_logger,
        ):
            mock_to_thread.side_effect = OSError("File write error")
            # Mock the async logger methods
            mock_logger.aerror = AsyncMock()
            await write_audio_to_file(audio_base64, "test.raw")
            # Should log error
            mock_logger.aerror.assert_called_once()
            error_msg = mock_logger.aerror.call_args[0][0]
            assert "Error writing audio to file:" in error_msg

    @pytest.mark.asyncio
    async def test_write_audio_to_file_empty_audio(self):
        """Test writing empty audio data."""
        empty_audio = b""
        audio_base64 = base64.b64encode(empty_audio).decode("utf-8")
        with (
            patch("langflow.utils.voice_utils.asyncio.to_thread") as mock_to_thread,
            patch("langflow.utils.voice_utils.logger") as mock_logger,
        ):
            mock_to_thread.return_value = None
            mock_logger.ainfo = AsyncMock()
            await write_audio_to_file(audio_base64, "empty.raw")
            # Should still work and log a zero-byte write
            mock_to_thread.assert_called_once()
            mock_logger.ainfo.assert_called_once_with("Wrote 0 bytes to empty.raw")

    @pytest.mark.asyncio
    async def test_write_audio_to_file_large_audio(self):
        """Test writing large audio data."""
        large_audio = b"\x01" * 10000
        audio_base64 = base64.b64encode(large_audio).decode("utf-8")
        with (
            patch("langflow.utils.voice_utils.asyncio.to_thread") as mock_to_thread,
            patch("langflow.utils.voice_utils.logger") as mock_logger,
        ):
            mock_to_thread.return_value = None
            mock_logger.ainfo = AsyncMock()
            await write_audio_to_file(audio_base64, "large.raw")
            # Should handle large data correctly
            args = mock_to_thread.call_args[0]
            assert args[1] == large_audio
            mock_logger.ainfo.assert_called_once_with("Wrote 10000 bytes to large.raw")
class TestWriteBytesToFile:
    """Test cases for _write_bytes_to_file function.

    The function opens the target path in append-binary ("ab") mode via
    Path.open inside a context manager; all file access is mocked here.
    """

    def test_write_bytes_to_file_success(self):
        """Test successful bytes writing to file."""
        test_data = b"\x01\x02\x03\x04\x05"
        filename = "test_output.raw"
        mock_file = mock_open()
        with patch("langflow.utils.voice_utils.Path.open", mock_file):
            _write_bytes_to_file(test_data, filename)
            # Verify file was opened in append binary mode ("ab" is the only
            # positional argument because Path.open's self is the Path itself)
            mock_file.assert_called_once_with("ab")
            # Verify data was written
            mock_file().write.assert_called_once_with(test_data)

    def test_write_bytes_to_file_path_construction(self):
        """Test that Path is constructed correctly."""
        test_data = b"\x06\x07\x08"
        filename = "test/path/file.raw"
        with patch("langflow.utils.voice_utils.Path") as mock_path:
            mock_path_instance = MagicMock()
            mock_path.return_value = mock_path_instance
            _write_bytes_to_file(test_data, filename)
            # Verify Path was constructed with filename
            mock_path.assert_called_once_with(filename)
            # Verify open was called in append binary mode
            mock_path_instance.open.assert_called_once_with("ab")

    def test_write_bytes_to_file_empty_data(self):
        """Test writing empty bytes."""
        empty_data = b""
        filename = "empty.raw"
        mock_file = mock_open()
        with patch("langflow.utils.voice_utils.Path.open", mock_file):
            _write_bytes_to_file(empty_data, filename)
            # Should still attempt to write empty data
            mock_file().write.assert_called_once_with(empty_data)

    def test_write_bytes_to_file_large_data(self):
        """Test writing large bytes."""
        large_data = b"\xff" * 50000
        filename = "large.raw"
        mock_file = mock_open()
        with patch("langflow.utils.voice_utils.Path.open", mock_file):
            _write_bytes_to_file(large_data, filename)
            # Should write all data
            mock_file().write.assert_called_once_with(large_data)

    def test_write_bytes_to_file_append_mode(self):
        """Test that file is opened in append mode."""
        test_data = b"\x10\x11\x12"
        filename = "append_test.raw"
        mock_file = mock_open()
        with patch("langflow.utils.voice_utils.Path.open", mock_file):
            _write_bytes_to_file(test_data, filename)
            # Verify append binary mode
            mock_file.assert_called_once_with("ab")

    def test_write_bytes_to_file_context_manager(self):
        """Test that file is properly closed using context manager."""
        test_data = b"\x13\x14\x15"
        filename = "context_test.raw"
        mock_file = mock_open()
        with patch("langflow.utils.voice_utils.Path.open", mock_file):
            _write_bytes_to_file(test_data, filename)
            # Verify context manager was used (enter and exit called)
            mock_file().__enter__.assert_called_once()
            mock_file().__exit__.assert_called_once()

    def test_write_bytes_to_file_multiple_calls(self):
        """Test multiple calls to write_bytes_to_file."""
        data1 = b"\x20\x21"
        data2 = b"\x22\x23"
        filename = "multi_test.raw"
        mock_file = mock_open()
        with patch("langflow.utils.voice_utils.Path.open", mock_file):
            _write_bytes_to_file(data1, filename)
            _write_bytes_to_file(data2, filename)
            # Should have been called twice
            assert mock_file.call_count == 2
            # Both calls should use append mode
            assert all(call[0] == ("ab",) for call in mock_file.call_args_list)
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/utils/test_voice_utils.py",
"license": "MIT License",
"lines": 299,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:docs/scripts/clean_openapi_formatting.py | #!/usr/bin/env python3
"""Clean OpenAPI specification formatting for better ReDoc display.
This script converts newlines in descriptions to HTML breaks for proper
rendering in ReDoc documentation.
"""
import json
import sys
from pathlib import Path
MIN_ARGS = 2
def clean_openapi_formatting(input_file: str, output_file: str | None = None) -> None:
"""Clean OpenAPI spec formatting by converting newlines to HTML breaks.
Args:
input_file: Path to input OpenAPI JSON file
output_file: Path to output file (defaults to overwriting input)
"""
if output_file is None:
output_file = input_file
try:
# Load the OpenAPI spec
input_path = Path(input_file)
with input_path.open(encoding="utf-8") as f:
spec = json.load(f)
# Fix description formatting by converting newlines to HTML breaks
if "paths" in spec:
for path_item in spec["paths"].values():
for operation in path_item.values():
if isinstance(operation, dict) and "description" in operation:
description = operation["description"]
if description:
# Convert newlines to HTML breaks for better ReDoc rendering
operation["description"] = description.replace("\n", "<br>")
# Save the cleaned spec
output_path = Path(output_file)
with output_path.open("w", encoding="utf-8") as f:
json.dump(spec, f, indent=2, ensure_ascii=False)
# Success message (using sys.stdout for consistency)
sys.stdout.write(f"OpenAPI spec cleaned successfully: {output_file}\n")
except FileNotFoundError:
sys.stderr.write(f"Error: File not found: {input_file}\n")
sys.exit(1)
except json.JSONDecodeError as e:
sys.stderr.write(f"Error: Invalid JSON in {input_file}: {e}\n")
sys.exit(1)
except OSError as e:
sys.stderr.write(f"Error: {e}\n")
sys.exit(1)
def main():
    """Main entry point: validate CLI arguments and run the cleaner."""
    argv = sys.argv
    if len(argv) < MIN_ARGS:
        sys.stderr.write("Usage: python clean_openapi_formatting.py <input_file> [output_file]\n")
        sys.exit(1)
    # The output path is optional; None makes the cleaner overwrite the input.
    target = argv[2] if len(argv) > MIN_ARGS else None
    clean_openapi_formatting(argv[1], target)


if __name__ == "__main__":
    main()
| {
"repo_id": "langflow-ai/langflow",
"file_path": "docs/scripts/clean_openapi_formatting.py",
"license": "MIT License",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langflow-ai/langflow:src/lfx/src/lfx/base/models/google_generative_ai_model.py | from langchain_google_genai import ChatGoogleGenerativeAI
class ChatGoogleGenerativeAIFixed(ChatGoogleGenerativeAI):
    """Custom ChatGoogleGenerativeAI that fixes function response name issues for Gemini."""

    def __init__(self, *args, **kwargs):
        """Initialize with fix for empty function response names in ToolMessage and FunctionMessage."""
        # NOTE(review): with the plain module-level import above, a missing
        # langchain_google_genai package raises ImportError before this class
        # is even defined, so this guard looks unreachable — confirm whether
        # the import is made optional elsewhere in the package.
        if ChatGoogleGenerativeAI is None:
            msg = "The 'langchain_google_genai' package is required to use the Google Generative AI model."
            raise ImportError(msg)
        # Initialize the parent class
        super().__init__(*args, **kwargs)

    def _prepare_request(self, messages, **kwargs):
        """Override request preparation to fix empty function response names.

        Tool/function messages with an empty ``name`` are rebuilt with a
        placeholder name before the request is assembled; all other messages
        pass through unchanged.
        """
        # Imported lazily so the module loads without langchain_core at import
        # time of this method's dependencies.
        from langchain_core.messages import FunctionMessage, ToolMessage

        # Pre-process messages to ensure tool/function messages have names
        fixed_messages = []
        for message in messages:
            fixed_message = message
            if isinstance(message, ToolMessage) and not message.name:
                # Create a new ToolMessage with a default name, carrying over
                # content, tool_call_id, and artifact when present.
                fixed_message = ToolMessage(
                    content=message.content,
                    name="tool_response",
                    tool_call_id=getattr(message, "tool_call_id", None),
                    artifact=getattr(message, "artifact", None),
                )
            elif isinstance(message, FunctionMessage) and not message.name:
                # Create a new FunctionMessage with a default name
                fixed_message = FunctionMessage(content=message.content, name="function_response")
            fixed_messages.append(fixed_message)
        # Call the parent's method with fixed messages.
        # NOTE(review): assumes the parent class defines _prepare_request —
        # verify against the installed langchain_google_genai version.
        return super()._prepare_request(fixed_messages, **kwargs)
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/base/models/google_generative_ai_model.py",
"license": "MIT License",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/agentql_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioAgentQLAPIComponent(ComposioBaseComponent):
    """Langflow component exposing AgentQL tools through Composio.

    All runtime behavior comes from ComposioBaseComponent; this subclass only
    binds the display metadata and the Composio app identifier.
    """

    display_name: str = "AgentQL"
    icon = "AgentQL"
    documentation: str = "https://docs.composio.dev"
    # Composio app slug used to resolve this component's toolset.
    app_name = "agentql"

    def set_default_tools(self):
        """Set the default tools for AgentQL component."""
        # No-op: no default tools are assigned for this app.
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/agentql_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/agiled_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioAgiledAPIComponent(ComposioBaseComponent):
    """Langflow component exposing Agiled tools through Composio.

    All runtime behavior comes from ComposioBaseComponent; this subclass only
    binds the display metadata and the Composio app identifier.
    """

    display_name: str = "Agiled"
    icon = "Agiled"
    documentation: str = "https://docs.composio.dev"
    # Composio app slug used to resolve this component's toolset.
    app_name = "agiled"

    def set_default_tools(self):
        """Set the default tools for Agiled component."""
        # No-op: no default tools are assigned for this app.
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/agiled_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/bolna_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioBolnaAPIComponent(ComposioBaseComponent):
    """Langflow component exposing Bolna tools through Composio.

    All runtime behavior comes from ComposioBaseComponent; this subclass only
    binds the display metadata and the Composio app identifier.
    """

    display_name: str = "Bolna"
    icon = "Bolna"
    documentation: str = "https://docs.composio.dev"
    # Composio app slug used to resolve this component's toolset.
    app_name = "bolna"

    def set_default_tools(self):
        """Set the default tools for Bolna component."""
        # No-op: no default tools are assigned for this app.
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/bolna_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/brightdata_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioBrightdataAPIComponent(ComposioBaseComponent):
display_name: str = "Brightdata"
icon = "Brightdata"
documentation: str = "https://docs.composio.dev"
app_name = "brightdata"
def set_default_tools(self):
"""Set the default tools for Brightdata component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/brightdata_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/canvas_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioCanvasAPIComponent(ComposioBaseComponent):
display_name: str = "Canvas"
icon = "Canvas"
documentation: str = "https://docs.composio.dev"
app_name = "canvas"
def set_default_tools(self):
"""Set the default tools for Canvaas component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/canvas_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/digicert_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioDigicertAPIComponent(ComposioBaseComponent):
display_name: str = "Digicert"
icon = "Digicert"
documentation: str = "https://docs.composio.dev"
app_name = "digicert"
def set_default_tools(self):
"""Set the default tools for Digicert component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/digicert_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/jira_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioJiraAPIComponent(ComposioBaseComponent):
display_name: str = "Jira"
icon = "Jira"
documentation: str = "https://docs.composio.dev"
app_name = "jira"
def set_default_tools(self):
"""Set the default tools for Jira component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/jira_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/timelinesai_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioTimelinesAIAPIComponent(ComposioBaseComponent):
display_name: str = "TimelinesAI"
icon = "Timelinesai"
documentation: str = "https://docs.composio.dev"
app_name = "timelinesai"
def set_default_tools(self):
"""Set the default tools for TimelinesAI component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/timelinesai_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/finage_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioFinageAPIComponent(ComposioBaseComponent):
display_name: str = "Finage"
icon = "Finage"
documentation: str = "https://docs.composio.dev"
app_name = "finage"
def set_default_tools(self):
"""Set the default tools for Finage component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/finage_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/fixer_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioFixerAPIComponent(ComposioBaseComponent):
display_name: str = "Fixer"
icon = "Fixer"
documentation: str = "https://docs.composio.dev"
app_name = "fixer"
def set_default_tools(self):
"""Set the default tools for Fixer component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/fixer_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/flexisign_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioFlexisignAPIComponent(ComposioBaseComponent):
display_name: str = "Flexisign"
icon = "Flexisign"
documentation: str = "https://docs.composio.dev"
app_name = "flexisign"
def set_default_tools(self):
"""Set the default tools for Flexisign component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/flexisign_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/freshdesk_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioFreshdeskAPIComponent(ComposioBaseComponent):
display_name: str = "Freshdesk"
icon = "Freshdesk"
documentation: str = "https://docs.composio.dev"
app_name = "freshdesk"
def set_default_tools(self):
"""Set the default tools for Freshdesk component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/freshdesk_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/googleclassroom_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioGoogleclassroomAPIComponent(ComposioBaseComponent):
display_name: str = "Google Classroom"
icon = "Classroom"
documentation: str = "https://docs.composio.dev"
app_name = "GOOGLE_CLASSROOM"
def set_default_tools(self):
"""Set the default tools for Google Classroom component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/googleclassroom_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/instagram_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioInstagramAPIComponent(ComposioBaseComponent):
display_name: str = "Instagram"
icon = "Instagram"
documentation: str = "https://docs.composio.dev"
app_name = "instagram"
def set_default_tools(self):
"""Set the default tools for Instagram component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/instagram_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/jotform_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioJotformAPIComponent(ComposioBaseComponent):
display_name: str = "Jotform"
icon = "Jotform"
documentation: str = "https://docs.composio.dev"
app_name = "jotform"
def set_default_tools(self):
"""Set the default tools for Jotform component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/jotform_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/listennotes_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioListennotesAPIComponent(ComposioBaseComponent):
display_name: str = "Listennotes"
icon = "Listennotes"
documentation: str = "https://docs.composio.dev"
app_name = "listennotes"
def set_default_tools(self):
"""Set the default tools for Listennotes component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/listennotes_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/missive_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioMissiveAPIComponent(ComposioBaseComponent):
display_name: str = "Missive"
icon = "Missive"
documentation: str = "https://docs.composio.dev"
app_name = "missive"
def set_default_tools(self):
"""Set the default tools for Missive component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/missive_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/pandadoc_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioPandadocAPIComponent(ComposioBaseComponent):
display_name: str = "Pandadoc"
icon = "Pandadoc"
documentation: str = "https://docs.composio.dev"
app_name = "pandadoc"
def set_default_tools(self):
"""Set the default tools for Pandadoc component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/pandadoc_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/backend/tests/unit/components/processing/test_dynamic_create_data_component.py | from lfx.components.processing.dynamic_create_data import DynamicCreateDataComponent
from lfx.schema.data import Data
from lfx.schema.message import Message
class TestDynamicCreateDataComponent:
def test_update_build_config_creates_dynamic_inputs(self):
"""Test that dynamic inputs are created based on form_fields configuration."""
component = DynamicCreateDataComponent()
build_config = {"form_fields": {"value": []}}
field_value = [
{"field_name": "username", "field_type": "Text"},
{"field_name": "age", "field_type": "Number"},
{"field_name": "active", "field_type": "Boolean"},
]
result = component.update_build_config(build_config, field_value, "form_fields")
assert "dynamic_username" in result
assert "dynamic_age" in result
assert "dynamic_active" in result
assert result["dynamic_username"].display_name == "username"
assert result["dynamic_age"].display_name == "age"
def test_update_build_config_clears_old_dynamic_inputs(self):
"""Test that old dynamic inputs are removed when form_fields change."""
component = DynamicCreateDataComponent()
build_config = {
"dynamic_old_field": {"display_name": "Old Field"},
"form_fields": {"value": []},
}
field_value = [{"field_name": "new_field", "field_type": "Text"}]
result = component.update_build_config(build_config, field_value, "form_fields")
assert "dynamic_old_field" not in result
assert "dynamic_new_field" in result
def test_process_form_returns_data_with_simple_values(self):
"""Test that process_form extracts and returns simple values from inputs."""
component = DynamicCreateDataComponent()
component.form_fields = [
{"field_name": "name", "field_type": "Text"},
{"field_name": "count", "field_type": "Number"},
]
# Simulate manual input values
component.dynamic_name = "John Doe"
component.dynamic_count = 42
result = component.process_form()
assert isinstance(result, Data)
assert result.data["name"] == "John Doe"
assert result.data["count"] == 42
def test_extract_simple_value_handles_message_objects(self):
"""Test that Message objects are properly extracted to their text content."""
component = DynamicCreateDataComponent()
test_message = Message(text="Hello World")
result = component._extract_simple_value(test_message)
assert result == "Hello World"
assert isinstance(result, str)
def test_get_message_formats_data_as_text(self):
"""Test that get_message returns properly formatted text output."""
component = DynamicCreateDataComponent()
component.form_fields = [
{"field_name": "title", "field_type": "Text"},
{"field_name": "enabled", "field_type": "Boolean"},
]
component.dynamic_title = "Test Title"
component.dynamic_enabled = True
result = component.get_message()
assert isinstance(result, Message)
assert "title" in result.text
assert "Test Title" in result.text
assert "enabled" in result.text
def test_handles_none_field_config_gracefully(self):
"""Test that None values in form_fields are handled without errors."""
component = DynamicCreateDataComponent()
build_config = {"form_fields": {"value": []}}
field_value = [
{"field_name": "valid_field", "field_type": "Text"},
None, # This should be skipped
{"field_name": "another_field", "field_type": "Number"},
]
result = component.update_build_config(build_config, field_value, "form_fields")
assert "dynamic_valid_field" in result
assert "dynamic_another_field" in result
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/components/processing/test_dynamic_create_data_component.py",
"license": "MIT License",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/lfx/src/lfx/components/processing/dynamic_create_data.py | from typing import Any
from lfx.custom import Component
from lfx.io import (
BoolInput,
FloatInput,
HandleInput,
IntInput,
MultilineInput,
Output,
StrInput,
TableInput,
)
from lfx.schema.data import Data
from lfx.schema.message import Message
class DynamicCreateDataComponent(Component):
display_name: str = "Dynamic Create Data"
description: str = "Dynamically create a Data with a specified number of fields."
name: str = "DynamicCreateData"
MAX_FIELDS = 15 # Define a constant for maximum number of fields
icon = "ListFilter"
def __init__(self, **kwargs):
super().__init__(**kwargs)
inputs = [
TableInput(
name="form_fields",
display_name="Input Configuration",
info=(
"Define the dynamic form fields. Each row creates a new input field "
"that can connect to other components."
),
table_schema=[
{
"name": "field_name",
"display_name": "Field Name",
"type": "str",
"description": "Name for the field (used as both internal name and display label)",
},
{
"name": "field_type",
"display_name": "Field Type",
"type": "str",
"description": "Type of input field to create",
"options": ["Text", "Data", "Number", "Handle", "Boolean"],
"value": "Text",
},
],
value=[],
real_time_refresh=True,
),
BoolInput(
name="include_metadata",
display_name="Include Metadata",
info="Include form configuration metadata in the output.",
value=False,
advanced=True,
),
]
outputs = [
Output(display_name="Data", name="form_data", method="process_form"),
Output(display_name="Message", name="message", method="get_message"),
]
def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:
"""Update build configuration to add dynamic inputs that can connect to other components."""
if field_name == "form_fields":
# Clear existing dynamic inputs from build config
keys_to_remove = [key for key in build_config if key.startswith("dynamic_")]
for key in keys_to_remove:
del build_config[key]
# Add dynamic inputs based on table configuration
# Safety check to ensure field_value is not None and is iterable
if field_value is None:
field_value = []
for i, field_config in enumerate(field_value):
# Safety check to ensure field_config is not None
if field_config is None:
continue
field_name = field_config.get("field_name", f"field_{i}")
display_name = field_name # Use field_name as display_name
field_type_option = field_config.get("field_type", "Text")
default_value = "" # All fields have empty default value
required = False # All fields are optional by default
help_text = "" # All fields have empty help text
# Map field type options to actual field types and input types
field_type_mapping = {
"Text": {"field_type": "multiline", "input_types": ["Text", "Message"]},
"Data": {"field_type": "data", "input_types": ["Data"]},
"Number": {"field_type": "number", "input_types": ["Text", "Message"]},
"Handle": {"field_type": "handle", "input_types": ["Text", "Data", "Message"]},
"Boolean": {"field_type": "boolean", "input_types": None},
}
field_config_mapped = field_type_mapping.get(
field_type_option, {"field_type": "text", "input_types": []}
)
if not isinstance(field_config_mapped, dict):
field_config_mapped = {"field_type": "text", "input_types": []}
field_type = field_config_mapped["field_type"]
input_types_list = field_config_mapped["input_types"]
# Create the appropriate input type based on field_type
dynamic_input_name = f"dynamic_{field_name}"
if field_type == "text":
if input_types_list:
build_config[dynamic_input_name] = StrInput(
name=dynamic_input_name,
display_name=display_name,
info=f"{help_text} (Can connect to: {', '.join(input_types_list)})",
value=default_value,
required=required,
input_types=input_types_list,
)
else:
build_config[dynamic_input_name] = StrInput(
name=dynamic_input_name,
display_name=display_name,
info=help_text,
value=default_value,
required=required,
)
elif field_type == "multiline":
if input_types_list:
build_config[dynamic_input_name] = MultilineInput(
name=dynamic_input_name,
display_name=display_name,
info=f"{help_text} (Can connect to: {', '.join(input_types_list)})",
value=default_value,
required=required,
input_types=input_types_list,
)
else:
build_config[dynamic_input_name] = MultilineInput(
name=dynamic_input_name,
display_name=display_name,
info=help_text,
value=default_value,
required=required,
)
elif field_type == "number":
try:
default_int = int(default_value) if default_value else 0
except ValueError:
default_int = 0
if input_types_list:
build_config[dynamic_input_name] = IntInput(
name=dynamic_input_name,
display_name=display_name,
info=f"{help_text} (Can connect to: {', '.join(input_types_list)})",
value=default_int,
required=required,
input_types=input_types_list,
)
else:
build_config[dynamic_input_name] = IntInput(
name=dynamic_input_name,
display_name=display_name,
info=help_text,
value=default_int,
required=required,
)
elif field_type == "float":
try:
default_float = float(default_value) if default_value else 0.0
except ValueError:
default_float = 0.0
if input_types_list:
build_config[dynamic_input_name] = FloatInput(
name=dynamic_input_name,
display_name=display_name,
info=f"{help_text} (Can connect to: {', '.join(input_types_list)})",
value=default_float,
required=required,
input_types=input_types_list,
)
else:
build_config[dynamic_input_name] = FloatInput(
name=dynamic_input_name,
display_name=display_name,
info=help_text,
value=default_float,
required=required,
)
elif field_type == "boolean":
default_bool = default_value.lower() in ["true", "1", "yes"] if default_value else False
# Boolean fields don't use input_types parameter to avoid errors
build_config[dynamic_input_name] = BoolInput(
name=dynamic_input_name,
display_name=display_name,
info=help_text,
value=default_bool,
input_types=[],
required=required,
)
elif field_type == "handle":
# HandleInput for generic data connections
build_config[dynamic_input_name] = HandleInput(
name=dynamic_input_name,
display_name=display_name,
info=f"{help_text} (Accepts: {', '.join(input_types_list) if input_types_list else 'Any'})",
input_types=input_types_list if input_types_list else ["Data", "Text", "Message"],
required=required,
)
elif field_type == "data":
# Specialized for Data type connections
build_config[dynamic_input_name] = HandleInput(
name=dynamic_input_name,
display_name=display_name,
info=f"{help_text} (Data input)",
input_types=input_types_list if input_types_list else ["Data"],
required=required,
)
else:
# Default to text input for unknown types
build_config[dynamic_input_name] = StrInput(
name=dynamic_input_name,
display_name=display_name,
info=f"{help_text} (Unknown type '{field_type}', defaulting to text)",
value=default_value,
required=required,
)
return build_config
def get_dynamic_values(self) -> dict[str, Any]:
"""Extract simple values from all dynamic inputs, handling both manual and connected inputs."""
dynamic_values = {}
connection_info = {}
form_fields = getattr(self, "form_fields", [])
for field_config in form_fields:
# Safety check to ensure field_config is not None
if field_config is None:
continue
field_name = field_config.get("field_name", "")
if field_name:
dynamic_input_name = f"dynamic_{field_name}"
value = getattr(self, dynamic_input_name, None)
# Extract simple values from connections or manual input
if value is not None:
try:
extracted_value = self._extract_simple_value(value)
dynamic_values[field_name] = extracted_value
# Determine connection type for status
if hasattr(value, "text") and hasattr(value, "timestamp"):
connection_info[field_name] = "Connected (Message)"
elif hasattr(value, "data"):
connection_info[field_name] = "Connected (Data)"
elif isinstance(value, (str, int, float, bool, list, dict)):
connection_info[field_name] = "Manual input"
else:
connection_info[field_name] = "Connected (Object)"
except (AttributeError, TypeError, ValueError):
# Fallback to string representation if all else fails
dynamic_values[field_name] = str(value)
connection_info[field_name] = "Error"
else:
# Use empty default value if nothing connected
dynamic_values[field_name] = ""
connection_info[field_name] = "Empty default"
# Store connection info for status output
self._connection_info = connection_info
return dynamic_values
def _extract_simple_value(self, value: Any) -> Any:
"""Extract the simplest, most useful value from any input type."""
# Handle None
if value is None:
return None
# Handle simple types directly
if isinstance(value, (str, int, float, bool)):
return value
# Handle lists and tuples - keep simple
if isinstance(value, (list, tuple)):
return [self._extract_simple_value(item) for item in value]
# Handle dictionaries - keep simple
if isinstance(value, dict):
return {str(k): self._extract_simple_value(v) for k, v in value.items()}
# Handle Message objects - extract only the text
if hasattr(value, "text"):
return str(value.text) if value.text is not None else ""
# Handle Data objects - extract the data content
if hasattr(value, "data") and value.data is not None:
return self._extract_simple_value(value.data)
# For any other object, convert to string
return str(value)
def process_form(self) -> Data:
"""Process all dynamic form inputs and return clean data with just field values."""
# Get all dynamic values (just the key:value pairs)
dynamic_values = self.get_dynamic_values()
# Update status with connection info
connected_fields = len([v for v in getattr(self, "_connection_info", {}).values() if "Connected" in v])
total_fields = len(dynamic_values)
self.status = f"Form processed successfully. {connected_fields}/{total_fields} fields connected to components."
# Return clean Data object with just the field values
return Data(data=dynamic_values)
def get_message(self) -> Message:
"""Return form data as a formatted text message."""
# Get all dynamic values
dynamic_values = self.get_dynamic_values()
if not dynamic_values:
return Message(text="No form data available")
# Format as text message
message_lines = ["📋 Form Data:"]
message_lines.append("=" * 40)
for field_name, value in dynamic_values.items():
# Use field_name as display_name
display_name = field_name
message_lines.append(f"• {display_name}: {value}")
message_lines.append("=" * 40)
message_lines.append(f"Total fields: {len(dynamic_values)}")
message_text = "\n".join(message_lines)
self.status = f"Message formatted with {len(dynamic_values)} fields"
return Message(text=message_text)
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/processing/dynamic_create_data.py",
"license": "MIT License",
"lines": 306,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langflow-ai/langflow:scripts/build_component_index.py | """Build a static component index for fast startup.
This script generates a prebuilt index of all built-in components by walking
through the lfx.components package and processing each module. The index is
saved as a JSON file that can be loaded instantly at runtime, avoiding the
need to import all component modules during startup.
"""
import hashlib
import sys
from pathlib import Path
import orjson
def _get_lfx_version():
"""Get the installed lfx version.
Components are located in LFX, so use LFX.
"""
from importlib.metadata import version
version = version("lfx")
print(f"Retrieved LFX version: {version}")
return version
def _normalize_for_determinism(obj):
"""Recursively normalize data structures for deterministic serialization.
Sorts dictionaries by key to ensure consistent ordering. Lists are kept in
their original order since many lists are semantically ordered (e.g., field_order,
display_order, etc.).
Note: If upstream code produces nondeterministic list ordering (e.g., from
reflection or set iteration), this function will NOT fix it. Ensure lists
are deterministically ordered before calling this function, or consider
sorting specific list fields that are semantically unordered (e.g., tags).
"""
if isinstance(obj, dict):
# Recursively normalize all dict values and return sorted by keys
return {k: _normalize_for_determinism(v) for k, v in sorted(obj.items())}
if isinstance(obj, list):
# Recursively normalize list items but preserve order
# Lists like field_order, display_order, etc. are semantically ordered
return [_normalize_for_determinism(item) for item in obj]
# Primitive types, return as-is
return obj
def _strip_dynamic_fields(obj):
"""Recursively remove dynamic fields that change with external dependencies.
This prevents unnecessary hash changes and git history bloat when dependencies update.
Timestamps are stripped to ensure deterministic builds - version is used as the timeline.
"""
# List of field names that are dynamically populated from external sources
# or contain runtime-specific data
dynamic_field_names = {"timestamp", "deprecated_at"}
if isinstance(obj, dict):
return {k: _strip_dynamic_fields(v) for k, v in obj.items() if k not in dynamic_field_names}
if isinstance(obj, list):
return [_strip_dynamic_fields(item) for item in obj]
return obj
def _import_components() -> tuple[dict, int]:
"""Import all lfx components using the async import function.
Returns:
Tuple of (modules_dict, components_count)
Raises:
RuntimeError: If component import fails
"""
import asyncio
from lfx.interface.components import import_langflow_components
try:
# Run the async function
components_result = asyncio.run(import_langflow_components())
modules_dict = components_result.get("components", {})
components_count = sum(len(v) for v in modules_dict.values())
print(f"Discovered {components_count} components across {len(modules_dict)} categories")
except Exception as e:
msg = f"Failed to import components: {e}"
raise RuntimeError(msg) from e
else:
return modules_dict, components_count
def build_component_index() -> dict:
"""Build the component index by scanning all modules in lfx.components.
Returns:
A dictionary containing version, entries, and sha256 hash
Raises:
RuntimeError: If index cannot be built
ValueError: If existing index is invalid
"""
print("Building component index...")
modules_dict, components_count = _import_components()
current_version = _get_lfx_version()
# Convert modules_dict to entries format and sort for determinism
# Sort by category name (top_level) to ensure consistent ordering
entries = []
for category_name in sorted(modules_dict.keys()):
# Sort components within each category by component name
components_dict = modules_dict[category_name]
sorted_components = {}
for comp_name in sorted(components_dict.keys()):
# Make defensive copies to avoid mutating the original component object
component = dict(components_dict[comp_name])
component["metadata"] = dict(component.get("metadata", {}))
sorted_components[comp_name] = component
entries.append([category_name, sorted_components])
index = {
"version": current_version,
"metadata": {
"num_modules": len(modules_dict),
"num_components": components_count,
},
"entries": entries,
}
# Strip dynamic fields from component templates BEFORE normalization
# This prevents changes in external dependencies (like litellm model lists) from changing the hash
print("\nStripping dynamic fields from component metadata...")
index = _strip_dynamic_fields(index)
# Normalize the entire structure for deterministic output
index = _normalize_for_determinism(index)
# Calculate SHA256 hash for integrity verification
# IMPORTANT: Hash is computed BEFORE adding the sha256 field itself
# Determinism relies on BOTH:
# 1. _normalize_for_determinism() - recursively sorts dict keys
# 2. orjson.OPT_SORT_KEYS - ensures consistent serialization
#
# To verify integrity later, you must:
# 1. Load the index
# 2. Remove the 'sha256' field
# 3. Serialize with OPT_SORT_KEYS
# 4. Compare SHA256 hashes
payload = orjson.dumps(index, option=orjson.OPT_SORT_KEYS)
index["sha256"] = hashlib.sha256(payload).hexdigest() # type: ignore[index]
return index
# Standard location for component index
COMPONENT_INDEX_PATH = Path(__file__).parent.parent / "src" / "lfx" / "src" / "lfx" / "_assets" / "component_index.json"
def main():
"""Main entry point for building the component index."""
try:
# Build the index - will raise on any error
index = build_component_index()
except Exception as e: # noqa: BLE001
print(f"Failed to build component index: {e}", file=sys.stderr)
sys.exit(1)
# Use the standard component index path (defined at module level)
output_path = COMPONENT_INDEX_PATH
# Create directory if it doesn't exist
output_path.parent.mkdir(parents=True, exist_ok=True)
# Pretty-print for readable git diffs and resolvable merge conflicts
print(f"\nWriting formatted index to {output_path}")
json_bytes = orjson.dumps(index, option=orjson.OPT_SORT_KEYS | orjson.OPT_INDENT_2)
output_path.write_text(json_bytes.decode("utf-8"), encoding="utf-8")
print("\nIndex successfully written!")
print(f" Version: {index['version']}")
print(f" Modules: {index['metadata']['num_modules']}")
print(f" Components: {index['metadata']['num_components']}")
print(f" SHA256: {index['sha256']}")
if __name__ == "__main__":
main()
| {
"repo_id": "langflow-ai/langflow",
"file_path": "scripts/build_component_index.py",
"license": "MIT License",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langflow-ai/langflow:src/backend/tests/unit/test_build_component_index.py | """Tests for the build_component_index.py script."""
import hashlib
from pathlib import Path
from unittest.mock import patch
import orjson
import pytest
class TestBuildComponentIndexScript:
    """Tests for the build_component_index.py script.

    The script is loaded from its file path (it is not an importable
    package), so each test loads it via importlib and executes it under
    ``patch`` so that module-level code sees the mocks.
    """

    @staticmethod
    def _script_path() -> Path:
        """Path to scripts/build_component_index.py relative to this test file."""
        return Path(__file__).parent.parent.parent.parent / "scripts" / "build_component_index.py"

    def _load_build_module(self):
        """Load the build script as a module WITHOUT executing it.

        Skips the calling test when the script is missing. Returns the
        ``(spec, module)`` pair; callers must run ``spec.loader.exec_module``
        inside their own ``patch`` context so that patched names (e.g.
        ``asyncio.run``) are in effect during module execution.
        """
        import importlib.util
        import sys

        script_path = self._script_path()
        if not script_path.exists():
            pytest.skip("build_component_index.py script not found")
        spec = importlib.util.spec_from_file_location("build_component_index", script_path)
        build_module = importlib.util.module_from_spec(spec)
        sys.modules["build_component_index"] = build_module
        return spec, build_module

    def test_build_script_creates_valid_structure(self):
        """Test that the build script creates a valid index structure."""
        spec, build_module = self._load_build_module()
        with patch("asyncio.run") as mock_run:
            # Mock component data returned by the (patched) async loader.
            mock_run.return_value = {
                "components": {
                    "TestCategory": {
                        "TestComponent": {
                            "display_name": "Test Component",
                            "description": "A test component",
                            "template": {"code": {"type": "code"}},
                        }
                    }
                }
            }
            spec.loader.exec_module(build_module)
            index = build_module.build_component_index()
        assert index is not None
        assert "version" in index
        assert "entries" in index
        assert "sha256" in index
        assert isinstance(index["entries"], list)

    def test_build_script_minifies_json(self, tmp_path):
        """Test that the build script always minifies JSON output."""
        spec, build_module = self._load_build_module()
        with (
            patch("asyncio.run") as mock_run,
            patch("importlib.metadata.version", return_value="1.0.0.test"),
        ):
            mock_run.return_value = {
                "components": {
                    "TestCategory": {
                        "TestComponent": {
                            "display_name": "Test",
                            "template": {},
                        }
                    }
                }
            }
            spec.loader.exec_module(build_module)
            index = build_module.build_component_index()
            # Write using the build module's logic
            json_bytes = orjson.dumps(index, option=orjson.OPT_SORT_KEYS)
            test_file = tmp_path / "test_index.json"
            test_file.write_text(json_bytes.decode("utf-8"), encoding="utf-8")
            # Verify it's minified (single line)
            content = test_file.read_text()
            lines = content.strip().split("\n")
            assert len(lines) == 1, "JSON should be minified to a single line"

    def test_build_script_sha256_integrity(self):
        """Test that SHA256 hash is correctly calculated."""
        spec, build_module = self._load_build_module()
        with (
            patch("asyncio.run") as mock_run,
            patch("importlib.metadata.version", return_value="1.0.0.test"),
        ):
            mock_run.return_value = {"components": {"TestCategory": {"TestComponent": {"template": {}}}}}
            spec.loader.exec_module(build_module)
            index = build_module.build_component_index()
        # Recompute the hash over the payload WITHOUT the sha256 field itself,
        # key-sorted for determinism, and compare against the stored value.
        index_without_hash = {"version": index["version"], "entries": index["entries"]}
        payload = orjson.dumps(index_without_hash, option=orjson.OPT_SORT_KEYS)
        expected_hash = hashlib.sha256(payload).hexdigest()
        assert index["sha256"] == expected_hash

    def test_build_script_handles_import_errors(self):
        """Test that build script handles import errors gracefully."""
        spec, build_module = self._load_build_module()
        with patch("asyncio.run", side_effect=ImportError("Cannot import")):
            spec.loader.exec_module(build_module)
            index = build_module.build_component_index()
        assert index is None
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/test_build_component_index.py",
"license": "MIT License",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/lfx/tests/unit/test_component_index.py | """Unit tests for component index system."""
import hashlib
from pathlib import Path
from unittest.mock import Mock, patch
import orjson
import pytest
from lfx.interface.components import (
_get_cache_path,
_parse_dev_mode,
_read_component_index,
_save_generated_index,
import_langflow_components,
)
class TestParseDevMode:
    """Tests for _parse_dev_mode() function."""

    def test_dev_mode_not_set(self, monkeypatch):
        """With LFX_DEV unset, dev mode is off and no module filter applies."""
        monkeypatch.delenv("LFX_DEV", raising=False)
        is_enabled, module_filter = _parse_dev_mode()
        assert is_enabled is False
        assert module_filter is None

    def test_dev_mode_enabled_with_1(self, monkeypatch):
        """LFX_DEV=1 turns dev mode on with no module filter."""
        monkeypatch.setenv("LFX_DEV", "1")
        is_enabled, module_filter = _parse_dev_mode()
        assert is_enabled is True
        assert module_filter is None  # no filter means load all modules

    def test_dev_mode_enabled_with_true(self, monkeypatch):
        """LFX_DEV=true turns dev mode on."""
        monkeypatch.setenv("LFX_DEV", "true")
        is_enabled, module_filter = _parse_dev_mode()
        assert is_enabled is True
        assert module_filter is None

    def test_dev_mode_enabled_with_yes(self, monkeypatch):
        """LFX_DEV=yes turns dev mode on."""
        monkeypatch.setenv("LFX_DEV", "yes")
        is_enabled, module_filter = _parse_dev_mode()
        assert is_enabled is True
        assert module_filter is None

    def test_dev_mode_disabled_with_0(self, monkeypatch):
        """LFX_DEV=0 keeps dev mode off."""
        monkeypatch.setenv("LFX_DEV", "0")
        is_enabled, module_filter = _parse_dev_mode()
        assert is_enabled is False
        assert module_filter is None

    def test_dev_mode_disabled_with_false(self, monkeypatch):
        """LFX_DEV=false keeps dev mode off."""
        monkeypatch.setenv("LFX_DEV", "false")
        is_enabled, module_filter = _parse_dev_mode()
        assert is_enabled is False
        assert module_filter is None

    def test_dev_mode_disabled_with_empty(self, monkeypatch):
        """An empty LFX_DEV value keeps dev mode off."""
        monkeypatch.setenv("LFX_DEV", "")
        is_enabled, module_filter = _parse_dev_mode()
        assert is_enabled is False
        assert module_filter is None

    def test_dev_mode_case_insensitive(self, monkeypatch):
        """The env var value is matched case-insensitively."""
        for raw_value in ("TRUE", "YES"):
            monkeypatch.setenv("LFX_DEV", raw_value)
            is_enabled, module_filter = _parse_dev_mode()
            assert is_enabled is True
            assert module_filter is None

    def test_dev_mode_single_module(self, monkeypatch):
        """A single non-flag value becomes a one-module filter."""
        monkeypatch.setenv("LFX_DEV", "mistral")
        is_enabled, module_filter = _parse_dev_mode()
        assert is_enabled is True
        assert module_filter == ["mistral"]

    def test_dev_mode_multiple_modules(self, monkeypatch):
        """Comma-separated values become a multi-module filter."""
        monkeypatch.setenv("LFX_DEV", "mistral,openai,anthropic")
        is_enabled, module_filter = _parse_dev_mode()
        assert is_enabled is True
        assert module_filter == ["mistral", "openai", "anthropic"]

    def test_dev_mode_modules_with_spaces(self, monkeypatch):
        """Whitespace around module names is stripped."""
        monkeypatch.setenv("LFX_DEV", "mistral, openai , anthropic")
        is_enabled, module_filter = _parse_dev_mode()
        assert is_enabled is True
        assert module_filter == ["mistral", "openai", "anthropic"]

    def test_dev_mode_modules_case_normalized(self, monkeypatch):
        """Module names are lowercased."""
        monkeypatch.setenv("LFX_DEV", "Mistral,OpenAI")
        is_enabled, module_filter = _parse_dev_mode()
        assert is_enabled is True
        assert module_filter == ["mistral", "openai"]
class TestReadComponentIndex:
    """Tests for _read_component_index() function.

    The reader resolves its built-in index relative to the lfx package
    location (mocked via ``inspect.getfile``) and validates both the SHA256
    payload hash and the package version before accepting an index.
    """

    def test_read_index_file_not_found(self):
        """Test reading index when the index file doesn't exist."""
        mock_path = Mock()
        mock_path.exists.return_value = False
        with patch("lfx.interface.components.Path") as mock_path_class:
            mock_path_class.return_value = mock_path
            result = _read_component_index()
            assert result is None

    def test_read_index_valid(self, tmp_path):
        """Test reading a valid index file (hash and version both match)."""
        # Create valid index. The hash is computed over the key-sorted
        # payload BEFORE the sha256 key is added.
        index = {
            "version": "0.1.12",
            "entries": [["category1", {"comp1": {"template": {}}}]],
        }
        payload = orjson.dumps(index, option=orjson.OPT_SORT_KEYS)
        index["sha256"] = hashlib.sha256(payload).hexdigest()
        # Mock the path resolution so the reader looks under tmp_path/lfx/_assets
        with (
            patch("lfx.interface.components.inspect.getfile") as mock_getfile,
            patch("importlib.metadata.version") as mock_version,
        ):
            mock_getfile.return_value = str(tmp_path / "lfx" / "__init__.py")
            mock_version.return_value = "0.1.12"
            # Create the directory structure the reader expects
            (tmp_path / "lfx" / "_assets").mkdir(parents=True)
            (tmp_path / "lfx" / "_assets" / "component_index.json").write_bytes(
                orjson.dumps(index, option=orjson.OPT_SORT_KEYS | orjson.OPT_INDENT_2)
            )
            result = _read_component_index()
            assert result is not None
            assert result["version"] == "0.1.12"
            assert "entries" in result
            assert result["sha256"] == index["sha256"]

    def test_read_index_invalid_sha256(self, tmp_path):
        """Test reading index with invalid SHA256 (must be rejected)."""
        # Create index with a hash that cannot match the payload.
        index = {
            "version": "0.1.12",
            "entries": [["category1", {"comp1": {"template": {}}}]],
            "sha256": "invalid_hash",
        }
        with (
            patch("lfx.interface.components.inspect.getfile") as mock_getfile,
            patch("importlib.metadata.version") as mock_version,
        ):
            mock_getfile.return_value = str(tmp_path / "lfx" / "__init__.py")
            mock_version.return_value = "0.1.12"
            (tmp_path / "lfx" / "_assets").mkdir(parents=True)
            (tmp_path / "lfx" / "_assets" / "component_index.json").write_bytes(orjson.dumps(index))
            result = _read_component_index()
            assert result is None

    def test_read_index_version_mismatch(self, tmp_path):
        """Test reading index whose version differs from the installed one."""
        index = {
            "version": "0.1.11",
            "entries": [["category1", {"comp1": {"template": {}}}]],
        }
        payload = orjson.dumps(index, option=orjson.OPT_SORT_KEYS)
        index["sha256"] = hashlib.sha256(payload).hexdigest()
        with (
            patch("lfx.interface.components.inspect.getfile") as mock_getfile,
            patch("importlib.metadata.version") as mock_version,
        ):
            mock_getfile.return_value = str(tmp_path / "lfx" / "__init__.py")
            mock_version.return_value = "0.1.12"  # Different version
            (tmp_path / "lfx" / "_assets").mkdir(parents=True)
            (tmp_path / "lfx" / "_assets" / "component_index.json").write_bytes(
                orjson.dumps(index, option=orjson.OPT_SORT_KEYS | orjson.OPT_INDENT_2)
            )
            result = _read_component_index()
            assert result is None

    def test_read_index_custom_path_file(self, tmp_path):
        """Test reading index from a custom file path argument."""
        index = {
            "version": "0.1.12",
            "entries": [["category1", {"comp1": {"template": {}}}]],
        }
        payload = orjson.dumps(index, option=orjson.OPT_SORT_KEYS)
        index["sha256"] = hashlib.sha256(payload).hexdigest()
        custom_file = tmp_path / "custom_index.json"
        custom_file.write_bytes(orjson.dumps(index, option=orjson.OPT_SORT_KEYS | orjson.OPT_INDENT_2))
        with patch("importlib.metadata.version") as mock_version:
            mock_version.return_value = "0.1.12"
            result = _read_component_index(str(custom_file))
            assert result is not None
            assert result["version"] == "0.1.12"

    def test_read_index_custom_path_url(self):
        """Test reading index from a URL (fetched via httpx.get)."""
        index = {
            "version": "0.1.12",
            "entries": [["category1", {"comp1": {"template": {}}}]],
        }
        payload = orjson.dumps(index, option=orjson.OPT_SORT_KEYS)
        index["sha256"] = hashlib.sha256(payload).hexdigest()
        mock_response = Mock()
        mock_response.content = orjson.dumps(index)
        with (
            patch("httpx.get", return_value=mock_response),
            patch("importlib.metadata.version", return_value="0.1.12"),
        ):
            result = _read_component_index("https://example.com/index.json")
            assert result is not None
            assert result["version"] == "0.1.12"
class TestCachePath:
    """Tests for cache path functionality."""

    def test_get_cache_path_returns_path(self):
        """The cache path is a Path named component_index.json under an lfx dir."""
        cache_path = _get_cache_path()
        assert isinstance(cache_path, Path)
        assert cache_path.name == "component_index.json"
        assert "lfx" in str(cache_path)
class TestSaveGeneratedIndex:
    """Tests for _save_generated_index() function."""

    def test_save_generated_index(self, tmp_path, monkeypatch):
        """A two-category dict is persisted with version, entries and hash."""
        components_by_category = {
            "category1": {"comp1": {"template": {}, "display_name": "Component 1"}},
            "category2": {"comp2": {"template": {}, "display_name": "Component 2"}},
        }
        target = tmp_path / "component_index.json"
        # Redirect the cache location into the test's temporary directory.
        monkeypatch.setattr("lfx.interface.components._get_cache_path", lambda: target)
        with patch("importlib.metadata.version", return_value="0.1.12"):
            _save_generated_index(components_by_category)
        assert target.exists()
        persisted = orjson.loads(target.read_bytes())
        assert persisted["version"] == "0.1.12"
        assert "entries" in persisted
        assert "sha256" in persisted
        assert len(persisted["entries"]) == 2

    def test_save_generated_index_empty_dict(self, tmp_path, monkeypatch):
        """Saving an empty dict still writes a valid index with zero entries."""
        target = tmp_path / "component_index.json"
        monkeypatch.setattr("lfx.interface.components._get_cache_path", lambda: target)
        with patch("importlib.metadata.version", return_value="0.1.12"):
            _save_generated_index({})
        assert target.exists()
        persisted = orjson.loads(target.read_bytes())
        assert len(persisted["entries"]) == 0
@pytest.mark.asyncio
class TestImportLangflowComponents:
    """Tests for import_langflow_components() async function."""

    async def test_import_with_dev_mode(self, monkeypatch):
        """Test import in dev mode (dynamic loading)."""
        monkeypatch.setenv("LFX_DEV", "1")
        with patch("lfx.interface.components._process_single_module") as mock_process:
            # _process_single_module returns a (category, components) pair.
            mock_process.return_value = ("category1", {"comp1": {"template": {}}})
            with (
                patch("lfx.interface.components.pkgutil.walk_packages") as mock_walk,
                patch("lfx.interface.components._save_generated_index") as mock_save,
            ):
                # walk_packages yields (finder, module_name, is_pkg) tuples.
                mock_walk.return_value = [
                    (None, "lfx.components.category1", False),
                ]
                result = await import_langflow_components()
                assert "components" in result
                assert "category1" in result["components"]
                # In dev mode, we don't save to cache
                assert not mock_save.called

    async def test_import_with_builtin_index(self, monkeypatch):
        """Test import with valid built-in index."""
        monkeypatch.delenv("LFX_DEV", raising=False)
        index = {
            "version": "0.1.12",
            "entries": [["category1", {"comp1": {"template": {}}}]],
        }
        # The sha256 is computed over the key-sorted payload before the hash
        # key is added, matching the reader's verification scheme.
        payload = orjson.dumps(index, option=orjson.OPT_SORT_KEYS)
        index["sha256"] = hashlib.sha256(payload).hexdigest()
        with (
            patch("lfx.interface.components._read_component_index") as mock_read,
            patch("importlib.metadata.version", return_value="0.1.12"),
        ):
            mock_read.return_value = index
            result = await import_langflow_components()
            assert "components" in result
            assert "category1" in result["components"]
            assert "comp1" in result["components"]["category1"]

    async def test_import_with_missing_index_creates_cache(self, tmp_path, monkeypatch):
        """Test import with missing index falls back to dynamic and caches."""
        monkeypatch.delenv("LFX_DEV", raising=False)
        cache_file = tmp_path / "component_index.json"
        monkeypatch.setattr("lfx.interface.components._get_cache_path", lambda: cache_file)
        with (
            patch("lfx.interface.components._read_component_index") as mock_read,
            patch("lfx.interface.components._process_single_module") as mock_process,
            patch("lfx.interface.components.pkgutil.walk_packages") as mock_walk,
            patch("importlib.metadata.version", return_value="0.1.12"),
        ):
            # Simulate missing built-in index and cache
            mock_read.return_value = None
            mock_process.return_value = ("category1", {"comp1": {"template": {}}})
            mock_walk.return_value = [(None, "lfx.components.category1", False)]
            result = await import_langflow_components()
            assert "components" in result
            # Non-dev fallback should persist a cache, unlike dev mode.
            assert cache_file.exists()

    async def test_import_with_custom_path_from_settings(self, tmp_path, monkeypatch):
        """Test import with custom index path from settings."""
        monkeypatch.delenv("LFX_DEV", raising=False)
        index = {
            "version": "0.1.12",
            "entries": [["category1", {"comp1": {"template": {}}}]],
        }
        payload = orjson.dumps(index, option=orjson.OPT_SORT_KEYS)
        index["sha256"] = hashlib.sha256(payload).hexdigest()
        custom_file = tmp_path / "custom_index.json"
        custom_file.write_bytes(orjson.dumps(index))
        # Settings object exposing settings.components_index_path.
        mock_settings = Mock()
        mock_settings.settings.components_index_path = str(custom_file)
        with (
            patch("lfx.interface.components._read_component_index") as mock_read,
            patch("importlib.metadata.version", return_value="0.1.12"),
        ):
            mock_read.return_value = index
            result = await import_langflow_components(mock_settings)
            assert "components" in result
            # Verify custom path was used
            mock_read.assert_called_with(str(custom_file))

    async def test_import_handles_import_errors(self, monkeypatch):
        """Test import handles component import errors gracefully."""
        monkeypatch.setenv("LFX_DEV", "1")
        with (
            patch("lfx.interface.components._process_single_module") as mock_process,
            patch("lfx.interface.components.pkgutil.walk_packages") as mock_walk,
        ):
            # Simulate an import error
            mock_process.side_effect = ImportError("Failed to import")
            mock_walk.return_value = [(None, "lfx.components.broken", False)]
            result = await import_langflow_components()
            # Should return empty dict, not raise
            assert "components" in result
            assert len(result["components"]) == 0
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/tests/unit/test_component_index.py",
"license": "MIT License",
"lines": 328,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/lfx/src/lfx/logging/logger.py | """Backwards compatibility module for lfx.logging.logger.
This module provides backwards compatibility for code that imports from lfx.logging.logger.
All functionality has been moved to lfx.log.logger.
"""
# Ensure we maintain all the original exports
from lfx.log.logger import (
InterceptHandler,
LogConfig,
configure,
logger,
setup_gunicorn_logger,
setup_uvicorn_logger,
)
# Re-exported names; mirrors the import list above so that
# `from lfx.logging.logger import <name>` keeps working after the move
# to lfx.log.logger.
__all__ = [
    "InterceptHandler",
    "LogConfig",
    "configure",
    "logger",
    "setup_gunicorn_logger",
    "setup_uvicorn_logger",
]
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/logging/logger.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/tests/unit/cli/test_run_starter_projects_backward_compatibility.py | """Test run command with starter project templates from 1.6.0 for backwards compatibility.
Tests that all starter project JSON files from tag 1.6.0 can be loaded by lfx run command
without import errors for langflow modules. We expect execution errors
(missing API keys, etc.) but no import/module errors.
This ensures backwards compatibility with existing starter projects.
"""
from pathlib import Path
import pytest
from lfx.__main__ import app
from typer.testing import CliRunner
# Single Typer CliRunner shared by every test in this module.
runner = CliRunner()
def get_starter_projects_path() -> Path:
    """Return the 1.6.0 starter-projects directory under tests/data.

    Returns:
        Path to the cached 1.6.0 starter project templates
        (tests/data/starter_projects_1_6_0, resolved to an absolute path).
    """
    # parents[2] == parent.parent.parent of the resolved test file path.
    return Path(__file__).resolve().parents[2] / "data" / "starter_projects_1_6_0"
def get_starter_project_files():
    """Collect the starter-project JSON templates for parameterization.

    Returns files in sorted order so parametrized tests run deterministically,
    or an empty list when the cache directory is missing.
    """
    directory = get_starter_projects_path()
    return sorted(directory.glob("*.json")) if directory.exists() else []
class TestRunStarterProjectsBackwardCompatibility:
    """Test run command with starter project templates from 1.6.0 for backwards compatibility."""

    def test_starter_projects_1_6_0_exist(self):
        """Test that 1.6.0 starter projects directory exists and has templates."""
        path = get_starter_projects_path()
        if not path.exists():
            pytest.fail(f"1.6.0 starter projects cache directory not found: {path}")
        templates = get_starter_project_files()
        if len(templates) == 0:
            pytest.fail(f"No 1.6.0 starter project files found in cache: {path}")

    @pytest.mark.parametrize("template_file", get_starter_project_files(), ids=lambda x: x.name)
    def test_run_1_6_0_starter_project_no_import_errors(self, template_file):
        """Test that 1.6.0 starter project can be loaded without langflow or lfx import errors.

        We expect execution errors (missing API keys, missing inputs, etc.)
        but there should be NO errors about importing langflow or lfx modules.

        Note: Some 1.6.0 starter projects contain components with import bugs that were
        fixed in later versions. These are marked as expected failures.
        """
        # Known failing starter projects due to component-level import bugs in 1.6.0
        known_failing_projects = {
            "News Aggregator.json": "Contains SaveToFile component with langflow.api import bug "
            "(fixed in later versions)"
        }
        if template_file.name in known_failing_projects:
            pytest.xfail(f"Known 1.6.0 component bug: {known_failing_projects[template_file.name]}")
        # Run the command with --no-check-variables to skip variable validation
        # Use verbose mode to get detailed error messages in stderr
        result = runner.invoke(
            app,
            ["run", "--verbose", "--no-check-variables", str(template_file), "test input"],
        )
        # The command will likely fail due to missing API keys, etc.
        # But we're checking that there are no import errors
        # Use the combined output provided by Click/Typer
        all_output = result.output
        # Check for import errors related to langflow or lfx
        # NOTE(review): the bare "Module" substring makes this gate very broad,
        # but the inner checks below only fail on specific langflow/lfx errors.
        if "ModuleNotFoundError" in all_output or "ImportError" in all_output or "Module" in all_output:
            # Check for langflow import errors
            if "No module named 'langflow'" in all_output or "Module langflow" in all_output:
                # Extract the specific error for better debugging
                error_line = ""
                for line in all_output.split("\n"):
                    if "langflow" in line and ("No module named" in line or "Module" in line):
                        error_line = line.strip()
                        break
                pytest.fail(f"Langflow import error found in 1.6.0 template {template_file.name}.\nError: {error_line}")
            # Check for lfx import errors (these indicate structural issues)
            if "No module named 'lfx." in all_output or "Module lfx." in all_output:
                # Extract the specific error for better debugging
                import re

                # Remove ANSI color codes for cleaner output
                clean_output = re.sub(r"\x1b\[[0-9;]*m", "", all_output)
                error_lines = []
                for line in clean_output.split("\n"):
                    if "lfx" in line and ("No module named" in line or "Module lfx." in line):
                        # Extract just the module name from various error formats
                        if "No module named" in line:
                            match = re.search(r"No module named ['\"]([^'\"]+)['\"]", line)
                            if match:
                                error_lines.append(f" - Missing module: {match.group(1)}")
                        elif "Module lfx." in line and "not found" in line:
                            match = re.search(r"Module (lfx\.[^\s]+)", line)
                            if match:
                                error_lines.append(f" - Missing module: {match.group(1)}")
                # Deduplicate while preserving order
                seen = set()
                unique_errors = []
                for error in error_lines:
                    if error not in seen:
                        seen.add(error)
                        unique_errors.append(error)
                error_detail = "\n".join(unique_errors[:5])  # Show first 5 unique lfx errors
                pytest.fail(
                    f"LFX import error found in 1.6.0 template {template_file.name}.\n"
                    f"This indicates lfx internal structure issues.\n"
                    f"Missing modules:\n{error_detail}"
                )
            # Check for other critical import errors
            if "cannot import name" in all_output and ("langflow" in all_output or "lfx" in all_output):
                # Extract the specific import error
                error_line = ""
                for line in all_output.split("\n"):
                    if "cannot import name" in line:
                        error_line = line.strip()
                        break
                pytest.fail(f"Import error found in 1.6.0 template {template_file.name}.\nError: {error_line}")

    @pytest.mark.parametrize("template_file", get_starter_project_files(), ids=lambda x: x.name)
    def test_run_1_6_0_starter_project_format_options(self, template_file):
        """Test that 1.6.0 starter projects can be run with different output formats.

        This tests that the basic command parsing works, even if execution fails.
        """
        formats = ["json", "text", "message", "result"]
        for fmt in formats:
            result = runner.invoke(
                app,
                ["run", "--format", fmt, "--no-check-variables", str(template_file), "test"],
            )
            # We don't check exit code as it may fail due to missing dependencies
            # We just want to ensure the command is parsed and attempted
            # Check that we got some output (even if it's an error)
            if len(result.output) == 0:
                pytest.fail(f"No output for 1.6.0 template {template_file.name} with format {fmt}")

    @pytest.mark.xfail(reason="CLI --format option doesn't apply to error messages")
    @pytest.mark.parametrize("template_file", get_starter_project_files()[:1], ids=lambda x: x.name)
    def test_run_1_6_0_format_option_applies_to_errors(self, template_file):
        """Test that --format option applies to error messages.

        Currently fails: Error messages are always returned as JSON regardless of --format.
        This test documents the expected behavior for future CLI fixes.
        """
        import json as json_module

        # Test with text format - errors should be plain text, not JSON
        result = runner.invoke(
            app,
            ["run", "--format", "text", "--no-check-variables", str(template_file), "test"],
        )
        # If we get an error (which we expect due to missing API keys), it should be plain text
        if result.exit_code != 0:
            # Should NOT be valid JSON when format is "text"
            try:
                json_module.loads(result.output)
                pytest.fail(
                    "Error output is JSON format when --format text was specified. "
                    "Error messages should respect the --format option."
                )
            except json_module.JSONDecodeError:
                # This is the expected behavior - plain text error
                pass

    @pytest.mark.xfail(reason="CLI --format option doesn't apply when --verbose is used")
    @pytest.mark.parametrize("template_file", get_starter_project_files()[:1], ids=lambda x: x.name)
    def test_run_1_6_0_format_option_applies_with_verbose(self, template_file):
        """Test that --format option applies even when --verbose is used.

        Currently fails: --verbose output doesn't conform to --format specification.
        This test documents the expected behavior for future CLI fixes.
        """
        import json as json_module

        # Test with JSON format + verbose - all output should be valid JSON
        result = runner.invoke(
            app,
            ["run", "--format", "json", "--verbose", "--no-check-variables", str(template_file), "test"],
        )
        # With --format json, even verbose output should be parseable as JSON
        # (or at least the final output should be JSON without mixed text logs)
        lines = result.output.strip().split("\n")
        last_line = lines[-1] if lines else ""
        try:
            json_module.loads(last_line)
        except json_module.JSONDecodeError:
            pytest.fail(
                "With --format json and --verbose, expected final output to be valid JSON. "
                "Verbose logs should not interfere with JSON output format."
            )

    @pytest.mark.xfail(
        reason="1.6.0 basic templates have langflow import issues - components expect langflow package to be available"
    )
    def test_run_basic_1_6_0_starter_projects_detailed(self):
        """Test basic 1.6.0 starter projects that should have minimal dependencies."""
        basic_templates = [
            "Basic Prompting.json",
            "Basic Prompt Chaining.json",
        ]
        starter_path = get_starter_projects_path()
        for template_name in basic_templates:
            template_file = starter_path / template_name
            if not template_file.exists():
                continue
            result = runner.invoke(
                app,
                ["run", "--verbose", "--no-check-variables", str(template_file), "Hello test"],
            )
            # These basic templates might still fail due to missing LLM API keys
            # but should not have import errors
            all_output = result.output
            # More specific checks for these basic templates
            if "No module named 'langflow'" in all_output:
                pytest.fail(f"Langflow import error in 1.6.0 template {template_name}")
            # Check for module not found errors specifically related to langflow
            # (Settings service errors are runtime errors, not import errors)
            if "ModuleNotFoundError" in all_output and "langflow" in all_output and "lfx.services" not in all_output:
                # This is an actual langflow import error, not an internal lfx error
                pytest.fail(f"Module not found error for langflow in 1.6.0 template {template_name}")

    @pytest.mark.parametrize("template_file", get_starter_project_files(), ids=lambda x: x.name)
    def test_run_1_6_0_starter_project_with_stdin(self, template_file):
        """Test loading 1.6.0 starter projects via stdin."""
        with template_file.open(encoding="utf-8") as f:
            json_content = f.read()
        result = runner.invoke(
            app,
            ["run", "--stdin", "--no-check-variables", "--input-value", "test"],
            input=json_content,
        )
        # Check that the command attempted to process the input
        if len(result.output) == 0:
            pytest.fail("No output from 1.6.0 stdin test")
        # Verify no import errors
        all_output = result.output
        if "No module named 'langflow'" in all_output:
            pytest.fail("Langflow import error in 1.6.0 stdin test")

    @pytest.mark.parametrize("template_file", get_starter_project_files(), ids=lambda x: x.name)
    def test_run_1_6_0_starter_project_inline_json(self, template_file):
        """Test loading 1.6.0 starter projects via --flow-json option."""
        with template_file.open(encoding="utf-8") as f:
            json_content = f.read()
        result = runner.invoke(
            app,
            ["run", "--flow-json", json_content, "--no-check-variables", "--input-value", "test"],
        )
        # Check that the command attempted to process the input
        if len(result.output) == 0:
            pytest.fail("No output from 1.6.0 inline JSON test")
        # Verify no import errors
        all_output = result.output
        if "No module named 'langflow'" in all_output:
            pytest.fail("Langflow import error in 1.6.0 inline JSON test")
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/tests/unit/cli/test_run_starter_projects_backward_compatibility.py",
"license": "MIT License",
"lines": 239,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/lfx/tests/unit/services/settings/test_mcp_composer_version.py | """Tests for mcp_composer_version validator in Settings."""
from lfx.services.settings.base import Settings
def test_bare_version_gets_tilde_equals_prefix(monkeypatch):
"""Test that a bare version like '0.1.0.7' gets ~= prefix added."""
monkeypatch.setenv("LANGFLOW_MCP_COMPOSER_VERSION", "0.1.0.7")
settings = Settings()
assert settings.mcp_composer_version == "~=0.1.0.7"
def test_version_with_tilde_equals_is_preserved(monkeypatch):
"""Test that a version with ~= is preserved as-is."""
monkeypatch.setenv("LANGFLOW_MCP_COMPOSER_VERSION", "~=0.1.0.7")
settings = Settings()
assert settings.mcp_composer_version == "~=0.1.0.7"
def test_version_with_greater_than_or_equal_is_preserved(monkeypatch):
"""Test that a version with >= is preserved as-is."""
monkeypatch.setenv("LANGFLOW_MCP_COMPOSER_VERSION", ">=0.1.0.7")
settings = Settings()
assert settings.mcp_composer_version == ">=0.1.0.7"
def test_version_with_exact_match_is_preserved(monkeypatch):
"""Test that a version with == is preserved as-is."""
monkeypatch.setenv("LANGFLOW_MCP_COMPOSER_VERSION", "==0.1.0.7")
settings = Settings()
assert settings.mcp_composer_version == "==0.1.0.7"
def test_version_with_less_than_or_equal_is_preserved(monkeypatch):
"""Test that a version with <= is preserved as-is."""
monkeypatch.setenv("LANGFLOW_MCP_COMPOSER_VERSION", "<=0.1.0.7")
settings = Settings()
assert settings.mcp_composer_version == "<=0.1.0.7"
def test_version_with_not_equal_is_preserved(monkeypatch):
"""Test that a version with != is preserved as-is."""
monkeypatch.setenv("LANGFLOW_MCP_COMPOSER_VERSION", "!=0.1.0.7")
settings = Settings()
assert settings.mcp_composer_version == "!=0.1.0.7"
def test_version_with_less_than_is_preserved(monkeypatch):
"""Test that a version with < is preserved as-is."""
monkeypatch.setenv("LANGFLOW_MCP_COMPOSER_VERSION", "<0.1.0.7")
settings = Settings()
assert settings.mcp_composer_version == "<0.1.0.7"
def test_version_with_greater_than_is_preserved(monkeypatch):
"""Test that a version with > is preserved as-is."""
monkeypatch.setenv("LANGFLOW_MCP_COMPOSER_VERSION", ">0.1.0.7")
settings = Settings()
assert settings.mcp_composer_version == ">0.1.0.7"
def test_version_with_arbitrary_equality_is_preserved(monkeypatch):
    """A version carrying the arbitrary-equality '===' operator passes through untouched."""
    spec = "===0.1.0.7"
    monkeypatch.setenv("LANGFLOW_MCP_COMPOSER_VERSION", spec)
    assert Settings().mcp_composer_version == spec
def test_empty_version_gets_default(monkeypatch):
    """An empty env value falls back to the pinned default specifier."""
    monkeypatch.setenv("LANGFLOW_MCP_COMPOSER_VERSION", "")
    assert Settings().mcp_composer_version == "==0.1.0.8.10"
def test_no_env_var_uses_default(monkeypatch):
    """With the env var absent entirely, the pinned default specifier is used."""
    monkeypatch.delenv("LANGFLOW_MCP_COMPOSER_VERSION", raising=False)
    assert Settings().mcp_composer_version == "==0.1.0.8.10"
def test_three_part_version_gets_prefix(monkeypatch):
    """A three-part bare version ('1.2.3') gains the '~=' prefix."""
    monkeypatch.setenv("LANGFLOW_MCP_COMPOSER_VERSION", "1.2.3")
    assert Settings().mcp_composer_version == "~=1.2.3"
def test_two_part_version_gets_prefix(monkeypatch):
    """A two-part bare version ('1.2') gains the '~=' prefix."""
    monkeypatch.setenv("LANGFLOW_MCP_COMPOSER_VERSION", "1.2")
    assert Settings().mcp_composer_version == "~=1.2"
def test_single_digit_version_gets_prefix(monkeypatch):
    """A single-component bare version ('1') gains the '~=' prefix."""
    monkeypatch.setenv("LANGFLOW_MCP_COMPOSER_VERSION", "1")
    assert Settings().mcp_composer_version == "~=1"
def test_validator_directly():
    """Exercise validate_mcp_composer_version directly, without building a Settings instance."""
    # (raw input, expected normalized specifier)
    cases = [
        ("0.1.0.7", "~=0.1.0.7"),      # bare version gets ~= prefix
        (">=0.1.0.7", ">=0.1.0.7"),    # explicit specifier preserved
        ("", "==0.1.0.8.10"),          # empty falls back to default
        (None, "==0.1.0.8.10"),        # None falls back to default
    ]
    for raw, expected in cases:
        assert Settings.validate_mcp_composer_version(raw) == expected
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/tests/unit/services/settings/test_mcp_composer_version.py",
"license": "MIT License",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/src/lfx/tests/unit/components/vlmrun/test_vlmrun_transcription.py | from unittest.mock import Mock, patch
import pytest
from langflow.schema.data import Data
from lfx.components.vlmrun import VLMRunTranscription
from tests.base import ComponentTestBaseWithoutClient
class TestVLMRunTranscription(ComponentTestBaseWithoutClient):
    """Test class for VLM Run Transcription component."""
    # Private _create_* helpers build Mock stand-ins for the VLM Run SDK objects
    # (usage stats, prediction responses, and the client itself).
    def _create_mock_usage(self, total_tokens=100, prompt_tokens=70, completion_tokens=30):
        """Helper method to create a mock usage object."""
        mock_usage = Mock()
        mock_usage.configure_mock(
            total_tokens=total_tokens,
            prompt_tokens=prompt_tokens,
            completion_tokens=completion_tokens,
            # Stub both serializer spellings so callers may use either API.
            model_dump=Mock(return_value={"total_tokens": total_tokens}),
            dict=Mock(return_value={"total_tokens": total_tokens}),
        )
        return mock_usage
    def _create_mock_response(self, prediction_id, segments, duration, usage, status="completed"):
        """Helper method to create a mock VLM Run response object."""
        mock_response = Mock()
        mock_response.configure_mock(
            id=prediction_id,
            response={"segments": segments, "metadata": {"duration": duration}},
            usage=usage,
            status=status,
        )
        return mock_response
    def _create_mock_vlm_client(self, audio_response=None, video_response=None):
        """Helper method to create a mock VLM Run client."""
        mock_client = Mock()
        if audio_response:
            mock_client.audio.generate.return_value = audio_response
            mock_client.predictions.wait.return_value = audio_response
        if video_response:
            mock_client.video.generate.return_value = video_response
            mock_client.predictions.wait.return_value = video_response
        return mock_client
    @pytest.fixture
    def component_class(self):
        """Return the component class to test."""
        return VLMRunTranscription
    @pytest.fixture
    def default_kwargs(self):
        """Return default kwargs for component initialization."""
        return {
            "api_key": "test-api-key",  # pragma: allowlist secret
            "media_type": "audio",
            "_session_id": "test-session-123",
        }
    @pytest.fixture
    def file_names_mapping(self):
        """Return file names mapping for different versions."""
        # Since this is a new component, return empty list
        return []
    def test_component_metadata(self, component_class):
        """Test component metadata attributes."""
        # Using pytest comparison for better error messages
        if component_class.display_name != "VLM Run Transcription":
            pytest.fail(f"Expected display_name to be 'VLM Run Transcription', got '{component_class.display_name}'")
        if (
            component_class.description
            != "Extract structured data from audio and video using [VLM Run AI](https://app.vlm.run)"
        ):
            pytest.fail(f"Expected description mismatch, got '{component_class.description}'")
        if component_class.documentation != "https://docs.vlm.run":
            pytest.fail(f"Expected documentation to be 'https://docs.vlm.run', got '{component_class.documentation}'")
        if component_class.icon != "VLMRun":
            pytest.fail(f"Expected icon to be 'VLMRun', got '{component_class.icon}'")
        if component_class.beta is not True:
            pytest.fail(f"Expected beta to be True, got '{component_class.beta}'")
    def test_component_inputs(self, component_class):
        """Test component input definitions."""
        component = component_class()
        inputs_dict = {inp.name: inp for inp in component.inputs}
        # Check API key input
        if "api_key" not in inputs_dict:
            pytest.fail("api_key not found in inputs_dict")
        if inputs_dict["api_key"].display_name != "VLM Run API Key":
            pytest.fail(
                f"Expected api_key display_name to be 'VLM Run API Key', got '{inputs_dict['api_key'].display_name}'"
            )
        if inputs_dict["api_key"].required is not True:
            pytest.fail(f"Expected api_key to be required, got {inputs_dict['api_key'].required}")
        # Check media type input
        if "media_type" not in inputs_dict:
            pytest.fail("media_type not found in inputs_dict")
        if inputs_dict["media_type"].display_name != "Media Type":
            pytest.fail(
                f"Expected media_type display_name to be 'Media Type', got '{inputs_dict['media_type'].display_name}'"
            )
        if inputs_dict["media_type"].options != ["audio", "video"]:
            pytest.fail(
                f"Expected media_type options to be ['audio', 'video'], got {inputs_dict['media_type'].options}"
            )
        if inputs_dict["media_type"].value != "audio":
            pytest.fail(f"Expected media_type value to be 'audio', got '{inputs_dict['media_type'].value}'")
        # Check media files input
        if "media_files" not in inputs_dict:
            pytest.fail("media_files not found in inputs_dict")
        if inputs_dict["media_files"].display_name != "Media Files":
            pytest.fail(
                f"Expected media_files display_name to be 'Media Files', "
                f"got '{inputs_dict['media_files'].display_name}'"
            )
        if inputs_dict["media_files"].is_list is not True:
            pytest.fail(f"Expected media_files.is_list to be True, got {inputs_dict['media_files'].is_list}")
        if inputs_dict["media_files"].required is not False:
            pytest.fail(f"Expected media_files to not be required, got {inputs_dict['media_files'].required}")
        # Check media URL input
        if "media_url" not in inputs_dict:
            pytest.fail("media_url not found in inputs_dict")
        if inputs_dict["media_url"].display_name != "Media URL":
            pytest.fail(
                f"Expected media_url display_name to be 'Media URL', got '{inputs_dict['media_url'].display_name}'"
            )
        if inputs_dict["media_url"].required is not False:
            pytest.fail(f"Expected media_url to not be required, got {inputs_dict['media_url'].required}")
        if inputs_dict["media_url"].advanced is not True:
            pytest.fail(f"Expected media_url to be advanced, got {inputs_dict['media_url'].advanced}")
    def test_component_outputs(self, component_class):
        """Test component output definitions."""
        component = component_class()
        outputs_dict = {out.name: out for out in component.outputs}
        if "result" not in outputs_dict:
            pytest.fail("result not found in outputs_dict")
        if outputs_dict["result"].display_name != "Result":
            pytest.fail(f"Expected result display_name to be 'Result', got '{outputs_dict['result'].display_name}'")
        if outputs_dict["result"].method != "process_media":
            pytest.fail(f"Expected result method to be 'process_media', got '{outputs_dict['result'].method}'")
    def test_no_input_validation(self, component_class, default_kwargs):
        """Test validation when no media input is provided."""
        component = component_class(**default_kwargs)
        result = component.process_media()
        if not isinstance(result, Data):
            pytest.fail(f"Expected result to be Data instance, got {type(result)}")
        if "error" not in result.data:
            pytest.fail("error not found in result.data")
        if result.data["error"] != "Either media files or media URL must be provided":
            pytest.fail(f"Expected error message mismatch, got '{result.data['error']}'")
        if component.status != "Either media files or media URL must be provided":
            pytest.fail(f"Expected status mismatch, got '{component.status}'")
    @patch("builtins.__import__")
    def test_vlmrun_import_error(self, mock_import, component_class, default_kwargs):
        """Test handling of VLM Run SDK import error."""
        # Configure mock import to raise ImportError for vlmrun.client
        # NOTE(review): with builtins.__import__ already patched, this name lookup
        # resolves to the mock rather than the real import machinery, so non-vlmrun
        # imports inside the component return Mock objects — works for this test,
        # but confirm that is intentional.
        original_import = __import__
        def side_effect(name, *args):
            if name == "vlmrun.client":
                error_msg = "No module named 'vlmrun'"
                raise ImportError(error_msg)
            return original_import(name, *args)
        mock_import.side_effect = side_effect
        component = component_class(**default_kwargs)
        component.media_files = ["/path/to/test.mp3"]
        result = component.process_media()
        if not isinstance(result, Data):
            pytest.fail(f"Expected result to be Data instance, got {type(result)}")
        if "error" not in result.data:
            pytest.fail("error not found in result.data")
        if "VLM Run SDK not installed" not in result.data["error"]:
            pytest.fail(f"Expected 'VLM Run SDK not installed' in error message, got '{result.data['error']}'")
    def test_frontend_node_generation(self, component_class, default_kwargs):
        """Test frontend node generation."""
        component = component_class(**default_kwargs)
        frontend_node = component.to_frontend_node()
        # Verify node structure
        if frontend_node is None:
            pytest.fail("frontend_node is None")
        if not isinstance(frontend_node, dict):
            pytest.fail(f"Expected frontend_node to be dict, got {type(frontend_node)}")
        if "data" not in frontend_node:
            pytest.fail("data not found in frontend_node")
        if "type" not in frontend_node["data"]:
            pytest.fail("type not found in frontend_node['data']")
        node_data = frontend_node["data"]["node"]
        if "description" not in node_data:
            pytest.fail("description not found in node_data")
        if "icon" not in node_data:
            pytest.fail("icon not found in node_data")
        if "template" not in node_data:
            pytest.fail("template not found in node_data")
        # Verify template has correct inputs
        template = node_data["template"]
        if "api_key" not in template:
            pytest.fail("api_key not found in template")
        if "media_type" not in template:
            pytest.fail("media_type not found in template")
        if "media_files" not in template:
            pytest.fail("media_files not found in template")
        if "media_url" not in template:
            pytest.fail("media_url not found in template")
    def test_input_field_types(self, component_class):
        """Test that input fields have correct types."""
        component = component_class()
        inputs_dict = {inp.name: inp for inp in component.inputs}
        # Check that media_files accepts the expected file types
        media_files_input = inputs_dict["media_files"]
        expected_audio_types = ["mp3", "wav", "m4a", "flac", "ogg", "opus", "webm", "aac"]
        expected_video_types = ["mp4", "mov", "avi", "mkv", "flv", "wmv", "m4v"]
        expected_types = expected_audio_types + expected_video_types
        for file_type in expected_types:
            if file_type not in media_files_input.file_types:
                pytest.fail(f"Expected file type '{file_type}' not found in media_files_input.file_types")
    def test_component_initialization(self, component_class, default_kwargs):
        """Test component can be initialized with default kwargs."""
        component = component_class(**default_kwargs)
        if component.api_key != "test-api-key":  # pragma: allowlist secret
            pytest.fail(f"Expected api_key to be 'test-api-key', got '{component.api_key}'")
        if component.media_type != "audio":
            pytest.fail(f"Expected media_type to be 'audio', got '{component.media_type}'")
        if not hasattr(component, "media_files"):
            pytest.fail("component does not have 'media_files' attribute")
        if not hasattr(component, "media_url"):
            pytest.fail("component does not have 'media_url' attribute")
    def test_media_type_options(self, component_class):
        """Test media type dropdown has correct options."""
        component = component_class()
        inputs_dict = {inp.name: inp for inp in component.inputs}
        media_type_input = inputs_dict["media_type"]
        if media_type_input.options != ["audio", "video"]:
            pytest.fail(f"Expected media_type options to be ['audio', 'video'], got {media_type_input.options}")
        if media_type_input.value != "audio":  # Default value
            pytest.fail(f"Expected media_type value to be 'audio', got '{media_type_input.value}'")
    def test_api_key_info_contains_url(self, component_class):
        """Test that API key input contains app URL for user guidance."""
        component = component_class()
        inputs_dict = {inp.name: inp for inp in component.inputs}
        api_key_input = inputs_dict["api_key"]
        if "https://app.vlm.run" not in api_key_input.info:
            pytest.fail(f"Expected 'https://app.vlm.run' in api_key info, got '{api_key_input.info}'")
    @patch("vlmrun.client.VLMRun")
    def test_single_audio_file_with_mocked_client(self, mock_vlmrun_class, component_class, default_kwargs):
        """Test single audio file processing with mocked VLMRun client."""
        # Create mock objects using helper methods
        mock_usage = self._create_mock_usage(total_tokens=150, prompt_tokens=100, completion_tokens=50)
        segments = [{"audio": {"content": "Hello world"}}, {"audio": {"content": "This is a test"}}]
        mock_response = self._create_mock_response("test-prediction-123", segments, 10.5, mock_usage)
        # Configure mock client
        mock_client = self._create_mock_vlm_client(audio_response=mock_response)
        mock_vlmrun_class.return_value = mock_client
        component = component_class(**default_kwargs)
        component.media_files = ["/path/to/test.mp3"]
        result = component.process_media()
        if not isinstance(result, Data):
            pytest.fail(f"Expected result to be Data instance, got {type(result)}")
        if "results" not in result.data:
            pytest.fail("results not found in result.data")
        if len(result.data["results"]) != 1:
            pytest.fail(f"Expected 1 result, got {len(result.data['results'])}")
        audio_result = result.data["results"][0]
        if audio_result["prediction_id"] != "test-prediction-123":
            pytest.fail(f"Expected prediction_id to be 'test-prediction-123', got '{audio_result['prediction_id']}'")
        if audio_result["transcription"] != "Hello world This is a test":
            pytest.fail(f"Expected transcription mismatch, got '{audio_result['transcription']}'")
        expected_duration = 10.5
        if audio_result["metadata"]["duration"] != pytest.approx(expected_duration):
            pytest.fail(f"Expected duration to be {expected_duration}, got {audio_result['metadata']['duration']}")
        if audio_result["status"] != "completed":
            pytest.fail(f"Expected status to be 'completed', got '{audio_result['status']}'")
        expected_tokens = 150
        if audio_result["usage"].total_tokens != expected_tokens:
            pytest.fail(f"Expected total_tokens to be {expected_tokens}, got {audio_result['usage'].total_tokens}")
        if "filename" not in audio_result:
            pytest.fail("filename not found in audio_result")
        if audio_result["filename"] != "test.mp3":
            pytest.fail(f"Expected filename to be 'test.mp3', got '{audio_result['filename']}'")
        # Verify the client was called correctly
        mock_client.audio.generate.assert_called_once()
        mock_client.predictions.wait.assert_called_once_with(mock_response.id, timeout=600)
        # Verify API key was passed correctly
        mock_vlmrun_class.assert_called_once_with(
            api_key="test-api-key"  # pragma: allowlist secret
        )
    @patch("vlmrun.client.VLMRun")
    def test_video_file_with_audio_content(self, mock_vlmrun_class, component_class, default_kwargs):
        """Test video file processing that includes both video and audio content."""
        # Create mock objects using helper methods
        mock_usage = self._create_mock_usage(total_tokens=300, prompt_tokens=200, completion_tokens=100)
        segments = [
            {"video": {"content": "Scene description 1"}, "audio": {"content": "Dialog line 1"}},
            {"video": {"content": "Scene description 2"}, "audio": {"content": "Dialog line 2"}},
            {"video": {"content": "Scene description 3"}},
        ]
        mock_response = self._create_mock_response("test-video-456", segments, 120.0, mock_usage)
        # Configure mock client
        mock_client = self._create_mock_vlm_client(video_response=mock_response)
        mock_vlmrun_class.return_value = mock_client
        component = component_class(**default_kwargs)
        component.media_type = "video"
        component.media_files = ["/path/to/test.mp4"]
        result = component.process_media()
        if not isinstance(result, Data):
            pytest.fail(f"Expected result to be Data instance, got {type(result)}")
        if "results" not in result.data:
            pytest.fail("results not found in result.data")
        if len(result.data["results"]) != 1:
            pytest.fail(f"Expected 1 result, got {len(result.data['results'])}")
        video_result = result.data["results"][0]
        if video_result["prediction_id"] != "test-video-456":
            pytest.fail(f"Expected prediction_id to be 'test-video-456', got '{video_result['prediction_id']}'")
        # Check that transcription includes both video content and audio in brackets
        expected_transcription = (
            "Scene description 1 [Audio: Dialog line 1] Scene description 2 [Audio: Dialog line 2] Scene description 3"
        )
        if video_result["transcription"] != expected_transcription:
            pytest.fail(f"Expected transcription mismatch, got '{video_result['transcription']}'")
        if video_result["metadata"]["media_type"] != "video":
            pytest.fail(f"Expected media_type to be 'video', got '{video_result['metadata']['media_type']}'")
        expected_video_duration = 120.0
        if video_result["metadata"]["duration"] != pytest.approx(expected_video_duration):
            pytest.fail(
                f"Expected duration to be {expected_video_duration}, got {video_result['metadata']['duration']}"
            )
        if video_result["status"] != "completed":
            pytest.fail(f"Expected status to be 'completed', got '{video_result['status']}'")
        expected_video_tokens = 300
        if video_result["usage"].total_tokens != expected_video_tokens:
            pytest.fail(
                f"Expected total_tokens to be {expected_video_tokens}, got {video_result['usage'].total_tokens}"
            )
        # Verify the client was called correctly
        mock_client.video.generate.assert_called_once()
        mock_client.predictions.wait.assert_called_once_with(mock_response.id, timeout=600)
        # Verify API key was passed correctly
        mock_vlmrun_class.assert_called_once_with(api_key="test-api-key")  # pragma: allowlist secret
    @patch("vlmrun.client.VLMRun")
    def test_multiple_files_combined_transcription(self, mock_vlmrun_class, component_class, default_kwargs):
        """Test processing multiple files returns combined transcription."""
        # Create mock objects using helper methods
        mock_usage_1 = self._create_mock_usage(total_tokens=50)
        mock_usage_2 = self._create_mock_usage(total_tokens=60)
        segments_1 = [{"audio": {"content": "File 1 content"}}]
        segments_2 = [{"audio": {"content": "File 2 content"}}]
        mock_response_1 = self._create_mock_response("pred-1", segments_1, 5, mock_usage_1)
        mock_response_2 = self._create_mock_response("pred-2", segments_2, 7, mock_usage_2)
        # Configure mock client to return different responses for each call
        mock_client = Mock()
        mock_client.audio.generate.side_effect = [mock_response_1, mock_response_2]
        mock_client.predictions.wait.side_effect = [mock_response_1, mock_response_2]
        mock_vlmrun_class.return_value = mock_client
        component = component_class(**default_kwargs)
        component.media_files = ["/path/to/file1.mp3", "/path/to/file2.mp3"]
        result = component.process_media()
        if not isinstance(result, Data):
            pytest.fail(f"Expected result to be Data instance, got {type(result)}")
        if "results" not in result.data:
            pytest.fail("results not found in result.data")
        expected_file_count = 2
        if len(result.data["results"]) != expected_file_count:
            pytest.fail(f"Expected {expected_file_count} results, got {len(result.data['results'])}")
        if result.data["total_files"] != expected_file_count:
            pytest.fail(f"Expected total_files to be {expected_file_count}, got {result.data['total_files']}")
        # Verify individual transcription results are accessible
        if result.data["results"][0]["transcription"] != "File 1 content":
            pytest.fail(
                f"Expected first transcription to be 'File 1 content', "
                f"got '{result.data['results'][0]['transcription']}'"
            )
        if result.data["results"][1]["transcription"] != "File 2 content":
            pytest.fail(
                f"Expected second transcription to be 'File 2 content', "
                f"got '{result.data['results'][1]['transcription']}'"
            )
        if result.data["results"][0]["filename"] != "file1.mp3":
            pytest.fail(f"Expected first filename to be 'file1.mp3', got '{result.data['results'][0]['filename']}'")
        if result.data["results"][1]["filename"] != "file2.mp3":
            pytest.fail(f"Expected second filename to be 'file2.mp3', got '{result.data['results'][1]['filename']}'")
        # Verify the client was called correctly for both files
        if mock_client.audio.generate.call_count != expected_file_count:
            pytest.fail(
                f"Expected audio.generate to be called {expected_file_count} times, "
                f"got {mock_client.audio.generate.call_count}"
            )
        if mock_client.predictions.wait.call_count != expected_file_count:
            pytest.fail(
                f"Expected predictions.wait to be called {expected_file_count} times, "
                f"got {mock_client.predictions.wait.call_count}"
            )
        # Verify API key was passed correctly
        mock_vlmrun_class.assert_called_once_with(api_key="test-api-key")  # pragma: allowlist secret
        # Verify predictions.wait was called with correct IDs and timeout
        wait_calls = mock_client.predictions.wait.call_args_list
        default_timeout = 600
        if wait_calls[0][0][0] != "pred-1":
            pytest.fail(f"Expected first wait call ID to be 'pred-1', got '{wait_calls[0][0][0]}'")
        if wait_calls[0][1]["timeout"] != default_timeout:
            pytest.fail(f"Expected first wait call timeout to be {default_timeout}, got {wait_calls[0][1]['timeout']}")
        if wait_calls[1][0][0] != "pred-2":
            pytest.fail(f"Expected second wait call ID to be 'pred-2', got '{wait_calls[1][0][0]}'")
        if wait_calls[1][1]["timeout"] != default_timeout:
            pytest.fail(f"Expected second wait call timeout to be {default_timeout}, got {wait_calls[1][1]['timeout']}")
    @patch("vlmrun.client.VLMRun")
    def test_url_input_processing(self, mock_vlmrun_class, component_class, default_kwargs):
        """Test processing media from URL."""
        # Create mock objects using helper methods
        mock_usage = self._create_mock_usage(total_tokens=75)
        segments = [{"audio": {"content": "URL content"}}]
        mock_response = self._create_mock_response("url-pred-789", segments, 15, mock_usage)
        # Configure mock client
        mock_client = Mock()
        mock_client.audio.generate.return_value = mock_response
        mock_client.predictions.wait.return_value = mock_response
        mock_vlmrun_class.return_value = mock_client
        component = component_class(**default_kwargs)
        component.media_url = "https://example.com/media.mp3"
        result = component.process_media()
        if not isinstance(result, Data):
            pytest.fail(f"Expected result to be Data instance, got {type(result)}")
        if "results" not in result.data:
            pytest.fail("results not found in result.data")
        audio_result = result.data["results"][0]
        if "source" not in audio_result:  # URL should use 'source' not 'filename'
            pytest.fail("source not found in audio_result")
        if audio_result["source"] != "https://example.com/media.mp3":
            pytest.fail(f"Expected source to be 'https://example.com/media.mp3', got '{audio_result['source']}'")
        # Verify the client was called with the correct URL and API key
        mock_client.audio.generate.assert_called_once()
        mock_client.predictions.wait.assert_called_once_with(mock_response.id, timeout=600)
        mock_vlmrun_class.assert_called_once_with(api_key="test-api-key")  # pragma: allowlist secret
        # Verify URL parameter was passed correctly
        call_args = mock_client.audio.generate.call_args
        if "url" not in call_args.kwargs:
            pytest.fail("url not found in call_args.kwargs")
        if call_args.kwargs["url"] != "https://example.com/media.mp3":
            pytest.fail(f"Expected url to be 'https://example.com/media.mp3', got '{call_args.kwargs['url']}'")
    def test_advanced_inputs_added(self, component_class):
        """Test that new advanced inputs are properly added."""
        component = component_class()
        inputs_dict = {inp.name: inp for inp in component.inputs}
        # Check timeout_seconds input
        default_timeout = 600
        if "timeout_seconds" not in inputs_dict:
            pytest.fail("timeout_seconds not found in inputs_dict")
        if inputs_dict["timeout_seconds"].display_name != "Timeout (seconds)":
            pytest.fail(
                f"Expected timeout_seconds display_name to be 'Timeout (seconds)', "
                f"got '{inputs_dict['timeout_seconds'].display_name}'"
            )
        if inputs_dict["timeout_seconds"].value != default_timeout:
            pytest.fail(
                f"Expected timeout_seconds value to be {default_timeout}, got {inputs_dict['timeout_seconds'].value}"
            )
        if inputs_dict["timeout_seconds"].advanced is not True:
            pytest.fail(f"Expected timeout_seconds to be advanced, got {inputs_dict['timeout_seconds'].advanced}")
        # Check domain input
        if "domain" not in inputs_dict:
            pytest.fail("domain not found in inputs_dict")
        if inputs_dict["domain"].display_name != "Processing Domain":
            pytest.fail(
                f"Expected domain display_name to be 'Processing Domain', got '{inputs_dict['domain'].display_name}'"
            )
        if inputs_dict["domain"].options != ["transcription"]:
            pytest.fail(f"Expected domain options to be ['transcription'], got {inputs_dict['domain'].options}")
        if inputs_dict["domain"].value != "transcription":
            pytest.fail(f"Expected domain value to be 'transcription', got '{inputs_dict['domain'].value}'")
        if inputs_dict["domain"].advanced is not True:
            pytest.fail(f"Expected domain to be advanced, got {inputs_dict['domain'].advanced}")
    @patch("vlmrun.client.VLMRun")
    def test_api_error_handling(self, mock_vlmrun_class, component_class, default_kwargs):
        """Test handling of API errors from VLM Run service."""
        # Configure mock client to raise a ValueError (which gets caught specifically)
        mock_client = Mock()
        mock_client.audio.generate.side_effect = ValueError("API request failed")
        mock_vlmrun_class.return_value = mock_client
        component = component_class(**default_kwargs)
        component.media_files = ["/path/to/test.mp3"]
        result = component.process_media()
        if not isinstance(result, Data):
            pytest.fail(f"Expected result to be Data instance, got {type(result)}")
        if "error" not in result.data:
            pytest.fail("error not found in result.data")
        if "Processing failed: API request failed" not in result.data["error"]:
            pytest.fail(
                f"Expected 'Processing failed: API request failed' in error message, got '{result.data['error']}'"
            )
        if component.status is None:
            pytest.fail("Expected component.status to not be None")
        # Verify the client was called correctly
        mock_client.audio.generate.assert_called_once()
        mock_vlmrun_class.assert_called_once_with(api_key="test-api-key")  # pragma: allowlist secret
    @patch("vlmrun.client.VLMRun")
    def test_timeout_parameter_usage(self, mock_vlmrun_class, component_class, default_kwargs):
        """Test that timeout parameter is passed to the VLM Run client."""
        # Create mock objects using helper methods
        mock_usage = self._create_mock_usage(total_tokens=100)
        segments = [{"audio": {"content": "Test content"}}]
        mock_response = self._create_mock_response("test-id", segments, 10, mock_usage)
        mock_client = self._create_mock_vlm_client(audio_response=mock_response)
        mock_vlmrun_class.return_value = mock_client
        # Set custom timeout
        component = component_class(**default_kwargs)
        component.timeout_seconds = 300
        component.media_files = ["/path/to/test.mp3"]
        result = component.process_media()
        if not isinstance(result, Data):
            pytest.fail(f"Expected result to be Data instance, got {type(result)}")
        if "results" not in result.data:
            pytest.fail("results not found in result.data")
        # Verify timeout was passed to predictions.wait
        mock_client.predictions.wait.assert_called_once_with(mock_response.id, timeout=300)
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/src/lfx/tests/unit/components/vlmrun/test_vlmrun_transcription.py",
"license": "MIT License",
"lines": 501,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/lfx/src/lfx/components/vlmrun/vlmrun_transcription.py | from pathlib import Path
from urllib.parse import urlparse
from langflow.custom.custom_component.component import Component
from langflow.io import (
DropdownInput,
FileInput,
IntInput,
MessageTextInput,
Output,
SecretStrInput,
)
from langflow.schema.data import Data
from loguru import logger
class VLMRunTranscription(Component):
    # Metadata rendered on the Langflow component card.
    display_name = "VLM Run Transcription"
    description = "Extract structured data from audio and video using [VLM Run AI](https://app.vlm.run)"
    documentation = "https://docs.vlm.run"
    icon = "VLMRun"
    beta = True
    inputs = [
        SecretStrInput(
            name="api_key",
            display_name="VLM Run API Key",
            info="Get your API key from https://app.vlm.run",
            required=True,
        ),
        DropdownInput(
            name="media_type",
            display_name="Media Type",
            options=["audio", "video"],
            value="audio",
            info="Select the type of media to process",
        ),
        # Both files and URL are optional individually; process_media rejects
        # the case where neither is supplied.
        FileInput(
            name="media_files",
            display_name="Media Files",
            file_types=[
                # Audio formats
                "mp3",
                "wav",
                "m4a",
                "flac",
                "ogg",
                "opus",
                "webm",
                "aac",
                # Video formats
                "mp4",
                "mov",
                "avi",
                "mkv",
                "flv",
                "wmv",
                "m4v",
            ],
            info="Upload one or more audio/video files",
            required=False,
            is_list=True,
        ),
        MessageTextInput(
            name="media_url",
            display_name="Media URL",
            info="URL to media file (alternative to file upload)",
            required=False,
            advanced=True,
        ),
        IntInput(
            name="timeout_seconds",
            display_name="Timeout (seconds)",
            value=600,
            info="Maximum time to wait for processing completion",
            advanced=True,
        ),
        DropdownInput(
            name="domain",
            display_name="Processing Domain",
            options=["transcription"],
            value="transcription",
            info="Select the processing domain",
            advanced=True,
        ),
    ]
    outputs = [
        Output(
            display_name="Result",
            name="result",
            method="process_media",
        ),
    ]
def _check_inputs(self) -> str | None:
"""Validate that either media files or URL is provided."""
if not self.media_files and not self.media_url:
return "Either media files or media URL must be provided"
return None
def _import_vlmrun(self):
"""Import and return VLMRun client class."""
try:
from vlmrun.client import VLMRun
except ImportError as e:
error_msg = "VLM Run SDK not installed. Run: pip install 'vlmrun[all]'"
raise ImportError(error_msg) from e
else:
return VLMRun
def _generate_media_response(self, client, media_source):
"""Generate response for audio or video media."""
domain_str = f"{self.media_type}.{self.domain}"
if self.media_type == "audio":
if isinstance(media_source, Path):
return client.audio.generate(file=media_source, domain=domain_str, batch=True)
return client.audio.generate(url=media_source, domain=domain_str, batch=True)
# video
if isinstance(media_source, Path):
return client.video.generate(file=media_source, domain=domain_str, batch=True)
return client.video.generate(url=media_source, domain=domain_str, batch=True)
def _wait_for_response(self, client, response):
"""Wait for batch processing to complete if needed."""
if hasattr(response, "id"):
return client.predictions.wait(response.id, timeout=self.timeout_seconds)
return response
def _extract_transcription(self, segments: list) -> list[str]:
"""Extract transcription parts from segments."""
transcription_parts = []
for segment in segments:
if self.media_type == "audio" and "audio" in segment:
transcription_parts.append(segment["audio"].get("content", ""))
elif self.media_type == "video" and "video" in segment:
transcription_parts.append(segment["video"].get("content", ""))
# Also include audio if available for video
if "audio" in segment:
audio_content = segment["audio"].get("content", "")
if audio_content and audio_content.strip():
transcription_parts.append(f"[Audio: {audio_content}]")
return transcription_parts
def _create_result_dict(self, response, transcription_parts: list, source_name: str) -> dict:
"""Create a standardized result dictionary."""
response_data = response.response if hasattr(response, "response") else {}
result = {
"prediction_id": response.id if hasattr(response, "id") else None,
"transcription": " ".join(transcription_parts),
"full_response": response_data,
"metadata": {
"media_type": self.media_type,
"duration": response_data.get("metadata", {}).get("duration", 0),
},
"usage": response.usage if hasattr(response, "usage") else None,
"status": response.status if hasattr(response, "status") else "completed",
}
# Add source-specific field
parsed_url = urlparse(source_name)
if parsed_url.scheme in ["http", "https", "s3", "gs", "ftp", "ftps"]:
result["source"] = source_name
else:
result["filename"] = source_name
return result
def _process_single_media(self, client, media_source, source_name: str) -> dict:
"""Process a single media file or URL."""
response = self._generate_media_response(client, media_source)
response = self._wait_for_response(client, response)
response_data = response.response if hasattr(response, "response") else {}
segments = response_data.get("segments", [])
transcription_parts = self._extract_transcription(segments)
return self._create_result_dict(response, transcription_parts, source_name)
def process_media(self) -> Data:
    """Process audio or video input with VLM Run and return structured data.

    Returns:
        Data: ``{"results": [...], "total_files": N}`` on success, or
        ``{"error": <message>}`` when validation or processing fails.
    """
    # Validate inputs before touching the SDK.
    validation_error = self._check_inputs()
    if validation_error:
        self.status = validation_error
        return Data(data={"error": validation_error})
    try:
        # Lazy-import the SDK and build an authenticated client.
        client = self._import_vlmrun()(api_key=self.api_key)
        results = []
        if self.media_files:
            # Accept a single file or a list of files.
            media_list = self.media_files if isinstance(self.media_files, list) else [self.media_files]
            total = len(media_list)
            for position, media_file in enumerate(media_list, start=1):
                self.status = f"Processing file {position} of {total}..."
                path = Path(media_file)
                results.append(self._process_single_media(client, path, path.name))
        elif self.media_url:
            results.append(self._process_single_media(client, self.media_url, self.media_url))
        self.status = f"Successfully processed {len(results)} file(s)"
        # Clean, flexible output structure.
        return Data(data={"results": results, "total_files": len(results)})
    except ImportError as e:
        self.status = str(e)
        return Data(data={"error": str(e)})
    except (ValueError, ConnectionError, TimeoutError) as e:
        logger.opt(exception=True).debug("Error processing media with VLM Run")
        failure = f"Processing failed: {e!s}"
        self.status = failure
        return Data(data={"error": failure})
    except (AttributeError, KeyError, OSError) as e:
        logger.opt(exception=True).debug("Unexpected error processing media with VLM Run")
        failure = f"Unexpected error: {e!s}"
        self.status = failure
        return Data(data={"error": failure})
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/vlmrun/vlmrun_transcription.py",
"license": "MIT License",
"lines": 203,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langflow-ai/langflow:src/backend/tests/unit/helpers/test_flow.py | """Unit tests for the langflow.helpers.flow module."""
import pytest
from lfx.utils.langflow_utils import has_langflow_memory
# Globals
_LANGFLOW_HELPER_MODULE_FLOW = "langflow.helpers.flow"
# Helper Functions
def is_helper_module(module, module_name):
    """Return True when *module*'s ``__module__`` attribute equals *module_name*."""
    return module_name == module.__module__
# Test Scenarios
class TestDynamicImport:
    """Test dynamic imports of the langflow implementation."""

    def test_langflow_available(self):
        """Test whether the langflow implementation is available."""
        # These tests require the full langflow package to be installed.
        if not has_langflow_memory():
            pytest.fail("Langflow implementation is not available")

    def test_helpers_import_build_schema_from_inputs(self):
        """Test the lfx.helpers.build_schema_from_inputs import."""
        try:
            from lfx.helpers import build_schema_from_inputs
        except (ImportError, ModuleNotFoundError) as exc:
            pytest.fail(f"Failed to dynamically import lfx.helpers.build_schema_from_inputs: {exc}")
        else:
            # The symbol must resolve to the langflow implementation.
            assert is_helper_module(build_schema_from_inputs, _LANGFLOW_HELPER_MODULE_FLOW)

    def test_helpers_import_get_arg_names(self):
        """Test the lfx.helpers.get_arg_names import."""
        try:
            from lfx.helpers import get_arg_names
        except (ImportError, ModuleNotFoundError) as exc:
            pytest.fail(f"Failed to dynamically import lfx.helpers.get_arg_names: {exc}")
        else:
            # The symbol must resolve to the langflow implementation.
            assert is_helper_module(get_arg_names, _LANGFLOW_HELPER_MODULE_FLOW)

    def test_helpers_import_get_flow_inputs(self):
        """Test the lfx.helpers.get_flow_inputs import."""
        try:
            from lfx.helpers import get_flow_inputs
        except (ImportError, ModuleNotFoundError) as exc:
            pytest.fail(f"Failed to dynamically import lfx.helpers.get_flow_inputs: {exc}")
        else:
            # The symbol must resolve to the langflow implementation.
            assert is_helper_module(get_flow_inputs, _LANGFLOW_HELPER_MODULE_FLOW)

    def test_helpers_import_list_flows(self):
        """Test the lfx.helpers.list_flows import."""
        try:
            from lfx.helpers import list_flows
        except (ImportError, ModuleNotFoundError) as exc:
            pytest.fail(f"Failed to dynamically import lfx.helpers.list_flows: {exc}")
        else:
            # The symbol must resolve to the langflow implementation.
            assert is_helper_module(list_flows, _LANGFLOW_HELPER_MODULE_FLOW)

    def test_helpers_import_load_flow(self):
        """Test the lfx.helpers.load_flow import."""
        try:
            from lfx.helpers import load_flow
        except (ImportError, ModuleNotFoundError) as exc:
            pytest.fail(f"Failed to dynamically import lfx.helpers.load_flow: {exc}")
        else:
            # The symbol must resolve to the langflow implementation.
            assert is_helper_module(load_flow, _LANGFLOW_HELPER_MODULE_FLOW)

    def test_helpers_import_run_flow(self):
        """Test the lfx.helpers.run_flow import."""
        try:
            from lfx.helpers import run_flow
        except (ImportError, ModuleNotFoundError) as exc:
            pytest.fail(f"Failed to dynamically import lfx.helpers.run_flow: {exc}")
        else:
            # The symbol must resolve to the langflow implementation.
            assert is_helper_module(run_flow, _LANGFLOW_HELPER_MODULE_FLOW)
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/helpers/test_flow.py",
"license": "MIT License",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/lfx/src/lfx/utils/langflow_utils.py | """Langflow environment utility functions."""
import importlib.util
from lfx.log.logger import logger
class _LangflowModule:
    """Process-wide cache for the langflow availability check.

    ``_available`` is tri-state:
        - ``None``: the check has not been performed yet
        - ``True``: langflow is available
        - ``False``: langflow is not available
    """

    _available = None

    @classmethod
    def is_available(cls):
        """Return the cached tri-state availability flag."""
        return cls._available

    @classmethod
    def set_available(cls, value):
        """Record the availability check result for subsequent calls."""
        cls._available = value
def has_langflow_memory():
    """Check if langflow.memory (with database support) and MessageTable are available."""
    # TODO: REVISIT: Optimize this implementation later
    # - Consider refactoring to use lazy loading or a more robust service discovery
    #   mechanism that can handle runtime availability changes.
    # NOTE(review): this only probes for the top-level "langflow" package spec;
    # it does not actually verify langflow.memory or MessageTable — confirm intent.
    cached = _LangflowModule.is_available()
    if cached is not None:
        # Reuse the result cached by a previous invocation.
        return cached
    # First invocation: probe for the package and cache the outcome.
    spec = None
    try:
        spec = importlib.util.find_spec("langflow")
    except ImportError:
        pass
    except (TypeError, ValueError) as e:
        logger.error(f"Error encountered checking for langflow.memory: {e}")
    found = spec is not None
    _LangflowModule.set_available(found)
    return found
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/utils/langflow_utils.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/tests/unit/helpers/test_flow.py | """Unit tests for the lfx.helpers.flow module."""
import pytest
from lfx.utils.langflow_utils import has_langflow_memory
# Globals
_LFX_HELPER_MODULE_FLOW = "lfx.helpers.flow"
# Helper Functions
def is_helper_module(module, module_name):
    """Return True when *module*'s ``__module__`` attribute equals *module_name*."""
    return module_name == module.__module__
# Test Scenarios
class TestDynamicImport:
    """Test dynamic imports of the lfx implementation."""

    def test_langflow_available(self):
        """Test whether the langflow implementation is available."""
        # These tests require that the full langflow package is NOT installed.
        if has_langflow_memory():
            pytest.fail("Langflow implementation is available")

    def test_helpers_import_build_schema_from_inputs(self):
        """Test the lfx.helpers.build_schema_from_inputs import."""
        try:
            from lfx.helpers import build_schema_from_inputs
        except (ImportError, ModuleNotFoundError) as exc:
            pytest.fail(f"Failed to dynamically import lfx.helpers.build_schema_from_inputs: {exc}")
        else:
            # The symbol must resolve to the lfx implementation.
            assert is_helper_module(build_schema_from_inputs, _LFX_HELPER_MODULE_FLOW)

    def test_helpers_import_get_arg_names(self):
        """Test the lfx.helpers.get_arg_names import."""
        try:
            from lfx.helpers import get_arg_names
        except (ImportError, ModuleNotFoundError) as exc:
            pytest.fail(f"Failed to dynamically import lfx.helpers.get_arg_names: {exc}")
        else:
            # The symbol must resolve to the lfx implementation.
            assert is_helper_module(get_arg_names, _LFX_HELPER_MODULE_FLOW)

    def test_helpers_import_get_flow_inputs(self):
        """Test the lfx.helpers.get_flow_inputs import."""
        try:
            from lfx.helpers import get_flow_inputs
        except (ImportError, ModuleNotFoundError) as exc:
            pytest.fail(f"Failed to dynamically import lfx.helpers.get_flow_inputs: {exc}")
        else:
            # The symbol must resolve to the lfx implementation.
            assert is_helper_module(get_flow_inputs, _LFX_HELPER_MODULE_FLOW)

    def test_helpers_import_list_flows(self):
        """Test the lfx.helpers.list_flows import."""
        try:
            from lfx.helpers import list_flows
        except (ImportError, ModuleNotFoundError) as exc:
            pytest.fail(f"Failed to dynamically import lfx.helpers.list_flows: {exc}")
        else:
            # The symbol must resolve to the lfx implementation.
            assert is_helper_module(list_flows, _LFX_HELPER_MODULE_FLOW)

    def test_helpers_import_load_flow(self):
        """Test the lfx.helpers.load_flow import."""
        try:
            from lfx.helpers import load_flow
        except (ImportError, ModuleNotFoundError) as exc:
            pytest.fail(f"Failed to dynamically import lfx.helpers.load_flow: {exc}")
        else:
            # The symbol must resolve to the lfx implementation.
            assert is_helper_module(load_flow, _LFX_HELPER_MODULE_FLOW)

    def test_helpers_import_run_flow(self):
        """Test the lfx.helpers.run_flow import."""
        try:
            from lfx.helpers import run_flow
        except (ImportError, ModuleNotFoundError) as exc:
            pytest.fail(f"Failed to dynamically import lfx.helpers.run_flow: {exc}")
        else:
            # The symbol must resolve to the lfx implementation.
            assert is_helper_module(run_flow, _LFX_HELPER_MODULE_FLOW)
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/tests/unit/helpers/test_flow.py",
"license": "MIT License",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/schema/test_cross_module.py | """Unit tests for cross-module isinstance functionality.
These tests verify that isinstance checks work correctly when classes are
re-exported from different modules (e.g., lfx.schema.Message vs langflow.schema.Message).
"""
from langflow.schema import Data as LangflowData
from langflow.schema import Message as LangflowMessage
from lfx.schema.data import Data as LfxData
from lfx.schema.message import Message as LfxMessage
class TestDuckTypingData:
    """Tests for duck-typing Data class across module boundaries."""

    def test_lfx_data_isinstance_langflow_data(self):
        """Test that lfx.Data instance is recognized as langflow.Data."""
        sample = LfxData(data={"key": "value"})
        assert isinstance(sample, LangflowData)

    def test_langflow_data_isinstance_lfx_data(self):
        """Test that langflow.Data instance is recognized as lfx.Data."""
        sample = LangflowData(data={"key": "value"})
        assert isinstance(sample, LfxData)

    def test_data_equality_across_modules(self):
        """Test that Data objects from different modules are equal."""
        left = LfxData(data={"key": "value"})
        right = LangflowData(data={"key": "value"})
        assert left == right

    def test_data_interchangeable_in_functions(self):
        """Test that Data from different modules work interchangeably."""

        def extract_text(data: LangflowData) -> str:
            return data.get_text()

        # An lfx Data must be usable wherever a langflow Data is expected.
        assert extract_text(LfxData(data={"text": "hello"})) == "hello"

    def test_data_model_dump_compatible(self):
        """Test that model_dump works across module boundaries."""
        dumped = LfxData(data={"key": "value"}).model_dump()
        rebuilt = LangflowData(**dumped)
        assert rebuilt.data == {"key": "value"}
class TestDuckTypingMessage:
    """Tests for duck-typing Message class across module boundaries."""

    def test_lfx_message_isinstance_langflow_message(self):
        """Test that lfx.Message instance is recognized as langflow.Message."""
        sample = LfxMessage(text="hello")
        assert isinstance(sample, LangflowMessage)

    def test_langflow_message_isinstance_lfx_message(self):
        """Test that langflow.Message instance is recognized as lfx.Message."""
        sample = LangflowMessage(text="hello")
        assert isinstance(sample, LfxMessage)

    def test_message_equality_across_modules(self):
        """Test that Message objects from different modules are equal."""
        left = LfxMessage(text="hello", sender="user")
        right = LangflowMessage(text="hello", sender="user")
        # Direct equality may not hold because of timestamps; compare fields.
        assert left.text == right.text
        assert left.sender == right.sender

    def test_message_interchangeable_in_functions(self):
        """Test that Message from different modules work interchangeably."""

        def render(msg: LangflowMessage) -> str:
            return f"Processed: {msg.text}"

        # An lfx Message must be usable where a langflow Message is expected.
        assert render(LfxMessage(text="hello")) == "Processed: hello"

    def test_message_model_dump_compatible(self):
        """Test that model_dump works across module boundaries."""
        dumped = LfxMessage(text="hello", sender="user").model_dump()
        rebuilt = LangflowMessage(**dumped)
        assert rebuilt.text == "hello"
        assert rebuilt.sender == "user"

    def test_message_inherits_data_duck_typing(self):
        """Test that Message inherits duck-typing from Data."""
        sample = LfxMessage(text="hello")
        # A Message must pass isinstance checks against both Data classes.
        assert isinstance(sample, LangflowData)
        assert isinstance(sample, LfxData)
class TestDuckTypingWithInputs:
    """Tests for duck-typing with input validation."""

    def test_message_input_accepts_lfx_message(self):
        """Test that MessageInput accepts lfx.Message."""
        from lfx.inputs.inputs import MessageInput

        wrapped = MessageInput(name="test", value=LfxMessage(text="hello"))
        assert isinstance(wrapped.value, (LfxMessage, LangflowMessage))

    def test_message_input_converts_cross_module(self):
        """Test that MessageInput handles cross-module Messages."""
        from lfx.inputs.inputs import MessageInput

        wrapped = MessageInput(name="test", value=LangflowMessage(text="hello"))
        # The input must still expose the Message contents.
        assert wrapped.value.text == "hello"

    def test_data_input_accepts_lfx_data(self):
        """Test that DataInput accepts lfx.Data."""
        from lfx.inputs.inputs import DataInput

        payload = LfxData(data={"key": "value"})
        assert DataInput(name="test", value=payload).value == payload
class TestDuckTypingEdgeCases:
    """Tests for edge cases in cross-module isinstance checks."""

    def test_different_class_name_not_cross_module(self):
        """Test that objects with different class names are not recognized as cross-module compatible."""
        from lfx.schema.cross_module import CrossModuleModel

        class CustomModel(CrossModuleModel):
            value: str

        instance = CustomModel(value="test")
        # A differently-named model must never masquerade as Data.
        assert not isinstance(instance, LfxData)
        assert not isinstance(instance, LangflowData)

    def test_non_pydantic_model_not_cross_module(self):
        """Test that non-Pydantic objects are not recognized as cross-module compatible."""

        class FakeData:
            def __init__(self):
                self.data = {}

        impostor = FakeData()
        assert not isinstance(impostor, LfxData)
        assert not isinstance(impostor, LangflowData)

    def test_missing_fields_not_cross_module(self):
        """Test that objects missing required fields are not recognized as cross-module compatible."""
        from lfx.schema.cross_module import CrossModuleModel

        class PartialData(CrossModuleModel):
            text_key: str

        incomplete = PartialData(text_key="text")
        # Lacking the data field disqualifies it from being a full Data.
        assert not isinstance(incomplete, LfxData)
        assert not isinstance(incomplete, LangflowData)
class TestDuckTypingInputMixin:
    """Tests for cross-module isinstance checks in BaseInputMixin and subclasses."""

    def test_base_input_mixin_is_cross_module(self):
        """Test that BaseInputMixin uses CrossModuleModel."""
        from lfx.inputs.input_mixin import BaseInputMixin
        from lfx.schema.cross_module import CrossModuleModel

        # BaseInputMixin must inherit from CrossModuleModel.
        assert issubclass(BaseInputMixin, CrossModuleModel)

    def test_input_subclasses_inherit_cross_module(self):
        """Test that all input types inherit cross-module support."""
        from lfx.inputs.inputs import (
            BoolInput,
            DataInput,
            FloatInput,
            IntInput,
            MessageInput,
            StrInput,
        )
        from lfx.schema.cross_module import CrossModuleModel

        for candidate in [StrInput, IntInput, FloatInput, BoolInput, DataInput, MessageInput]:
            assert issubclass(candidate, CrossModuleModel)

    def test_input_instances_work_across_modules(self):
        """Test that input instances work with duck-typing."""
        from lfx.inputs.inputs import MessageInput

        # Create one input from an lfx Message and one from a langflow Message.
        from_lfx = MessageInput(name="test1", value=LfxMessage(text="hello"))
        from_langflow = MessageInput(name="test2", value=LangflowMessage(text="world"))
        # Both must work.
        assert from_lfx.value.text == "hello"
        assert from_langflow.value.text == "world"
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/schema/test_cross_module.py",
"license": "MIT License",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/lfx/src/lfx/schema/cross_module.py | """Cross-module BaseModel for handling re-exported classes.
This module provides a metaclass and base model that enable isinstance checks
to work across module boundaries for Pydantic models. This is particularly useful
when the same class is re-exported from different modules (e.g., lfx.Message vs
langflow.schema.Message) but Python's isinstance() checks fail due to different
module paths.
"""
from __future__ import annotations
from typing import Any
from pydantic import BaseModel
class CrossModuleMeta(type(BaseModel)):  # type: ignore[misc]
    """Metaclass enabling isinstance checks across module boundaries.

    Overrides ``__instancecheck__`` so that two Pydantic models with the same
    class name and a compatible field set are treated as instances of one
    another, even when they were imported from different module paths.
    """

    def __instancecheck__(cls, instance: Any) -> bool:
        """Return True when *instance* is nominally or structurally compatible.

        Structural checking (same class name, field superset) is attempted
        only after the ordinary isinstance check fails.

        Args:
            instance: The object to check.

        Returns:
            bool: True if instance is compatible with this class.
        """
        # Fast path: a genuine (sub)class instance always passes.
        if type.__instancecheck__(cls, instance):
            return True
        # Structural fallback requires a Pydantic model (has model_fields).
        if not hasattr(instance, "model_fields"):
            return False
        # Class names must match: a Message never masquerades as a Data.
        if cls.__name__ != instance.__class__.__name__:
            return False
        # The instance may carry extra fields, but not fewer than cls declares.
        expected = set(cls.model_fields) if hasattr(cls, "model_fields") else set()
        return expected <= set(instance.model_fields)
class CrossModuleModel(BaseModel, metaclass=CrossModuleMeta):
    """Base Pydantic model with cross-module isinstance support.

    This class should be used as the base for models that may be re-exported
    from different modules. It enables isinstance() checks to work across
    module boundaries by using structural type checking, delegated to
    ``CrossModuleMeta.__instancecheck__``: a same-named Pydantic model whose
    fields are a superset of this class's fields passes the check.

    Example:
        >>> class Message(CrossModuleModel):
        ...     text: str
        ...
        >>> # Even if Message is imported from different paths:
        >>> from lfx.schema.message import Message as LfxMessage
        >>> from langflow.schema import Message as LangflowMessage
        >>> msg = LfxMessage(text="hello")
        >>> isinstance(msg, LangflowMessage)  # True (with cross-module support)
    """
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/schema/cross_module.py",
"license": "MIT License",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langflow-ai/langflow:src/backend/tests/locust/diagnose_remote.py | #!/usr/bin/env python3
"""Diagnostic tool for remote Langflow instances.
Helps debug connection issues and performance problems.
"""
import argparse
import json
import sys
import time
from typing import Any
import httpx
def test_connectivity(host: str) -> dict[str, Any]:
    """Test basic connectivity to the host.

    Issues a GET against ``{host}/health`` and records reachability, the
    health-check verdict, and the response time in milliseconds.
    """
    print(f"🔗 Testing connectivity to {host}")
    report = {
        "host": host,
        "reachable": False,
        "health_check": False,
        "response_time_ms": None,
        "error": None,
    }
    try:
        started = time.time()
        with httpx.Client(timeout=30.0) as client:
            response = client.get(f"{host}/health")
        report["reachable"] = True
        report["response_time_ms"] = round((time.time() - started) * 1000, 2)
        report["health_check"] = response.status_code == 200
        report["status_code"] = response.status_code
        if response.status_code == 200:
            print(f" ✅ Health check passed ({report['response_time_ms']}ms)")
        else:
            print(f" ⚠️ Health check failed: {response.status_code}")
            report["error"] = f"HTTP {response.status_code}"
    except Exception as e:  # noqa: BLE001 - a diagnostic probe must never crash
        report["error"] = f"{type(e).__name__}: {e}"
        print(f" ❌ Connection failed: {report['error']}")
    return report
def test_flow_endpoint(host: str, api_key: str, flow_id: str) -> dict[str, Any]:
    """Test a flow execution request.

    POSTs a diagnostic chat message to the run endpoint and records the
    status code, response time, and whether the flow produced outputs.
    """
    print("🎯 Testing flow execution")
    report = {
        "success": False,
        "response_time_ms": None,
        "status_code": None,
        "error": None,
        "has_outputs": False,
    }
    try:
        endpoint = f"{host}/api/v1/run/{flow_id}?stream=false"
        body = {
            "input_value": "Hello, this is a diagnostic test",
            "output_type": "chat",
            "input_type": "chat",
            "tweaks": {},
        }
        headers = {"x-api-key": api_key, "Content-Type": "application/json"}
        started = time.time()
        with httpx.Client(timeout=60.0) as client:
            response = client.post(endpoint, json=body, headers=headers)
        report["response_time_ms"] = round((time.time() - started) * 1000, 2)
        report["status_code"] = response.status_code
        if response.status_code == 200:
            try:
                payload = response.json()
                # A 200 without "outputs" is treated as a failure.
                report["has_outputs"] = bool(payload.get("outputs"))
                report["success"] = report["has_outputs"]
                if report["success"]:
                    print(f" ✅ Flow execution successful ({report['response_time_ms']}ms)")
                else:
                    print(f" ⚠️ Flow executed but no outputs ({report['response_time_ms']}ms)")
                    report["error"] = "No outputs in response"
            except Exception as e:  # noqa: BLE001 - malformed body must not crash the probe
                report["error"] = f"JSON decode error: {e}"
                print(f" ❌ Invalid JSON response: {e}")
        else:
            report["error"] = f"HTTP {response.status_code}"
            print(f" ❌ Flow execution failed: {response.status_code}")
            print(f" Response: {response.text[:200]}...")
    except Exception as e:  # noqa: BLE001 - a diagnostic probe must never crash
        report["error"] = f"{type(e).__name__}: {e}"
        print(f" ❌ Request failed: {report['error']}")
    return report
def run_load_simulation(host: str, api_key: str, flow_id: str, num_requests: int = 10) -> dict[str, Any]:
    """Run a small load simulation to test performance.

    Issues ``num_requests`` sequential flow executions and records timings
    plus success/failure counters.

    Args:
        host: Base URL of the Langflow instance.
        api_key: API key sent in the ``x-api-key`` header.
        flow_id: ID of the flow to execute.
        num_requests: How many requests to send (default 10).

    Returns:
        dict with request counters, per-request response times in ms, error
        strings, and avg/min/max response times when any request completed.
    """
    print(f"⚡ Running mini load test ({num_requests} requests)")
    results = {
        "total_requests": num_requests,
        "successful_requests": 0,
        "failed_requests": 0,
        "connection_errors": 0,
        "response_times": [],
        "errors": [],
    }
    url = f"{host}/api/v1/run/{flow_id}?stream=false"
    payload = {
        "input_value": "Load test message",
        "output_type": "chat",
        "input_type": "chat",
        "tweaks": {},
    }
    headers = {"x-api-key": api_key, "Content-Type": "application/json"}
    for i in range(num_requests):
        try:
            start_time = time.time()
            # A fresh client per request: the timing includes connection setup.
            with httpx.Client(timeout=30.0) as client:
                response = client.post(url, json=payload, headers=headers)
            end_time = time.time()
            response_time = round((end_time - start_time) * 1000, 2)
            results["response_times"].append(response_time)
            if response.status_code == 200:
                try:
                    data = response.json()
                    # A 200 without "outputs" counts as a failed request.
                    if data.get("outputs"):
                        results["successful_requests"] += 1
                        print(f" ✅ Request {i + 1}: {response_time}ms")
                    else:
                        results["failed_requests"] += 1
                        results["errors"].append(f"Request {i + 1}: No outputs")
                        print(f" ⚠️ Request {i + 1}: No outputs ({response_time}ms)")
                except Exception as e:
                    results["failed_requests"] += 1
                    results["errors"].append(f"Request {i + 1}: JSON error - {e}")
                    print(f" ❌ Request {i + 1}: JSON error ({response_time}ms)")
            else:
                results["failed_requests"] += 1
                results["errors"].append(f"Request {i + 1}: HTTP {response.status_code}")
                print(f" ❌ Request {i + 1}: HTTP {response.status_code} ({response_time}ms)")
        except Exception as e:
            # Transport-level failures are tracked separately from HTTP errors.
            results["connection_errors"] += 1
            results["errors"].append(f"Request {i + 1}: {type(e).__name__} - {e}")
            print(f" 💥 Request {i + 1}: Connection error - {e}")
    # Calculate statistics
    if results["response_times"]:
        results["avg_response_time"] = round(sum(results["response_times"]) / len(results["response_times"]), 2)
        results["min_response_time"] = min(results["response_times"])
        results["max_response_time"] = max(results["response_times"])
    return results
def main():
    """Parse CLI arguments, run the diagnostics, print a summary, and optionally save results as JSON."""
    parser = argparse.ArgumentParser(description="Diagnose remote Langflow instance")
    parser.add_argument("--host", required=True, help="Langflow host URL")
    parser.add_argument("--api-key", help="API key for flow execution")
    parser.add_argument("--flow-id", help="Flow ID for testing")
    parser.add_argument("--load-test", type=int, default=0, help="Number of requests for mini load test")
    parser.add_argument("--output", help="Save results to JSON file")
    args = parser.parse_args()
    print(f"🔍 Diagnosing Langflow instance: {args.host}")
    print("=" * 60)
    # Test basic connectivity
    connectivity_results = test_connectivity(args.host)
    # Test flow execution if credentials provided
    flow_results = None
    if args.api_key and args.flow_id:
        flow_results = test_flow_endpoint(args.host, args.api_key, args.flow_id)
    else:
        print("⚠️ Skipping flow test (no API key or flow ID provided)")
    # Run mini load test if requested (needs credentials too)
    load_results = None
    if args.load_test > 0 and args.api_key and args.flow_id:
        load_results = run_load_simulation(args.host, args.api_key, args.flow_id, args.load_test)
    # Summary
    print("\n" + "=" * 60)
    print("📋 DIAGNOSTIC SUMMARY")
    print("=" * 60)
    print(f"Host: {args.host}")
    print(f"Connectivity: {'✅ OK' if connectivity_results['reachable'] else '❌ FAILED'}")
    print(f"Health Check: {'✅ OK' if connectivity_results['health_check'] else '❌ FAILED'}")
    if connectivity_results["response_time_ms"]:
        print(f"Health Response Time: {connectivity_results['response_time_ms']}ms")
    if flow_results:
        print(f"Flow Execution: {'✅ OK' if flow_results['success'] else '❌ FAILED'}")
        if flow_results["response_time_ms"]:
            print(f"Flow Response Time: {flow_results['response_time_ms']}ms")
    if load_results:
        success_rate = (load_results["successful_requests"] / load_results["total_requests"]) * 100
        print(
            f"Mini Load Test: {load_results['successful_requests']}/{load_results['total_requests']} ({success_rate:.1f}% success)"
        )
        if load_results.get("avg_response_time"):
            print(f"Average Response Time: {load_results['avg_response_time']}ms")
    # Recommendations: first failing stage wins; healthy message otherwise.
    print("\n🔧 RECOMMENDATIONS:")
    if not connectivity_results["reachable"]:
        print("❌ Cannot reach the host - check URL and network connectivity")
    elif not connectivity_results["health_check"]:
        print("❌ Health check failed - Langflow may not be running properly")
    elif flow_results and not flow_results["success"]:
        print("❌ Flow execution failed - check API key, flow ID, and flow configuration")
    elif load_results and load_results["connection_errors"] > 0:
        print("⚠️ Connection errors detected - instance may be overloaded or unstable")
    elif load_results and load_results.get("avg_response_time", 0) > 10000:
        print("⚠️ Slow response times - consider reducing load or optimizing flow")
    else:
        print("✅ Instance appears healthy for load testing")
    # Save results if requested
    if args.output:
        results = {
            "timestamp": time.time(),
            "host": args.host,
            "connectivity": connectivity_results,
            "flow_execution": flow_results,
            "load_simulation": load_results,
        }
        # NOTE(review): no explicit encoding; output is ASCII-safe because
        # json.dump defaults to ensure_ascii=True.
        with open(args.output, "w") as f:
            json.dump(results, f, indent=2)
        print(f"\n💾 Results saved to: {args.output}")
# Script entry point.
if __name__ == "__main__":
    main()
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/locust/diagnose_remote.py",
"license": "MIT License",
"lines": 209,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/locust/langflow_example_workflow.py | #!/usr/bin/env python3
"""Example Langflow Load Testing Workflow
This script demonstrates the complete workflow for setting up and running
Langflow load tests with real starter project flows.
Usage:
python example_workflow.py
"""
import os
import subprocess
import sys
from pathlib import Path
def run_command(cmd, description="", check=True):
    """Run a command with nice output.

    Prints a banner with the description and command, then executes it.
    Returns True on success, False on failure or Ctrl-C.
    """
    banner = "=" * 60
    print(f"\n{banner}")
    print(f"RUNNING: {description}")
    print(f"{banner}")
    shown = " ".join(cmd) if isinstance(cmd, list) else cmd
    print(f"Command: {shown}")
    print()
    try:
        completed = subprocess.run(cmd, check=check)
    except subprocess.CalledProcessError as exc:
        print(f"❌ Command failed with exit code {exc.returncode}")
        return False
    except KeyboardInterrupt:
        print("\n⚠️ Interrupted by user")
        return False
    return completed.returncode == 0
def run_command_with_env(cmd, description="", env=None, check=True):
    """Run a command with custom environment variables.

    Same banner/printing behavior as run_command, but passes *env* through
    to subprocess.run. Returns True on success, False on failure or Ctrl-C.
    """
    banner = "=" * 60
    print(f"\n{banner}")
    print(f"RUNNING: {description}")
    print(f"{banner}")
    shown = " ".join(cmd) if isinstance(cmd, list) else cmd
    print(f"Command: {shown}")
    print()
    try:
        completed = subprocess.run(cmd, env=env, check=check)
    except subprocess.CalledProcessError as exc:
        print(f"❌ Command failed with exit code {exc.returncode}")
        return False
    except KeyboardInterrupt:
        print("\n⚠️ Interrupted by user")
        return False
    return completed.returncode == 0
def check_dependencies():
    """Check if required dependencies are installed.

    Verifies that httpx is importable and the locust CLI is runnable,
    printing an install hint for whichever is missing. Returns True only
    when both are available.
    """
    print("Checking dependencies...")
    # httpx is needed for the setup/diagnostic HTTP calls.
    try:
        import httpx  # noqa: F401
    except ImportError:
        print("❌ httpx not found. Install with: pip install httpx")
        return False
    print("✅ httpx is available")
    # locust must be on PATH and respond to --version.
    try:
        probe = subprocess.run(["locust", "--version"], check=False, capture_output=True, text=True)
    except FileNotFoundError:
        print("❌ locust not found. Install with: pip install locust")
        return False
    if probe.returncode != 0:
        print("❌ locust not found. Install with: pip install locust")
        return False
    print(f"✅ locust is available: {probe.stdout.strip()}")
    return True
def main():
    """Drive the example workflow end to end.

    Steps: check dependencies, list starter flows, set up a test environment
    with the 'Basic Prompting' flow, run a short load test via the runner
    script, then print next steps and optionally clean up generated files.

    BUGFIX: the printed "what's next" hints previously referenced
    setup_langflow_test.py / run_load_test.py, which do not exist; the actual
    sibling scripts are langflow_setup_test.py / langflow_run_load_test.py
    (the same names resolved below for setup_script / runner_script).
    """
    import argparse

    parser = argparse.ArgumentParser(description="Example Langflow Load Testing Workflow")
    parser.add_argument("--auto", action="store_true", help="Run automatically without user input prompts")
    args = parser.parse_args()
    print("🚀 Langflow Load Testing Example Workflow")
    print("This example will demonstrate the complete load testing setup and execution.")
    # Check dependencies
    if not check_dependencies():
        print("\n❌ Missing dependencies. Please install them and try again.")
        sys.exit(1)
    script_dir = Path(__file__).parent
    setup_script = script_dir / "langflow_setup_test.py"
    runner_script = script_dir / "langflow_run_load_test.py"
    # Check if scripts exist
    if not setup_script.exists():
        print(f"❌ Setup script not found: {setup_script}")
        sys.exit(1)
    if not runner_script.exists():
        print(f"❌ Runner script not found: {runner_script}")
        sys.exit(1)
    print("\n" + "=" * 80)
    print("EXAMPLE WORKFLOW STEPS")
    print("=" * 80)
    print("1. List available starter project flows")
    print("2. Set up test environment with a selected flow")
    print("3. Run a quick load test")
    print("4. Show results and cleanup options")
    print("=" * 80)

    def wait_for_user(message):
        # In --auto mode we never block on stdin; pause briefly so the
        # banner output stays readable.
        if args.auto:
            print(f"\n{message} (auto mode - continuing...)")
            import time

            time.sleep(1)
        else:
            input(f"\n{message}")

    try:
        # Step 1: List available flows
        wait_for_user("Press Enter to list available starter project flows...")
        if not run_command([sys.executable, str(setup_script), "--list-flows"], "List available starter project flows"):
            return
        # Step 2: Setup with Basic Prompting (good for examples)
        wait_for_user("Press Enter to set up test environment with 'Basic Prompting' flow...")
        if not run_command(
            [
                sys.executable,
                str(setup_script),
                "--flow",
                "Basic Prompting",
                "--save-credentials",
                "example_test_creds.json",
            ],
            "Set up test environment",
        ):
            return
        # Step 3: Run a quick load test
        wait_for_user("Press Enter to run a quick load test (10 users, 30 seconds)...")
        # Load credentials from the saved file and set environment variables
        try:
            import json

            with open("example_test_creds.json") as f:
                creds = json.load(f)
            # Set environment variables for the load test
            env = os.environ.copy()
            env["LANGFLOW_HOST"] = creds["host"]
            env["API_KEY"] = creds["api_key"]
            env["FLOW_ID"] = creds["flow_id"]
            print(" 🔧 Setting environment variables:")
            print(f" LANGFLOW_HOST={creds['host']}")
            print(f" API_KEY={creds['api_key'][:20]}...")
            print(f" FLOW_ID={creds['flow_id']}")
        except Exception as e:
            # Best effort: fall back to the current environment so the runner
            # can still pick up variables exported by an earlier setup run.
            print(f" ⚠️ Could not load credentials: {e}")
            env = os.environ.copy()
        # Run the load test with proper environment
        success = run_command_with_env(
            [
                sys.executable,
                str(runner_script),
                "--headless",
                "--users",
                "100",
                "--spawn-rate",
                "2",
                "--duration",
                "30",
                "--no-start-langflow",
                "--html",
                "langflow_load_test_report.html",
                "--csv",
                "langflow_load_test_results",
            ],
            "Run quick load test with HTML report generation",
            env=env,
        )
        if not success:
            print("⚠️ Load test may have failed, but that's okay for this example")
        # Step 4: Show what's possible
        print(f"\n{'=' * 80}")
        print("EXAMPLE COMPLETE - WHAT'S NEXT?")
        print(f"{'=' * 80}")
        print("The example workflow is complete! Here's what you can do next:")
        print()
        print("🔧 Try different flows:")
        print(" python langflow_setup_test.py --interactive")
        print()
        print("📊 Run more comprehensive tests:")
        print(" python langflow_run_load_test.py --shape ramp100 --headless --users 100 --duration 180")
        print()
        print("🌐 Use the web UI for interactive testing:")
        print(" python langflow_run_load_test.py")
        print()
        print("💾 Your test credentials are saved in: example_test_creds.json")
        print()
        print("📊 Generated Reports:")
        print(" - langflow_load_test_report.html (detailed HTML report)")
        print(" - langflow_load_test_results_*.csv (CSV data files)")
        print(" - langflow_load_test_detailed_errors_*.log (detailed error logs)")
        print(" - langflow_load_test_error_summary_*.json (error analysis)")
        print(" - langflow_server_logs_during_test_*.log (Langflow server logs)")
        print()
        print("🧹 Clean up:")
        print(" - Remove test flows from Langflow UI")
        print(" - Delete example_test_creds.json")
        print(" - Delete generated report files")
        print(" - Reset environment variables")
        print(f"{'=' * 80}")
        # Cleanup option
        if args.auto:
            print("\nAuto mode - skipping cleanup so you can view the generated reports!")
            print("📁 Files preserved:")
            print(" - example_test_creds.json")
            print(" - langflow_load_test_report.html")
            print(" - langflow_load_test_results_*.csv")
        else:
            cleanup_response = input("\nWould you like to clean up the example files? (y/N): ").strip().lower()
            if cleanup_response == "y":
                files_to_clean = [
                    "example_test_creds.json",
                    "langflow_load_test_report.html",
                    "langflow_load_test_results_failures.csv",
                    "langflow_load_test_results_stats.csv",
                    "langflow_load_test_results_stats_history.csv",
                    "langflow_load_test_results_exceptions.csv",
                ]
                # Also clean up error logs (they have timestamps, so use glob pattern)
                import glob

                error_files = glob.glob("langflow_load_test_detailed_errors_*.log")
                error_files.extend(glob.glob("langflow_load_test_error_summary_*.json"))
                error_files.extend(glob.glob("langflow_server_logs_during_test_*.log"))
                files_to_clean.extend(error_files)
                for file_path in files_to_clean:
                    try:
                        os.remove(file_path)
                        print(f"✅ Cleaned up {file_path}")
                    except FileNotFoundError:
                        pass  # File doesn't exist, that's fine
                    except Exception as e:
                        print(f"⚠️ Could not clean up {file_path}: {e}")
        print("\n🎉 Example workflow completed successfully!")
        print("You're now ready to use Langflow load testing for your own projects.")
        print()
        print("📊 View your load test results:")
        print(" • Open langflow_load_test_report.html in your browser for detailed analysis")
        print(" • Check langflow_load_test_results_*.csv for raw data")
        print(" • Review langflow_load_test_detailed_errors_*.log for comprehensive error details")
        print(" • Analyze langflow_load_test_error_summary_*.json for error patterns")
        print(" • Examine langflow_server_logs_during_test_*.log for server-side issues")
    except KeyboardInterrupt:
        print("\n\n⚠️ Example workflow interrupted by user")
    except Exception as e:
        print(f"\n❌ Example workflow failed: {e}")
        sys.exit(1)
# Entry point when executed directly as a script.
if __name__ == "__main__":
    main()
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/locust/langflow_example_workflow.py",
"license": "MIT License",
"lines": 238,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/locust/langflow_locustfile.py | """Langflow API Locust Load Testing File.
Comprehensive load testing for Langflow API with multiple user behaviors and performance analysis.
Based on production-ready patterns with proper error handling, metrics tracking, and reporting.
Usage:
# Run with web UI (recommended)
locust -f locustfile.py --host http://localhost:7860
    # Run headless with a built-in shape (selected via the SHAPE env var; see below)
    SHAPE=ramp100 locust -f locustfile.py --host http://localhost:7860 --headless
# Run distributed (master)
locust -f locustfile.py --host http://localhost:7860 --master
# Run distributed (worker)
locust -f locustfile.py --host http://localhost:7860 --worker --master-host=localhost
Environment Variables:
- LANGFLOW_HOST: Base URL for the Langflow server (default: http://localhost:7860)
- FLOW_ID: Flow ID to test (required)
- API_KEY: API key for authentication (required)
- MIN_WAIT: Minimum wait time between requests in ms (default: 2000)
- MAX_WAIT: Maximum wait time between requests in ms (default: 5000)
- REQUEST_TIMEOUT: Request timeout in seconds (default: 30.0)
- SHAPE: Load test shape to use (default: none, options: ramp100, stepramp)
"""
import inspect
import json
import logging
import os
import random
import time
import traceback
from datetime import datetime
from pathlib import Path
import gevent
from locust import FastHttpUser, LoadTestShape, between, constant, constant_pacing, events, task
# Test messages with realistic distribution
TEST_MESSAGES = {
    "minimal": "Hi",
    "simple": "Can you help me?",
    "medium": "I need help understanding how machine learning works in this context.",
    "complex": "Please analyze this data: " + "x" * 500 + " and provide detailed insights.",
    "large": "Here's a complex scenario: " + "data " * 1000,
    "realistic": "Hey, Could you check https://docs.langflow.org for me? Later, could you calculate 1390 / 192 ?",
}
# Weighted message distribution for realistic load
MESSAGE_WEIGHTS = [("simple", 40), ("realistic", 25), ("medium", 20), ("minimal", 10), ("complex", 4), ("large", 1)]
# Enhanced error logging setup
# ERROR_LOG_FILE / LANGFLOW_LOG_FILE are filled in by setup_error_logging();
# DETAILED_ERRORS accumulates one dict per failed request (see
# log_detailed_error) for the end-of-test summary.
ERROR_LOG_FILE = None
LANGFLOW_LOG_FILE = None
DETAILED_ERRORS = []
def setup_error_logging():
    """Set up detailed error logging for the load test.

    Creates a timestamped error log file, attaches a DEBUG file handler to
    the dedicated test logger, and records the location of the Langflow
    server log if one can be found in a few common places.
    """
    global ERROR_LOG_FILE, LANGFLOW_LOG_FILE
    ts = datetime.now().strftime("%Y%m%d_%H%M%S")
    # Create detailed error log
    ERROR_LOG_FILE = f"langflow_load_test_detailed_errors_{ts}.log"

    # Route everything logged by this test into its own file.
    logger = logging.getLogger("langflow_load_test_errors")
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(ERROR_LOG_FILE)
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logger.addHandler(handler)

    # Try to capture Langflow logs by probing common locations.
    for candidate in ("langflow.log", "logs/langflow.log", "../../../langflow.log", "../../../../langflow.log"):
        if Path(candidate).exists():
            LANGFLOW_LOG_FILE = candidate
            break

    print("📝 Error logging setup:")
    print(f" • Detailed errors: {ERROR_LOG_FILE}")
    if LANGFLOW_LOG_FILE:
        print(f" • Langflow logs: {LANGFLOW_LOG_FILE}")
    else:
        print(" • Langflow logs: Not found (will monitor common locations)")
def log_detailed_error(
    user_class, method, url, status_code, response_text, exception=None, request_data=None, traceback=None
):
    """Record one failed request in memory and in the error log file.

    Args:
        user_class: Name of the Locust user class that made the request.
        method: HTTP method used.
        url: Full request URL.
        status_code: HTTP status (0 for connection-level failures).
        response_text: Raw response body, truncated before storage.
        exception: Exception instance or string, if any.
        request_data: JSON payload that was sent.
        traceback: Pre-formatted traceback *string* (or None). It shadows the
            stdlib ``traceback`` module inside this function on purpose; the
            caller formats the traceback before passing it in.
    """
    global DETAILED_ERRORS
    error_logger = logging.getLogger("langflow_load_test_errors")
    error_info = {
        "timestamp": datetime.now().isoformat(),
        "user_class": user_class,
        "method": method,
        "url": url,
        "status_code": status_code,
        "response_text": response_text[:1000] if response_text else None,  # Limit response size
        "request_data": request_data,
        "exception": str(exception) if exception else None,
        "traceback": traceback if traceback else None,
    }
    DETAILED_ERRORS.append(error_info)
    # Log to file.
    # BUGFIX: this f-string previously called traceback.format_exc(), but in
    # this scope ``traceback`` is the parameter (a string or None), so any call
    # with a non-None ``exception`` crashed with AttributeError. Log the value
    # that was passed in instead.
    error_logger.error(f"""
=== LOAD TEST ERROR ===
User Class: {user_class}
Method: {method}
URL: {url}
Status Code: {status_code}
Request Data: {json.dumps(request_data, indent=2) if request_data else "None"}
Response Text: {response_text[:500] if response_text else "None"}...
Exception: {exception}
Traceback: {traceback if traceback else "None"}
========================
""")
def save_error_summary():
    """Save a summary of all errors encountered during the test.

    Groups DETAILED_ERRORS by (status code, user class), keeping up to three
    example records per group, and writes the result to a timestamped JSON
    file. No-op when no errors were recorded.
    """
    if not DETAILED_ERRORS:
        return

    ts = datetime.now().strftime("%Y%m%d_%H%M%S")
    summary_file = f"langflow_load_test_error_summary_{ts}.json"

    # Bucket errors by status code + user class.
    buckets = {}
    for err in DETAILED_ERRORS:
        key = f"{err['status_code']}_{err['user_class']}"
        bucket = buckets.setdefault(
            key,
            {
                "count": 0,
                "examples": [],
                "status_code": err["status_code"],
                "user_class": err["user_class"],
            },
        )
        bucket["count"] += 1
        if len(bucket["examples"]) < 3:  # Keep up to 3 examples
            bucket["examples"].append(err)

    # Save summary
    with open(summary_file, "w") as f:
        json.dump(
            {
                "test_timestamp": ts,
                "total_errors": len(DETAILED_ERRORS),
                "error_types": len(buckets),
                "error_breakdown": buckets,
            },
            f,
            indent=2,
        )
    print(f"📊 Error summary saved: {summary_file}")
def capture_langflow_logs():
    """Capture recent Langflow logs if available.

    Copies the last 1000 lines of the detected server log into a timestamped
    capture file. Returns the capture file name, or None when no server log
    was found or the copy failed.
    """
    if not LANGFLOW_LOG_FILE or not Path(LANGFLOW_LOG_FILE).exists():
        return None
    try:
        # Keep only the tail of the server log (last 1000 lines).
        with open(LANGFLOW_LOG_FILE) as src:
            tail = src.readlines()[-1000:]
        ts = datetime.now().strftime("%Y%m%d_%H%M%S")
        out_path = f"langflow_server_logs_during_test_{ts}.log"
        with open(out_path, "w") as dst:
            dst.write("# Langflow server logs captured during load test\n")
            dst.write(f"# Original log file: {LANGFLOW_LOG_FILE}\n")
            dst.write(f"# Capture time: {datetime.now().isoformat()}\n")
            dst.write(f"# Lines captured: {len(tail)}\n\n")
            dst.writelines(tail)
        print(f"📋 Langflow logs captured: {out_path}")
        return out_path
    except Exception as e:
        print(f"⚠️ Could not capture Langflow logs: {e}")
        return None
# Load test shapes
class RampToHundred(LoadTestShape):
    """0 -> 100 users at 5 users/sec (20s ramp), then hold until 180s total.

    Matches production testing patterns: 3 minutes, ramping to 100 users.
    """

    spawn_rate = 5
    target_users = 100
    total_duration = 180  # seconds

    def tick(self):
        elapsed = self.get_run_time()
        # End the test once the total duration has elapsed.
        if elapsed >= self.total_duration:
            return None
        # Linear ramp: spawn_rate users per elapsed second, capped at target.
        return min(int(elapsed * self.spawn_rate), self.target_users), self.spawn_rate
class StepRamp(LoadTestShape):
    """Step ramp for finding performance cliffs.

    Steps every 30 seconds: 5 -> 10 -> 15 -> 20 -> 25 -> 30 -> 35 users.
    Each step holds for exactly 30 seconds to measure steady-state performance.
    """

    # (end-of-step time in seconds, user count during that step):
    # (30, 5), (60, 10), ..., (300, 50) — same table the original spelled out.
    _STEPS = [(30 * (i + 1), 5 * (i + 1)) for i in range(10)]

    def tick(self):
        elapsed = self.get_run_time()
        # Find current step
        for end_time, user_count in self._STEPS:
            if elapsed < end_time:
                return user_count, 10  # Fast spawn rate for quick transitions
        return None  # End test after 300 seconds
# Environment-scoped metrics tracking
# Maps each Locust Environment object to its own counter dict so that
# concurrent environments do not share mutable state.
_env_bags = {}


@events.test_start.add_listener
def on_test_start(environment, **_kwargs):
    """Initialize per-environment metrics tracking."""
    # Set up enhanced error logging
    setup_error_logging()
    # Counters consumed by on_request / on_test_stop below.
    _env_bags[environment] = {
        "slow_10s": 0,
        "slow_20s": 0,
    }
@events.request.add_listener
def on_request(request_type, name, response_time, response_length, exception, context, **kwargs):  # noqa: ARG001
    """Track slow requests using Locust's built-in timing."""
    # response_time is in milliseconds from Locust
    bag = _env_bags.get(context.get("environment") if context else None)
    if bag is None:
        # fallback: try the single environment we likely have
        if len(_env_bags) == 1:
            bag = next(iter(_env_bags.values()))
        else:
            # Ambiguous (zero or multiple environments) — drop the sample.
            return
    if exception is None:  # Only count successful requests for timing
        if response_time > 10_000:  # 10 seconds in ms
            bag["slow_10s"] += 1
        if response_time > 20_000:  # 20 seconds in ms
            bag["slow_20s"] += 1
@events.test_stop.add_listener
def on_test_stop(environment, **_kwargs):
    """Print comprehensive test summary with performance grading."""
    stats = environment.stats.total
    # Nothing to report if no requests were made.
    if stats.num_requests == 0:
        return
    # Get percentiles and basic stats
    p50 = stats.get_response_time_percentile(0.50) or 0
    p95 = stats.get_response_time_percentile(0.95) or 0
    p99 = stats.get_response_time_percentile(0.99) or 0
    fail_ratio = stats.fail_ratio
    current_rps = getattr(stats, "current_rps", 0.0)
    # Get slow request counts
    bag = _env_bags.get(environment, {"slow_10s": 0, "slow_20s": 0})
    # Performance grading based on production criteria
    grade = "A"
    issues = []
    if fail_ratio > 0.01:
        grade = "B"
        issues.append(f"fail {fail_ratio:.1%}")
    if fail_ratio > 0.05:
        grade = "C"
    if p95 > 10_000:
        # max() on letter grades relies on lexicographic order; at this point
        # grade is one of "A"/"B"/"C", all of which sort below "D".
        grade = max(grade, "D")
        issues.append(f"p95 {p95 / 1000:.1f}s")
    if p95 > 20_000:
        grade = "F"
        issues.append(f"p95 {p95 / 1000:.1f}s")
    print(f"\n{'=' * 60}")
    print(f"LANGFLOW API LOAD TEST RESULTS - GRADE: {grade}")
    print(f"{'=' * 60}")
    print(f"Requests: {stats.num_requests:,} | Failures: {stats.num_failures:,} ({fail_ratio:.1%})")
    print(f"Response Times: p50={p50 / 1000:.2f}s p95={p95 / 1000:.2f}s p99={p99 / 1000:.2f}s")
    print(f"RPS: {current_rps:.1f} | Slow requests: >10s={bag['slow_10s']} >20s={bag['slow_20s']}")
    if issues:
        print(f"Issues: {', '.join(issues)}")
    # Production readiness assessment
    if grade in ["A", "B"]:
        print("✅ PRODUCTION READY - Performance meets production standards")
    elif grade == "C":
        print("⚠️ CAUTION - Acceptable but monitor closely in production")
    else:
        print("❌ NOT PRODUCTION READY - Significant performance issues detected")
    print(f"{'=' * 60}\n")
    # Save detailed error information
    save_error_summary()
    # Capture Langflow logs
    capture_langflow_logs()
    # Set exit code for CI/CD
    if fail_ratio > 0.01:
        environment.process_exit_code = 1
    # Cleanup
    _env_bags.pop(environment, None)
class BaseLangflowUser(FastHttpUser):
    """Base class for all Langflow API load testing user types.

    Reads API_KEY / FLOW_ID from the environment on start, verifies the
    server with a health check, and provides make_request() for subclasses'
    tasks. Every failure path is recorded via log_detailed_error().
    """

    abstract = True
    # Per-request timeout in seconds, overridable via REQUEST_TIMEOUT env var.
    REQUEST_TIMEOUT = float(os.getenv("REQUEST_TIMEOUT", "30.0"))
    # Use the host provided by environment variable or default
    host = os.getenv("LANGFLOW_HOST", "http://localhost:7860")

    def on_start(self):
        """Called when a user starts before any task is scheduled."""
        # Get credentials from environment variables
        self.api_key = os.getenv("API_KEY")
        self.flow_id = os.getenv("FLOW_ID")
        if not self.api_key:
            raise ValueError("API_KEY environment variable is required. Run setup_langflow_test.py first.")
        if not self.flow_id:
            raise ValueError("FLOW_ID environment variable is required. Run setup_langflow_test.py first.")
        # Unique per simulated user instance; combines class, object id and time.
        self.session_id = f"locust_{self.__class__.__name__}_{id(self)}_{int(time.time())}"
        self.request_count = 0
        # Test connection and auth
        with self.client.get("/health", catch_response=True) as response:
            if response.status_code != 200:
                raise ConnectionError(f"Health check failed: {response.status_code}")

    def make_request(self, message_type="simple", tag_suffix=""):
        """Make a request with proper error handling and timing.

        Args:
            message_type: Key into TEST_MESSAGES; falls back to "simple".
            tag_suffix: Extra tag appended to the Locust request name so
                different task patterns show up as separate stats rows.
        """
        message = TEST_MESSAGES.get(message_type, TEST_MESSAGES["simple"])
        # Langflow API payload structure
        payload = {
            "input_value": message,
            "output_type": "chat",
            "input_type": "chat",
            "tweaks": {},
        }
        headers = {
            "x-api-key": self.api_key,
            "Content-Type": "application/json",
            "Accept": "application/json",
        }
        self.request_count += 1
        endpoint = f"/api/v1/run/{self.flow_id}?stream=false"
        name = f"{endpoint} [{message_type}{tag_suffix}]"
        try:
            with self.client.post(
                endpoint,
                json=payload,
                headers=headers,
                name=name,
                timeout=self.REQUEST_TIMEOUT,
                catch_response=True,
            ) as response:
                # Get response text for error logging
                try:
                    response_text = response.text
                except Exception:
                    response_text = "Could not read response text"
                # Handle successful responses
                if response.status_code == 200:
                    try:
                        data = response.json()
                        # Langflow API success check - look for outputs
                        if data.get("outputs"):
                            return response.success()
                        # Check for error messages in the response
                        error_msg = data.get("detail", "Unknown error")
                        # Log detailed error for successful HTTP but failed flow execution
                        log_detailed_error(
                            user_class=self.__class__.__name__,
                            method="POST",
                            url=f"{self.host}{endpoint}",
                            status_code=response.status_code,
                            response_text=response_text,
                            request_data=payload,
                            exception=None,
                        )
                        return response.failure(f"Flow execution failed: {error_msg}")
                    except json.JSONDecodeError as e:
                        log_detailed_error(
                            user_class=self.__class__.__name__,
                            method="POST",
                            url=f"{self.host}{endpoint}",
                            status_code=response.status_code,
                            response_text=response_text,
                            request_data=payload,
                            exception=e,
                        )
                        return response.failure("Invalid JSON response")
                # Log all error responses with detailed information
                log_detailed_error(
                    user_class=self.__class__.__name__,
                    method="POST",
                    url=f"{self.host}{endpoint}",
                    status_code=response.status_code,
                    response_text=response_text,
                    request_data=payload,
                    exception=None,
                )
                # Handle specific error cases
                if response.status_code in (429, 503):
                    return response.failure(f"Backpressure/capacity: {response.status_code}")
                if response.status_code == 401:
                    return response.failure("Unauthorized - API key issue")
                if response.status_code == 404:
                    return response.failure("Flow not found - check FLOW_ID")
                if response.status_code >= 500:
                    return response.failure(f"Server error {response.status_code}")
                return response.failure(f"HTTP {response.status_code}")
        except Exception as e:
            # Get more detailed error information
            error_details = {
                "error_type": type(e).__name__,
                "error_message": str(e),
                "is_timeout": "timeout" in str(e).lower(),
                "is_connection_error": "connection" in str(e).lower(),
                "is_dns_error": "name resolution" in str(e).lower() or "dns" in str(e).lower(),
            }
            # Log any exceptions that occur during the request
            log_detailed_error(
                user_class=self.__class__.__name__,
                method="POST",
                url=f"{self.host}{endpoint}",
                status_code=0,  # Connection error
                response_text=f"Connection Error: {error_details}",
                request_data=payload,
                exception=str(e),
                traceback=traceback.format_exc(),
            )
            # Re-raise the exception so Locust can handle it properly
            raise
class NormalUser(BaseLangflowUser):
    """Normal user simulating typical API interactions."""

    weight = 3
    wait_time = between(0.5, 2)  # Typical user think time

    @task(80)
    def send_message(self):
        """Main task: Send a message with weighted distribution."""
        # Pick a message type according to MESSAGE_WEIGHTS (simple/realistic dominate).
        message_type = random.choices([w[0] for w in MESSAGE_WEIGHTS], weights=[w[1] for w in MESSAGE_WEIGHTS], k=1)[0]  # noqa: S311
        self.make_request(message_type=message_type)

    @task(15)
    def send_burst(self):
        """Send a burst of 3 small messages quickly."""
        for i in range(3):
            self.make_request(message_type="minimal", tag_suffix=f"-burst{i}")
            gevent.sleep(0.1)  # Small delay between burst requests

    @task(5)
    def send_complex(self):
        """Occasionally send complex requests that stress the system."""
        self.make_request(message_type="complex")
class AggressiveUser(BaseLangflowUser):
    """Aggressive user with minimal wait times."""

    weight = 3
    wait_time = between(0.1, 0.3)  # Very aggressive

    @task
    def rapid_fire(self):
        """Send requests as fast as possible."""
        # Tagged "-rapid" so these show up as a separate stats row.
        self.make_request(message_type="simple", tag_suffix="-rapid")
class SustainedLoadUser(BaseLangflowUser):
    """Maintains exactly 1 request/second for steady load testing."""

    weight = 3
    wait_time = constant_pacing(1)  # Exactly 1 request per second per user

    @task
    def steady_load(self):
        """Send requests at constant 1 RPS per user."""
        self.make_request(message_type="medium", tag_suffix="-steady")
class TailLatencyHunter(BaseLangflowUser):
    """Mixed workload designed to expose tail latency issues."""

    weight = 3
    wait_time = between(0.8, 1.5)

    @task
    def hunt_tail_latency(self):
        """Alternate between simple and complex requests to find tail latency."""
        # 70% light traffic, 30% heavy payloads.
        if random.random() < 0.7:  # noqa: S311
            self.make_request(message_type="simple", tag_suffix="-tail")
        else:
            self.make_request(message_type="large", tag_suffix="-tail-heavy")
class ScalabilityTestUser(BaseLangflowUser):
    """Tests for scalability limits."""

    weight = 3
    wait_time = constant(1.0)  # Constant load to test scaling

    @task
    def scalability_test(self):
        """Send medium complexity requests to test scaling limits."""
        self.make_request(message_type="medium", tag_suffix="-scale")
class BurstUser(BaseLangflowUser):
    """Sends bursts of requests to test connection pooling."""

    weight = 3
    wait_time = between(5, 10)  # Long wait between bursts

    @task
    def burst_attack(self):
        """Send a burst of 10 requests quickly to test connection handling."""
        for i in range(10):
            self.make_request(message_type="minimal", tag_suffix=f"-burst{i}")
            gevent.sleep(0.05)  # 50ms between requests in burst
# Legacy user class for backward compatibility
class FlowRunUser(NormalUser):
    """Legacy FlowRunUser - now inherits from NormalUser for backward compatibility."""
    # Intentionally empty: kept only so older configs referencing FlowRunUser keep working.
# Auto-select shape based on environment variable
# SHAPE=stepramp -> StepRamp, SHAPE=ramp100 -> RampToHundred; any other value
# (or unset) leaves every shape class in place.
_shape_env = os.getenv("SHAPE", "").lower()
_selected = None
if _shape_env == "stepramp":
    _selected = StepRamp
elif _shape_env == "ramp100":
    _selected = RampToHundred

if _selected:
    # Create a single exported shape class and remove others so Locust sees only one
    SelectedLoadTestShape = type("SelectedLoadTestShape", (_selected,), {})
    globals()["SelectedLoadTestShape"] = SelectedLoadTestShape
    # Remove other shape classes so Locust auto-picks the selected one
    for _name, _obj in list(globals().items()):
        if (
            inspect.isclass(_obj)
            and issubclass(_obj, LoadTestShape)
            and _obj is not SelectedLoadTestShape
            and _obj is not LoadTestShape
        ):
            del globals()[_name]
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/locust/langflow_locustfile.py",
"license": "MIT License",
"lines": 493,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/locust/langflow_run_load_test.py | #!/usr/bin/env python3
"""Langflow Load Test Runner
This script provides an easy way to run Langflow load tests.
For first-time setup, use langflow_setup_test.py to create test credentials.
Usage:
    # First time setup (run once):
    python langflow_setup_test.py --interactive
    # Then run load tests:
    python langflow_run_load_test.py --help
    python langflow_run_load_test.py --users 10 --duration 60
    python langflow_run_load_test.py --shape ramp100 --host http://localhost:7860
"""
import argparse
import os
import subprocess
import sys
import time
from pathlib import Path
def run_command(cmd, check=True, capture_output=False):
    """Run a shell command with proper error handling.

    Returns the stripped stdout when capture_output is True, otherwise None.
    When ``check`` is True, a failing command terminates the process.
    """
    print(f"Running: {' '.join(cmd) if isinstance(cmd, list) else cmd}")
    use_shell = isinstance(cmd, str)
    try:
        if capture_output:
            completed = subprocess.run(cmd, shell=use_shell, capture_output=True, text=True, check=check)
            return completed.stdout.strip()
        subprocess.run(cmd, shell=use_shell, check=check)
    except subprocess.CalledProcessError as err:
        print(f"Command failed: {err}")
        if capture_output and err.stdout:
            print(f"STDOUT: {err.stdout}")
        if capture_output and err.stderr:
            print(f"STDERR: {err.stderr}")
        if check:
            sys.exit(1)
def check_langflow_running(host):
    """Check if Langflow is already running.

    Probes ``{host}/health`` with a short timeout; any failure (connection
    error, timeout, or even a missing httpx install) reports False.
    """
    try:
        import httpx

        with httpx.Client(timeout=5.0) as client:
            return client.get(f"{host}/health").status_code == 200
    except Exception:
        return False
def test_single_request(host):
    """Test a single flow request to ensure the setup works before load testing.

    Reads API_KEY and FLOW_ID from the environment, performs a health check,
    then executes the flow once. Returns True only when the flow runs and
    returns outputs; every failure path prints a diagnostic and returns False.
    """
    import os
    import httpx
    api_key = os.getenv("API_KEY")
    flow_id = os.getenv("FLOW_ID")
    if not api_key or not flow_id:
        print("⚠️ Missing API_KEY or FLOW_ID for test request")
        return False
    print("\n🧪 Testing single request before load test...")
    print(f" Flow ID: {flow_id}")
    print(f" API Key: {api_key[:20]}...")
    # First, test basic connectivity
    print(" 🔗 Testing basic connectivity...")
    try:
        with httpx.Client(timeout=10.0) as client:
            health_response = client.get(f"{host}/health")
            if health_response.status_code == 200:
                print(" ✅ Health check passed")
            else:
                print(f" ⚠️ Health check failed: {health_response.status_code}")
                return False
    except Exception as e:
        print(f" ❌ Connectivity test failed: {e}")
        print(f" Error type: {type(e).__name__}")
        return False
    # Now test the actual flow request
    print(" 🎯 Testing flow request...")
    try:
        url = f"{host}/api/v1/run/{flow_id}?stream=false"
        # Same payload shape the locustfile sends during the load test.
        payload = {
            "input_value": "Hello, this is a test message",
            "output_type": "chat",
            "input_type": "chat",
            "tweaks": {},
        }
        headers = {"x-api-key": api_key, "Content-Type": "application/json"}
        with httpx.Client(timeout=30.0) as client:
            response = client.post(url, json=payload, headers=headers)
            print(f" 📡 Response status: {response.status_code}")
            if response.status_code == 200:
                try:
                    data = response.json()
                    # An "outputs" key means the flow actually executed.
                    if data.get("outputs"):
                        print(" ✅ Test request successful - flow is working!")
                        return True
                    print(f" ⚠️ Flow executed but no outputs returned: {data}")
                    return False
                except Exception as e:
                    print(f" ⚠️ Invalid JSON response: {e}")
                    return False
            else:
                print(f" ❌ Test request failed: {response.status_code}")
                print(f" Response: {response.text[:200]}...")
                return False
    except Exception as e:
        print(f" ❌ Test request error: {e}")
        return False
def wait_for_langflow(host, timeout=60):
    """Wait for Langflow to be ready.

    Polls the health endpoint every 2 seconds until it responds or the
    timeout (seconds) elapses. Returns True when ready, False on timeout.
    """
    print(f"Waiting for Langflow to be ready at {host}...")
    deadline = time.time() + timeout
    while time.time() < deadline:
        if check_langflow_running(host):
            print("✅ Langflow is ready!")
            return True
        time.sleep(2)
    print(f"❌ Langflow did not start within {timeout} seconds")
    return False
def start_langflow(host, port):
    """Start Langflow server if not already running.

    Returns the Popen handle for a freshly spawned server so the caller can
    terminate it later, or None when startup failed.

    NOTE(review): this also returns None when a server is *already* running,
    so callers cannot distinguish "already up" from "failed to start" by the
    return value alone — they should check reachability themselves first.
    """
    if check_langflow_running(host):
        print(f"✅ Langflow is already running at {host}")
        return None
    print(f"Starting Langflow server on port {port}...")
    # Start Langflow in the background
    cmd = [
        sys.executable,
        "-m",
        "langflow",
        "run",
        "--host",
        "0.0.0.0",
        "--port",
        str(port),
        "--auto-login",
        "--log-level",
        "warning",
    ]
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Wait for it to be ready
    if wait_for_langflow(host, timeout=60):
        return process
    # Startup timed out: kill the half-started process before giving up.
    process.terminate()
    return None
def run_locust_test(args):
    """Run the Locust load test.

    Builds the locust command line from parsed CLI args, exports the shape
    and host via environment variables (the locustfile reads SHAPE/
    LANGFLOW_HOST from the environment, not the CLI), and runs locust.
    Exits early when API_KEY or FLOW_ID is missing.
    """
    locust_file = Path(__file__).parent / "langflow_locustfile.py"
    # Check for required environment variables
    if not os.getenv("API_KEY"):
        print("❌ API_KEY environment variable not found!")
        print("Run langflow_setup_test.py first to create test credentials.")
        sys.exit(1)
    if not os.getenv("FLOW_ID"):
        print("❌ FLOW_ID environment variable not found!")
        print("Run langflow_setup_test.py first to create test credentials.")
        sys.exit(1)
    cmd = [
        "locust",
        "-f",
        str(locust_file),
        "--host",
        args.host,
    ]
    # Add shape if specified
    env = os.environ.copy()
    if args.shape:
        env["SHAPE"] = args.shape
    # Add other environment variables
    env["LANGFLOW_HOST"] = args.host
    if args.headless:
        cmd.extend(
            [
                "--headless",
                "--users",
                str(args.users),
                "--spawn-rate",
                str(args.spawn_rate),
                "--run-time",
                f"{args.duration}s",
            ]
        )
    if args.csv:
        cmd.extend(["--csv", args.csv])
    if args.html:
        cmd.extend(["--html", args.html])
    print(f"\n{'=' * 60}")
    print("STARTING LOAD TEST")
    print(f"{'=' * 60}")
    print(f"Command: {' '.join(cmd)}")
    print(f"Host: {args.host}")
    print(f"Users: {args.users}")
    print(f"Duration: {args.duration}s")
    print(f"Shape: {args.shape or 'default'}")
    print(f"API Key: {env.get('API_KEY', 'N/A')[:20]}...")
    print(f"Flow ID: {env.get('FLOW_ID', 'N/A')}")
    if args.html:
        print(f"HTML Report: {args.html}")
    if args.csv:
        print(f"CSV Reports: {args.csv}_*.csv")
    print(f"{'=' * 60}\n")
    subprocess.run(cmd, check=False, env=env)
def main():
    """Parse CLI options, ensure a Langflow server is reachable, then run Locust.

    BUGFIXES in this revision:
    - The --users help text claimed "default: 20" while the real default is 50.
    - Help epilog examples referenced run_load_test.py, which does not exist;
      the actual script is langflow_run_load_test.py.
    - When a local server was already running without --no-start-langflow,
      start_langflow() returned None and the script wrongly exited with
      "Failed to start Langflow"; we now detect the already-running case first.
    """
    parser = argparse.ArgumentParser(
        description="Run Langflow load tests with automatic setup",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
    # Run with web UI (interactive)
    python langflow_run_load_test.py
    # Run headless test with 50 users for 2 minutes
    python langflow_run_load_test.py --headless --users 50 --duration 120
    # Run with specific load shape
    python langflow_run_load_test.py --shape ramp100 --headless --users 100 --duration 180
    # Run against existing Langflow instance
    python langflow_run_load_test.py --host http://localhost:8000 --no-start-langflow
    # Save results to CSV
    python langflow_run_load_test.py --headless --csv results --users 25 --duration 60
""",
    )
    # Langflow options
    parser.add_argument(
        "--host",
        default="http://localhost:7860",
        help="Langflow host URL (default: http://localhost:7860, use https:// for remote instances)",
    )
    parser.add_argument("--port", type=int, default=7860, help="Port to start Langflow on (default: 7860)")
    parser.add_argument(
        "--no-start-langflow",
        action="store_true",
        help="Don't start Langflow automatically (assume it's already running)",
    )
    # Load test options
    parser.add_argument("--headless", action="store_true", help="Run in headless mode (no web UI)")
    parser.add_argument("--users", type=int, default=50, help="Number of concurrent users (default: 50)")
    parser.add_argument(
        "--spawn-rate", type=int, default=2, help="Rate to spawn users at (users per second, default: 2)"
    )
    parser.add_argument("--duration", type=int, default=60, help="Test duration in seconds (default: 60)")
    parser.add_argument("--shape", choices=["ramp100", "stepramp"], help="Load test shape to use")
    parser.add_argument("--csv", help="Save results to CSV files with this prefix")
    parser.add_argument("--html", help="Generate HTML report with this filename (e.g., report.html)")
    args = parser.parse_args()
    # Check dependencies
    try:
        import httpx  # noqa: F401
        import locust  # noqa: F401
    except ImportError as e:
        print(f"❌ Missing dependency: {e}")
        print("Install with: pip install locust httpx")
        sys.exit(1)
    langflow_process = None
    try:
        # Start Langflow if needed
        if not args.no_start_langflow:
            # Refuse to auto-start anything that is not a local http host.
            if args.host.startswith("https://") or not args.host.startswith("http://localhost"):
                print(f"⚠️ Remote host detected: {args.host}")
                print(" For remote instances, use --no-start-langflow flag")
                print(" Example: --host https://your-remote-instance.com --no-start-langflow")
                sys.exit(1)
            # start_langflow() returns None both for "already running" and for
            # "failed to start", so handle the already-running case up front.
            if check_langflow_running(args.host):
                print(f"✅ Langflow is already running at {args.host}")
            else:
                langflow_process = start_langflow(args.host, args.port)
                if not langflow_process:
                    print("❌ Failed to start Langflow")
                    sys.exit(1)
        # Just check if it's running
        elif not check_langflow_running(args.host):
            print(f"❌ Langflow is not running at {args.host}")
            if args.host.startswith("https://"):
                print(" Make sure your remote Langflow instance is accessible")
            else:
                print("Either start Langflow manually or remove --no-start-langflow flag")
            sys.exit(1)
        else:
            print(f"🔗 Using existing Langflow instance at {args.host}")
            if args.host.startswith("https://"):
                print(" ✅ Remote instance mode")
        # Test a single request before running the full load test
        if not test_single_request(args.host):
            print("❌ Single request test failed. Aborting load test.")
            sys.exit(1)
        # Run the load test
        run_locust_test(args)
    except KeyboardInterrupt:
        print("\n⚠️ Test interrupted by user")
    except Exception as e:
        print(f"❌ Error: {e}")
        sys.exit(1)
    finally:
        # Clean up Langflow process
        if langflow_process:
            print("\nStopping Langflow server...")
            langflow_process.terminate()
            try:
                langflow_process.wait(timeout=10)
            except subprocess.TimeoutExpired:
                langflow_process.kill()
            print("✅ Langflow server stopped")
# Allow this module to be imported (e.g. by tests) without running the CLI.
if __name__ == "__main__":
    main()
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/locust/langflow_run_load_test.py",
"license": "MIT License",
"lines": 292,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/locust/langflow_setup_test.py | #!/usr/bin/env python3
"""Langflow Load Test Setup CLI
This script sets up a complete Langflow test environment by:
1. Starting Langflow (optional)
2. Creating a test user account
3. Authenticating and getting JWT tokens
4. Creating API keys
5. Loading a real starter project flow
6. Providing credentials for load testing
Usage:
python setup_langflow_test.py --help
python setup_langflow_test.py --interactive
python setup_langflow_test.py --flow "Basic Prompting"
python setup_langflow_test.py --list-flows
"""
import argparse
import asyncio
import json
import sys
import time
async def get_starter_projects_from_api(host: str, access_token: str) -> list[dict]:
    """Fetch the starter-project list from the Langflow API.

    Tries an authenticated request first and retries anonymously on 401
    (some deployments expose this endpoint publicly). Returns an empty
    list on any failure instead of raising.
    """
    import httpx

    url = f"{host.rstrip('/')}/api/v1/starter-projects/"
    print(f" 🔍 Fetching starter projects from: {url}")
    try:
        async with httpx.AsyncClient() as client:
            auth_headers = {"Authorization": f"Bearer {access_token}"}
            response = await client.get(url, headers=auth_headers, timeout=30.0)
            print(f" 📡 Response status: {response.status_code}")

            if response.status_code == 401:
                # Retry without credentials in case the endpoint is public.
                print(" 🔄 Trying without authentication...")
                response = await client.get(url, timeout=30.0)
                print(f" 📡 Response status (no auth): {response.status_code}")

            if response.status_code != 200:
                print(f"⚠️ Failed to get starter projects: {response.status_code}")
                print(f"Response: {response.text}")
                return []

            if not response.text.strip():
                print("⚠️ Empty response from starter projects endpoint")
                return []

            data = response.json()
            print(f" ✅ Found {len(data)} starter projects")
            return data
    except Exception as e:
        print(f"⚠️ Error fetching starter projects: {e}")
        # httpx HTTP errors carry the response; surface it for debugging.
        if hasattr(e, "response"):
            print(f" Status code: {e.response.status_code}")
            print(f" Response text: {e.response.text}")
        return []
async def list_available_flows(host: str, access_token: str) -> list[tuple[str, str, str]]:
    """List all available starter project flows from Langflow API.

    Args:
        host: Langflow host URL
        access_token: JWT access token for authentication

    Returns:
        List of tuples: (flow_name, flow_name, description)
    """
    projects = await get_starter_projects_from_api(host, access_token)

    # Curated names/descriptions for the starter projects shipped with Langflow.
    known_projects = [
        (
            "Basic Prompting",
            "Basic Prompting",
            "A simple chat interface with OpenAI that answers like a pirate ✅ Great for load testing",
        ),
        (
            "Blog Writer",
            "Blog Writer",
            "Generate blog posts using web content as reference material ✅ Good for load testing",
        ),
        (
            "Document Q&A",
            "Document Q&A",
            "Question and answer system for document content ⚠️ Requires file uploads - not ideal for load testing",
        ),
        ("Memory Chatbot", "Memory Chatbot", "Chatbot with conversation memory using context ✅ Good for load testing"),
        (
            "Vector Store RAG",
            "Vector Store RAG",
            "Retrieval-Augmented Generation with vector storage ⚠️ May require setup - test first",
        ),
    ]

    # When the API count matches our curated list, assume a 1:1 correspondence.
    if len(projects) == len(known_projects):
        return known_projects

    # Fallback: pair each returned project with curated metadata where
    # available, otherwise synthesize a generic placeholder entry.
    flows: list[tuple[str, str, str]] = []
    for index in range(len(projects)):
        if index < len(known_projects):
            flows.append(known_projects[index])
        else:
            generated = f"Starter Project {index + 1}"
            flows.append((generated, generated, "Starter project flow"))
    return flows
async def get_flow_data_by_name(host: str, access_token: str, flow_name: str) -> dict | None:
    """Get flow data for a specific starter project by name.

    Args:
        host: Langflow host URL
        access_token: JWT access token for authentication
        flow_name: Name of the flow to retrieve

    Returns:
        Flow data as dictionary, or None if not found
    """
    projects = await get_starter_projects_from_api(host, access_token)
    flows = await list_available_flows(host, access_token)

    # Locate the curated entry matching the requested name, then return the
    # raw project payload at the same index, annotated with name/description.
    for index, entry in enumerate(flows):
        _, display_name, description = entry
        if display_name != flow_name:
            continue
        if index < len(projects):
            result = projects[index].copy()
            result["name"] = display_name
            result["description"] = description
            return result

    print(f"⚠️ Flow '{flow_name}' not found in starter projects")
    return None
async def select_flow_interactive(host: str, access_token: str) -> str | None:
    """Prompt the user to pick a starter-project flow from a numbered menu.

    Returns the selected flow name, or None when the user quits ('q') or
    cancels with Ctrl-C.
    """
    flows = await list_available_flows(host, access_token)
    if not flows:
        print("❌ No starter project flows found!")
        return None

    print(f"\n{'=' * 80}")
    print("AVAILABLE STARTER PROJECT FLOWS")
    print(f"{'=' * 80}")
    for position, (flow_name, name, description) in enumerate(flows, 1):
        print(f"{position:2d}. {name}")
        print(f" {description[:70]}{'...' if len(description) > 70 else ''}")
        print()

    # Re-prompt until we get a valid number, 'q', or a keyboard interrupt.
    while True:
        try:
            raw = input(f"Select a flow (1-{len(flows)}) or 'q' to quit: ").strip()
        except KeyboardInterrupt:
            print("\n\nSetup cancelled by user")
            return None
        if raw.lower() == "q":
            return None
        try:
            selection = int(raw)
        except ValueError:
            print("Please enter a valid number or 'q' to quit")
            continue
        if 1 <= selection <= len(flows):
            chosen = flows[selection - 1]
            print(f"\n✅ Selected: {chosen[1]}")
            return chosen[0]  # Return flow name
        print(f"Please enter a number between 1 and {len(flows)}")
async def setup_langflow_environment(host: str, flow_name: str | None = None, interactive: bool = False) -> dict:
    """Set up complete Langflow environment with real starter project flows.

    Runs six sequential steps against a live Langflow instance: health check,
    login with default credentials, API-key creation, flow selection
    (interactive / by name / default), flow-data retrieval, and flow upload.

    Args:
        host: Base URL of the Langflow instance (e.g. http://localhost:7860).
        flow_name: Starter-project name to use; ignored when ``interactive``.
        interactive: When True, prompt the user to pick a flow.

    Returns:
        Dict with credentials and the uploaded flow's id/name/data.

    Raises:
        Exception: if the health check, login, API-key creation, or flow
            upload fails.
        SystemExit: if httpx is missing, no flow is selected, or the named
            flow cannot be found/loaded.
    """
    try:
        import httpx
    except ImportError:
        print("❌ Missing dependency: httpx")
        print("Install with: pip install httpx")
        sys.exit(1)

    # Configuration - use default Langflow credentials
    username = "langflow"
    password = "langflow"

    # Filled in step by step below and returned to the caller at the end.
    setup_state = {
        "host": host,
        "username": username,
        "password": password,
        "user_id": None,
        "access_token": None,
        "api_key": None,
        "flow_id": None,
        "flow_name": None,
        "flow_data": None,
    }

    async with httpx.AsyncClient(base_url=host, timeout=60.0) as client:
        # Step 1: Health check
        print(f"\n1. Checking Langflow health at {host}...")
        try:
            health_response = await client.get("/health")
            if health_response.status_code != 200:
                raise Exception(f"Health check failed: {health_response.status_code}")
            print(" ✅ Langflow is running and accessible")
        except Exception as e:
            print(f" ❌ Health check failed: {e}")
            raise

        # Step 2: Skip user creation, use default credentials
        print("2. Using default Langflow credentials...")
        print(f" ✅ Using username: {username}")

        # Step 3: Login to get JWT token
        print("3. Authenticating...")
        login_data = {
            "username": username,
            "password": password,
        }
        try:
            login_response = await client.post(
                "/api/v1/login",
                data=login_data,  # OAuth2PasswordRequestForm expects form data
                headers={"Content-Type": "application/x-www-form-urlencoded"},
            )
            if login_response.status_code != 200:
                raise Exception(f"Login failed: {login_response.status_code} - {login_response.text}")
            tokens = login_response.json()
            setup_state["access_token"] = tokens["access_token"]
            print(" ✅ Authentication successful")
        except Exception as e:
            print(f" ❌ Authentication failed: {e}")
            raise

        # Step 4: Create API key (timestamped name avoids collisions across runs)
        print("4. Creating API key...")
        headers = {"Authorization": f"Bearer {setup_state['access_token']}"}
        try:
            api_key_data = {"name": f"Load Test Key - {int(time.time())}"}
            api_key_response = await client.post("/api/v1/api_key/", json=api_key_data, headers=headers)
            if api_key_response.status_code != 200:
                raise Exception(f"API key creation failed: {api_key_response.status_code} - {api_key_response.text}")
            api_key_info = api_key_response.json()
            setup_state["api_key"] = api_key_info["api_key"]
            print(f" ✅ API key created: {api_key_info['api_key'][:20]}...")
        except Exception as e:
            print(f" ❌ API key creation failed: {e}")
            raise

        # Step 5: Select and load flow from API
        print("5. Selecting starter project flow...")

        # Flow selection logic: interactive prompt wins, then the explicit
        # --flow name (case-insensitive), then the "Basic Prompting" default.
        selected_flow_name = None
        if interactive:
            selected_flow_name = await select_flow_interactive(host, setup_state["access_token"])
            if not selected_flow_name:
                print("No flow selected. Exiting.")
                sys.exit(0)
        elif flow_name:
            # Verify the flow exists in the API
            flows = await list_available_flows(host, setup_state["access_token"])
            for fname, name, _ in flows:
                if name.lower() == flow_name.lower():
                    selected_flow_name = name
                    break
            if not selected_flow_name:
                print(f"❌ Flow '{flow_name}' not found in starter projects!")
                print("Available flows:")
                for _, name, _ in flows:
                    print(f" - {name}")
                sys.exit(1)
        else:
            # Default to Basic Prompting
            selected_flow_name = "Basic Prompting"
            print(" Using default flow: Basic Prompting")

        # Get flow data from API
        flow_data = await get_flow_data_by_name(host, setup_state["access_token"], selected_flow_name)
        if not flow_data:
            print(f"❌ Could not load flow data for '{selected_flow_name}'")
            sys.exit(1)

        setup_state["flow_name"] = flow_data.get("name", selected_flow_name)
        setup_state["flow_data"] = flow_data
        print(f" ✅ Selected flow: {setup_state['flow_name']}")
        print(f" Description: {flow_data.get('description', 'No description')}")

        # Step 6: Upload the selected flow
        print(f"6. Uploading flow: {setup_state['flow_name']}...")
        try:
            # Prepare flow data for upload
            # Remove the id to let Langflow generate a new one
            flow_upload_data = flow_data.copy()
            if "id" in flow_upload_data:
                del flow_upload_data["id"]

            # Ensure endpoint_name is unique and valid (only letters, numbers, hyphens, underscores)
            import re

            sanitized_name = re.sub(r"[^a-zA-Z0-9_-]", "_", setup_state["flow_name"].lower())
            flow_upload_data["endpoint_name"] = f"loadtest_{int(time.time())}_{sanitized_name}"

            flow_response = await client.post("/api/v1/flows/", json=flow_upload_data, headers=headers)
            if flow_response.status_code != 201:
                raise Exception(f"Flow upload failed: {flow_response.status_code} - {flow_response.text}")
            flow_info = flow_response.json()
            setup_state["flow_id"] = flow_info["id"]
            print(" ✅ Flow uploaded successfully")
            print(f" Flow ID: {flow_info['id']}")
            print(f" Endpoint: {flow_info.get('endpoint_name', 'N/A')}")
        except Exception as e:
            print(f" ❌ Flow upload failed: {e}")
            raise

    return setup_state
def print_setup_results(setup_state: dict):
    """Print the setup results in a clear format.

    Fix: when access_token was falsy, the conditional expression wrapped the
    whole print argument, so a bare "N/A" was printed without the
    "JWT Token:" label. The label is now always printed.
    """
    print(f"\n{'=' * 80}")
    print("SETUP COMPLETE - LOAD TEST CREDENTIALS")
    print(f"{'=' * 80}")
    print(f"Host: {setup_state['host']}")
    print(f"Username: {setup_state['username']}")
    print(f"Password: {setup_state['password']}")
    print(f"User ID: {setup_state.get('user_id', 'N/A')}")
    # Truncate the JWT for display; keep the label even when no token exists.
    jwt_display = f"{setup_state['access_token'][:50]}..." if setup_state["access_token"] else "N/A"
    print(f"JWT Token: {jwt_display}")
    print(f"API Key: {setup_state['api_key']}")
    print(f"Flow ID: {setup_state['flow_id']}")
    print(f"Flow Name: {setup_state['flow_name']}")
    print(f"{'=' * 80}")

    print("\n📋 COPY THESE COMMANDS TO RUN LOAD TESTS:")
    print(f"{'=' * 80}")

    # Environment variables for easy copy-paste
    print("# Set environment variables:")
    print(f"export LANGFLOW_HOST='{setup_state['host']}'")
    print(f"export API_KEY='{setup_state['api_key']}'")
    print(f"export FLOW_ID='{setup_state['flow_id']}'")
    print()

    # Direct locust commands
    print("# Run load test with web UI:")
    print(f"locust -f locustfile.py --host {setup_state['host']}")
    print()
    print("# Run headless load test (50 users, 2 minutes):")
    print(f"locust -f locustfile.py --host {setup_state['host']} --headless --users 50 --spawn-rate 5 --run-time 120s")
    print()
    print("# Run with load shape:")
    print(
        f"SHAPE=ramp100 locust -f locustfile.py --host {setup_state['host']} --headless --users 100 --spawn-rate 5 --run-time 180s"
    )
    print()
    print("# Or use the runner script:")
    print(
        f"python run_load_test.py --host {setup_state['host']} --no-start-langflow --headless --users 25 --duration 120"
    )
    print()
    print("# Generate HTML report:")
    print(
        f"python run_load_test.py --host {setup_state['host']} --no-start-langflow --headless --users 50 --duration 180 --html report.html"
    )
    print(f"\n{'=' * 80}")
def save_credentials(setup_state: dict, output_file: str):
    """Save credentials to a JSON file for later use.

    Best-effort: failures are reported but never raised, so a save problem
    does not abort an otherwise successful setup.

    Args:
        setup_state: The dict produced by setup_langflow_environment.
        output_file: Path of the JSON file to write.
    """
    credentials = {
        "host": setup_state["host"],
        "api_key": setup_state["api_key"],
        "flow_id": setup_state["flow_id"],
        "flow_name": setup_state["flow_name"],
        "username": setup_state["username"],
        "password": setup_state["password"],
        "access_token": setup_state["access_token"],
        "created_at": time.time(),
    }
    try:
        # Explicit UTF-8 keeps the output stable regardless of the
        # platform's default locale encoding.
        with open(output_file, "w", encoding="utf-8") as f:
            json.dump(credentials, f, indent=2)
        print(f"\n💾 Credentials saved to: {output_file}")
    except Exception as e:
        print(f"⚠️ Could not save credentials: {e}")
def main():
    """CLI entry point: parse arguments and orchestrate environment setup.

    Three modes: --list-flows (print available flows and exit),
    --interactive (menu-driven flow selection), and --flow NAME
    (non-interactive). One of --interactive/--flow is required unless
    --list-flows is given.
    """
    parser = argparse.ArgumentParser(
        description="Set up Langflow load test environment with real starter project flows",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Interactive flow selection
  python setup_langflow_test.py --interactive
  # Use specific flow
  python setup_langflow_test.py --flow "Memory Chatbot"
  # List available flows
  python setup_langflow_test.py --list-flows
  # Setup with custom host
  python setup_langflow_test.py --host http://localhost:8000 --interactive
  # Save credentials to file
  python setup_langflow_test.py --interactive --save-credentials test_creds.json
        """,
    )
    parser.add_argument(
        "--host",
        default="http://localhost:7860",
        help="Langflow host URL (default: http://localhost:7860, use https:// for remote instances)",
    )
    parser.add_argument("--flow", help="Name of the starter project flow to use")
    parser.add_argument("--interactive", action="store_true", help="Interactive flow selection")
    parser.add_argument("--list-flows", action="store_true", help="List available starter project flows and exit")
    parser.add_argument("--save-credentials", metavar="FILE", help="Save credentials to a JSON file")

    args = parser.parse_args()

    # List flows and exit
    if args.list_flows:

        async def list_flows_only():
            # Self-contained coroutine: authenticates just enough to read the
            # starter-projects endpoint, prints the flow list, then returns.
            try:
                import httpx
            except ImportError:
                print("❌ Missing dependency: httpx")
                print("Install with: pip install httpx")
                sys.exit(1)

            # Quick authentication to access the API
            username = "langflow"
            password = "langflow"

            async with httpx.AsyncClient(base_url=args.host, timeout=30.0) as client:
                # Health check
                try:
                    health_response = await client.get("/health")
                    if health_response.status_code != 200:
                        raise Exception(f"Langflow not available at {args.host}")
                except Exception as e:
                    print(f"❌ Cannot connect to Langflow at {args.host}: {e}")
                    sys.exit(1)

                # Login to get access token
                try:
                    login_data = {"username": username, "password": password}
                    login_response = await client.post(
                        "/api/v1/login",
                        data=login_data,
                        headers={"Content-Type": "application/x-www-form-urlencoded"},
                    )
                    if login_response.status_code != 200:
                        raise Exception(f"Authentication failed: {login_response.status_code}")
                    tokens = login_response.json()
                    access_token = tokens["access_token"]
                except Exception as e:
                    print(f"❌ Authentication failed: {e}")
                    print("Make sure Langflow is running with default credentials (langflow/langflow)")
                    sys.exit(1)

                # Get flows from API
                flows = await list_available_flows(args.host, access_token)
                if not flows:
                    print("❌ No starter project flows found!")
                    sys.exit(1)

                print(f"\n{'=' * 80}")
                print("AVAILABLE STARTER PROJECT FLOWS")
                print(f"{'=' * 80}")
                for flow_name, name, description in flows:
                    print(f"📄 {name}")
                    print(f" Description: {description}")
                    print()
                print(f"Total: {len(flows)} flows available")

        asyncio.run(list_flows_only())
        sys.exit(0)

    # Validate arguments
    if not args.interactive and not args.flow:
        print("❌ Either --interactive or --flow must be specified")
        print("Use --help for more information")
        sys.exit(1)

    try:
        # Run the setup
        setup_state = asyncio.run(
            setup_langflow_environment(host=args.host, flow_name=args.flow, interactive=args.interactive)
        )

        # Print results
        print_setup_results(setup_state)

        # Save credentials if requested
        if args.save_credentials:
            save_credentials(setup_state, args.save_credentials)

        print("\n🚀 Environment setup complete! You can now run load tests.")

    except KeyboardInterrupt:
        print("\n\n⚠️ Setup cancelled by user")
        sys.exit(1)
    except Exception as e:
        print(f"\n❌ Setup failed: {e}")
        sys.exit(1)
# Script entry point: run the setup CLI only when executed directly.
if __name__ == "__main__":
    main()
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/locust/langflow_setup_test.py",
"license": "MIT License",
"lines": 451,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/locust/lfx_serve_locustfile.py | """LFX Locust Load Testing File.
Based on the weakness-focused stress test scripts with additional user behaviors.
Includes production-ready fixes for timing, error handling, and reporting.
This file tests the LFX API (complex serve), not the Langflow API.
Usage:
# Run with web UI (recommended)
    locust -f lfx_serve_locustfile.py --host http://127.0.0.1:8000
    # Run headless with built-in shape (shapes are selected via the SHAPE env var)
    SHAPE=ramp100 locust -f lfx_serve_locustfile.py --host http://127.0.0.1:8000 --headless
    # Run distributed (master)
    locust -f lfx_serve_locustfile.py --host http://127.0.0.1:8000 --master
    # Run distributed (worker)
    locust -f lfx_serve_locustfile.py --host http://127.0.0.1:8000 --worker --master-host=localhost
Environment Variables:
- FLOW_ID: Flow ID to test (default: 5523731d-5ef3-56de-b4ef-59b0a224fdbc)
- API_KEY: API key for authentication (default: test)
- REQUEST_TIMEOUT: Request timeout in seconds (default: 10)
- SHAPE: Load test shape to use (default: none, options: ramp100)
"""
import inspect
import json
import os
import random
import time
import gevent
from locust import FastHttpUser, LoadTestShape, between, constant, constant_pacing, events, task
# Configuration (overridable via environment variables for CI/distributed runs)
FLOW_ID = os.getenv("FLOW_ID", "5523731d-5ef3-56de-b4ef-59b0a224fdbc")
API_KEY = os.getenv("API_KEY", "test")
API_ENDPOINT = f"/flows/{FLOW_ID}/run"

# Test messages with realistic distribution, from tiny pings to large payloads
TEST_MESSAGES = {
    "minimal": "Hi",
    "simple": "Can you help me?",
    "medium": "I need help understanding how machine learning works in this context.",
    "complex": "Please analyze this data: " + "x" * 500 + " and provide detailed insights.",
    "large": "Here's a complex scenario: " + "data " * 1000,
}

# Weighted message distribution for realistic load: (message key, relative weight)
MESSAGE_WEIGHTS = [("simple", 50), ("medium", 30), ("minimal", 15), ("complex", 4), ("large", 1)]
# Load test shapes
class RampToHundred(LoadTestShape):
    """0 -> 100 users at 5 users/sec (20s ramp), then hold until 180s total.

    Matches the TLDR test pattern: 3 minutes, ramping to 100 users.
    """

    spawn_rate = 5
    target_users = 100
    total_duration = 180  # seconds

    def tick(self):
        elapsed = self.get_run_time()
        if elapsed >= self.total_duration:
            # Returning None tells Locust to stop the test.
            return None
        # Linear ramp at spawn_rate users/sec, capped at target_users.
        ramped = int(elapsed * self.spawn_rate)
        return min(ramped, self.target_users), self.spawn_rate
class StepRamp(LoadTestShape):
    """Step ramp for finding performance cliffs.

    Adds 5 users every 30 seconds (5 -> 10 -> ... -> 50); each step holds
    for exactly 30 seconds to measure steady-state performance, and the
    test ends after 300 seconds.
    """

    def tick(self):
        elapsed = self.get_run_time()
        if elapsed >= 300:
            return None  # end test after 300 seconds
        # 5 users in [0,30)s, 10 in [30,60)s, ..., 50 in [270,300)s.
        step_index = int(elapsed) // 30
        users = 5 * (step_index + 1)
        return users, 10  # fast spawn rate for quick transitions
# Environment-scoped metrics tracking (fixes the event listener issue).
# Maps each Locust Environment to counters of slow responses seen in that run.
_env_bags = {}


@events.test_start.add_listener
def on_test_start(environment, **_kwargs):
    """Initialize per-environment metrics tracking."""
    _env_bags[environment] = {
        "slow_10s": 0,  # responses slower than 10 seconds
        "slow_20s": 0,  # responses slower than 20 seconds
    }
@events.request.add_listener
def on_request(request_type, name, response_time, response_length, exception, context, **kwargs):  # noqa: ARG001
    """Track slow requests using Locust's built-in timing."""
    env_key = context.get("environment") if context else None
    bag = _env_bags.get(env_key)
    if bag is None:
        if len(_env_bags) != 1:
            return
        # Fallback: with a single tracked environment, assume it's ours.
        bag = next(iter(_env_bags.values()))
    if exception is not None:
        return  # only count successful requests for timing
    # response_time is in milliseconds from Locust.
    if response_time > 10_000:  # 10 seconds
        bag["slow_10s"] += 1
    if response_time > 20_000:  # 20 seconds
        bag["slow_20s"] += 1
@events.test_stop.add_listener
def on_test_stop(environment, **_kwargs):
    """Print comprehensive test summary with performance grading.

    Fix: the function computed p50/p99/RPS/slow-request counts and a
    production-readiness verdict but discarded all of them (the readiness
    branches were both `pass`), so the promised summary never appeared.
    The values are now actually printed.
    """
    stats = environment.stats.total
    if stats.num_requests == 0:
        return

    # Percentiles are in milliseconds; guard against None when unavailable.
    p50 = stats.get_response_time_percentile(0.50) or 0
    p95 = stats.get_response_time_percentile(0.95) or 0
    p99 = stats.get_response_time_percentile(0.99) or 0
    fail_ratio = stats.fail_ratio
    rps = getattr(stats, "current_rps", 0.0)

    # Slow-request counters gathered by the request listener.
    slow = _env_bags.get(environment, {"slow_10s": 0, "slow_20s": 0})

    # Performance grading based on production criteria
    grade = "A"
    issues = []
    if fail_ratio > 0.01:
        grade = "B"
        issues.append(f"fail {fail_ratio:.1%}")
    if fail_ratio > 0.05:
        grade = "C"
    if p95 > 10_000:
        grade = max(grade, "D")  # lexicographic: A < B < C < D < F
        issues.append(f"p95 {p95 / 1000:.1f}s")
    if p95 > 20_000:
        grade = "F"
        issues.append(f"p95 {p95 / 1000:.1f}s")

    # Production readiness assessment: A-C are considered acceptable.
    readiness = "production-ready" if grade in ("A", "B", "C") else "NOT production-ready"

    print(f"\n{'=' * 60}")
    print("LOAD TEST SUMMARY")
    print(f"Requests: {stats.num_requests}  Fail ratio: {fail_ratio:.1%}  RPS: {rps:.1f}")
    print(f"p50={p50 / 1000:.2f}s  p95={p95 / 1000:.2f}s  p99={p99 / 1000:.2f}s")
    print(f"Slow >10s: {slow['slow_10s']}  Slow >20s: {slow['slow_20s']}")
    print(f"Grade: {grade}{' (' + ', '.join(issues) + ')' if issues else ''} - {readiness}")
    print(f"{'=' * 60}")

    # Cleanup
    _env_bags.pop(environment, None)
class BaseLfxUser(FastHttpUser):
    """Base class for all LFX API load testing user types."""

    abstract = True  # Locust must not spawn this class directly

    # Tighter timeout for production; overridable via REQUEST_TIMEOUT env var.
    REQUEST_TIMEOUT = float(os.getenv("REQUEST_TIMEOUT", "10"))

    def on_start(self):
        """Called when a user starts before any task is scheduled."""
        # Unique per-user session prefix; each request appends its own counter.
        self.session_id = f"locust_{self.__class__.__name__}_{id(self)}_{int(time.time())}"
        self.request_count = 0

    def make_request(self, message_type="simple", tag_suffix=""):
        """Make a request with proper error handling and timing.

        Uses Locust's built-in response time measurement.

        Args:
            message_type: Key into TEST_MESSAGES (falls back to "simple").
            tag_suffix: Extra tag appended to the request name for grouping
                in Locust statistics.
        """
        message = TEST_MESSAGES.get(message_type, TEST_MESSAGES["simple"])
        # Fresh session id per request so server-side state never accumulates.
        payload = {"input_value": message, "session_id": f"{self.session_id}_{self.request_count}"}
        headers = {"x-api-key": API_KEY, "Content-Type": "application/json"}
        self.request_count += 1
        name = f"{API_ENDPOINT} [{message_type}{tag_suffix}]"

        # catch_response=True lets us classify success/failure ourselves below.
        with self.client.post(
            API_ENDPOINT,
            json=payload,
            headers=headers,
            name=name,
            timeout=self.REQUEST_TIMEOUT,
            catch_response=True,
        ) as response:
            # Handle successful responses
            if response.status_code == 200:
                try:
                    data = response.json()
                except json.JSONDecodeError:
                    return response.failure("Invalid JSON response")

                # Strictly check for success=True in the response payload
                success = data.get("success")
                if success is True:
                    return response.success()

                # Application-level failure - success is False, None, or missing
                msg = str(data.get("result", "Unknown error"))[:200]
                success_status = f"success={success}" if success is not None else "success=missing"
                return response.failure(f"Flow failed ({success_status}): {msg}")

            # Handle specific error cases for better monitoring
            if response.status_code in (429, 503):
                return response.failure(f"Backpressure/capacity: {response.status_code}")
            if response.status_code == 401:
                return response.failure("Unauthorized - API key issue")
            if response.status_code == 404:
                return response.failure("Flow not found - check FLOW_ID")
            if response.status_code >= 500:
                return response.failure(f"Server error {response.status_code}")
            return response.failure(f"HTTP {response.status_code}")
class NormalUser(BaseLfxUser):
    """Normal user simulating typical API interactions.

    Based on the main stress test patterns with realistic message distribution.
    """

    weight = 3
    wait_time = between(0.5, 2)  # typical user think time

    @task(80)
    def send_message(self):
        """Main task: Send a message with weighted distribution."""
        kinds = [pair[0] for pair in MESSAGE_WEIGHTS]
        odds = [pair[1] for pair in MESSAGE_WEIGHTS]
        picked = random.choices(kinds, weights=odds, k=1)[0]  # noqa: S311
        self.make_request(message_type=picked)

    @task(15)
    def send_burst(self):
        """Send a burst of 3 small messages quickly."""
        for shot in range(3):
            self.make_request(message_type="minimal", tag_suffix=f"-burst{shot}")
            gevent.sleep(0.1)  # small delay between burst requests

    @task(5)
    def send_complex(self):
        """Occasionally send complex requests that stress the system."""
        self.make_request(message_type="complex")
class AggressiveUser(BaseLfxUser):
    """Aggressive user with minimal wait times.

    Tests the system under extreme concurrent load.
    """

    weight = 3
    wait_time = between(0.1, 0.3)  # Very aggressive: almost no think time

    @task
    def rapid_fire(self):
        """Send requests as fast as possible."""
        self.make_request(message_type="simple", tag_suffix="-rapid")
class SustainedLoadUser(BaseLfxUser):
    """Maintains exactly 1 request/second for steady load testing.

    Based on constant throughput testing patterns.
    """

    weight = 3
    # constant_pacing schedules task starts so each user issues 1 req/sec
    # regardless of how long the request itself takes (up to 1s).
    wait_time = constant_pacing(1)

    @task
    def steady_load(self):
        """Send requests at constant 1 RPS per user."""
        self.make_request(message_type="medium", tag_suffix="-steady")
class TailLatencyHunter(BaseLfxUser):
    """Mixed workload designed to expose tail latency issues.

    Alternates between light and heavy requests to stress the system.
    """

    weight = 3
    wait_time = between(0.8, 1.5)

    @task
    def hunt_tail_latency(self):
        """Alternate between simple and complex requests to find tail latency."""
        # ~30% heavy requests, ~70% light ones.
        heavy = random.random() >= 0.7  # noqa: S311
        if heavy:
            self.make_request(message_type="large", tag_suffix="-tail-heavy")
        else:
            self.make_request(message_type="simple", tag_suffix="-tail")
class ScalabilityTestUser(BaseLfxUser):
    """Tests for the scalability cliff at 30 users.

    Uses patterns that specifically stress concurrency limits.
    """

    weight = 3
    wait_time = constant(1.0)  # Constant load to test scaling

    @task
    def scalability_test(self):
        """Send medium complexity requests to test scaling limits."""
        self.make_request(message_type="medium", tag_suffix="-scale")
class BurstUser(BaseLfxUser):
    """Sends bursts of 10 requests to test connection pooling.

    Based on connection pool exhaustion test patterns.
    """

    weight = 3
    wait_time = between(5, 10)  # long idle period between bursts

    @task
    def burst_attack(self):
        """Send a burst of 10 requests quickly to test connection handling."""
        for shot in range(10):
            self.make_request(message_type="minimal", tag_suffix=f"-burst{shot}")
            gevent.sleep(0.05)  # 50ms between requests in burst
# Auto-select shape based on environment variable
_shape_env = os.getenv("SHAPE", "").lower()
_selected = None
if _shape_env == "stepramp":
    _selected = StepRamp
elif _shape_env == "ramp100":
    _selected = RampToHundred

if _selected:
    # Create a single exported shape class and remove others so Locust sees only one
    class SelectedLoadTestShape(_selected):
        pass

    # Remove other shape classes so Locust auto-picks the selected one.
    # Iterate over a snapshot (list(...)) because globals() is mutated inside
    # the loop; LoadTestShape itself is kept since it is the imported base.
    for _name, _obj in list(globals().items()):
        if (
            inspect.isclass(_obj)
            and issubclass(_obj, LoadTestShape)
            and _obj is not SelectedLoadTestShape
            and _obj is not LoadTestShape
        ):
            del globals()[_name]
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/locust/lfx_serve_locustfile.py",
"license": "MIT License",
"lines": 284,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/locust/lfx_step_ramp.py | """LFX Step Ramp Load Test for Finding Performance Cliffs.
This file tests the LFX API (complex serve), not the Langflow API.
Steps every 30 seconds: 5 -> 10 -> 15 -> 20 -> 25 -> 30 -> 35 users.
Each step holds for exactly 30 seconds to measure steady-state performance.
"""
import json
import os
import time
from locust import FastHttpUser, LoadTestShape, between, events, task
# Configuration (overridable via environment variables)
FLOW_ID = os.getenv("FLOW_ID", "5523731d-5ef3-56de-b4ef-59b0a224fdbc")
API_KEY = os.getenv("API_KEY", "test")
API_ENDPOINT = f"/flows/{FLOW_ID}/run"

# Test messages, from tiny pings to multi-kilobyte payloads
TEST_MESSAGES = {
    "minimal": "Hi",
    "simple": "Can you help me?",
    "medium": "I need help understanding how machine learning works in this context.",
    "complex": "Please analyze this data: " + "x" * 500 + " and provide detailed insights.",
    "large": "Here's a complex scenario: " + "data " * 1000,
}

# (message key, relative weight) pairs used for weighted random selection
MESSAGE_WEIGHTS = [("simple", 50), ("medium", 30), ("minimal", 15), ("complex", 4), ("large", 1)]
class StepRamp(LoadTestShape):
    """Step ramp for finding performance cliffs."""

    # Each 30-second step adds 5 users; the run ends after 300 seconds.
    STEP_SECONDS = 30
    USERS_PER_STEP = 5
    MAX_SECONDS = 300

    def tick(self):
        elapsed = self.get_run_time()
        if elapsed >= self.MAX_SECONDS:
            return None  # stop the test after 300 seconds
        # 5 users in [0,30)s, 10 in [30,60)s, ..., 50 in [270,300)s.
        step_index = int(elapsed) // self.STEP_SECONDS
        return self.USERS_PER_STEP * (step_index + 1), 10
# Event handlers for metrics.
# Maps each Locust Environment to counters of slow responses seen in that run.
_env_bags = {}


@events.test_start.add_listener
def on_test_start(environment, **_kwargs):
    # Reset per-run slow-request counters for this environment.
    _env_bags[environment] = {"slow_10s": 0, "slow_20s": 0}
@events.request.add_listener
def on_request(request_type, name, response_time, response_length, exception, context, **kwargs):  # noqa: ARG001
    """Track slow requests using Locust's built-in timing."""
    key = context.get("environment") if context else None
    counters = _env_bags.get(key)
    if counters is None:
        # Fallback: with exactly one tracked environment, assume it's ours.
        if len(_env_bags) != 1:
            return
        counters = next(iter(_env_bags.values()))
    if exception is not None:
        return  # failed requests are excluded from slow-request timing
    # response_time arrives in milliseconds.
    if response_time > 10_000:  # 10 seconds
        counters["slow_10s"] += 1
    if response_time > 20_000:  # 20 seconds
        counters["slow_20s"] += 1
@events.test_stop.add_listener
def on_test_stop(environment, **_kwargs):
    """Summarize the run and flag a performance cliff.

    Fix: the threshold check previously discarded its result (both branches
    were `pass`) and the slow-request counters were fetched but unused, so
    step-ramp runs never reported anything. The verdict is now printed.
    """
    stats = environment.stats.total
    if stats.num_requests == 0:
        return

    p95 = stats.get_response_time_percentile(0.95) or 0  # milliseconds
    fail_ratio = stats.fail_ratio
    slow = _env_bags.get(environment, {"slow_10s": 0, "slow_20s": 0})

    # Cliff criteria: more than 5% failures or p95 above 10 seconds.
    if fail_ratio > 0.05 or p95 > 10_000:
        print(f"\n⚠️ Performance cliff detected: fail {fail_ratio:.1%}, p95 {p95 / 1000:.1f}s")
    else:
        print(f"\n✅ Steady state OK: fail {fail_ratio:.1%}, p95 {p95 / 1000:.1f}s")
    print(f"Slow >10s: {slow['slow_10s']}  Slow >20s: {slow['slow_20s']}")

    # Drop this environment's counters so repeated runs start clean.
    _env_bags.pop(environment, None)
class BaseLangflowUser(FastHttpUser):
    """Shared Locust user: posts flow-run requests and classifies each response."""
    # abstract=True keeps Locust from spawning this base class directly.
    abstract = True
    # Per-request timeout in seconds; env-overridable.
    REQUEST_TIMEOUT = float(os.getenv("REQUEST_TIMEOUT", "10"))
    def on_start(self):
        # Session id is unique per user instance and per run start time.
        self.session_id = f"step_{self.__class__.__name__}_{id(self)}_{int(time.time())}"
        self.request_count = 0
    def make_request(self, message_type="simple", tag_suffix=""):
        """POST one flow run and mark it a Locust success only when the body says success=True.

        Uses catch_response so an HTTP 200 carrying an application-level
        failure is still recorded as a failed request. Unknown message_type
        falls back to the "simple" payload.
        """
        message = TEST_MESSAGES.get(message_type, TEST_MESSAGES["simple"])
        # Unique session per request so no server-side state carries over.
        payload = {"input_value": message, "session_id": f"{self.session_id}_{self.request_count}"}
        headers = {"x-api-key": API_KEY, "Content-Type": "application/json"}
        self.request_count += 1
        # Name groups stats per message type (plus caller-supplied suffix).
        name = f"{API_ENDPOINT} [{message_type}{tag_suffix}]"
        with self.client.post(
            API_ENDPOINT,
            json=payload,
            headers=headers,
            name=name,
            timeout=self.REQUEST_TIMEOUT,
            catch_response=True,
        ) as response:
            # Handle successful responses
            if response.status_code == 200:
                try:
                    data = response.json()
                except json.JSONDecodeError:
                    return response.failure("Invalid JSON response")
                # Strictly check for success=True in the response payload
                success = data.get("success")
                if success is True:
                    return response.success()
                # Application-level failure - success is False, None, or missing
                msg = str(data.get("result", "Unknown error"))[:200]
                success_status = f"success={success}" if success is not None else "success=missing"
                return response.failure(f"Flow failed ({success_status}): {msg}")
            # 429/503 must be checked before the generic >=500 catch-all below.
            if response.status_code in (429, 503):
                return response.failure(f"Backpressure: {response.status_code}")
            if response.status_code == 401:
                return response.failure("Unauthorized")
            if response.status_code == 404:
                return response.failure("Not Found - possible bad FLOW_ID or misconfiguration")
            if response.status_code >= 500:
                return response.failure(f"Server error {response.status_code}")
            return response.failure(f"HTTP {response.status_code}")
class StepTestUser(BaseLangflowUser):
    """Locust user driving the step ramp with medium-complexity payloads."""

    wait_time = between(1, 2)

    @task
    def step_test(self):
        # Suffix tags the stats entry so step-phase requests are distinguishable.
        self.make_request("medium", "-step")
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/locust/lfx_step_ramp.py",
"license": "MIT License",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:scripts/check_changes_filter.py | #!/usr/bin/env python3
r"""Script to verify that all changed files in src/frontend are covered by patterns in changes-filter.yaml.
This ensures that CI workflows (especially Playwright tests) are triggered appropriately for frontend changes.
Usage:
# Check files changed in current branch vs main
git diff --name-only origin/main HEAD | python scripts/check_changes_filter.py
# Check specific files
echo -e "src/frontend/file1.tsx\nsrc/frontend/file2.ts" | python scripts/check_changes_filter.py
Note:
Only files under src/frontend/ are checked. All other files are ignored.
Exit codes:
0 - All frontend files are covered by patterns
1 - Some frontend files are not covered (or error occurred)
"""
import sys
from pathlib import Path
import yaml
def load_filter_patterns(filter_file: Path) -> dict[str, list[str]]:
    """Load all patterns from the changes-filter.yaml file.

    Validates and normalizes the YAML structure to ensure it's a dict mapping
    str to list[str]. Handles top-level "filters" key if present.

    Raises:
        TypeError: when the YAML shape is not a mapping of str -> str | list[str].
    """
    with filter_file.open() as fh:
        raw = yaml.safe_load(fh)

    # An empty/blank YAML file parses to None -> no patterns at all.
    if raw is None:
        return {}

    # Some filter files nest everything under a "filters" mapping.
    if isinstance(raw, dict) and "filters" in raw:
        raw = raw["filters"]

    if not isinstance(raw, dict):
        msg = f"Expected dict at top level, got {type(raw).__name__}"
        raise TypeError(msg)

    normalized: dict[str, list[str]] = {}
    for name, patterns in raw.items():
        if not isinstance(name, str):
            msg = f"Expected string key, got {type(name).__name__}: {name}"
            raise TypeError(msg)
        # A single bare string is treated as a one-element pattern list.
        pattern_list = [patterns] if isinstance(patterns, str) else patterns
        if not isinstance(pattern_list, list):
            msg = f"Expected list for key '{name}', got {type(pattern_list).__name__}"
            raise TypeError(msg)
        for idx, entry in enumerate(pattern_list):
            if not isinstance(entry, str):
                msg = f"Expected string in list for key '{name}' at index {idx}, got {type(entry).__name__}"
                raise TypeError(msg)
        normalized[name] = pattern_list
    return normalized
def get_changed_files_from_stdin() -> list[str]:
    """Read changed file paths from stdin (one per line), keeping only src/frontend ones."""
    # startswith("src/frontend/") also rules out blank lines, so no separate
    # emptiness check is needed.
    return [
        candidate
        for candidate in (raw.strip() for raw in sys.stdin)
        if candidate.startswith("src/frontend/")
    ]
def matches_pattern(file_path: str, pattern: str) -> bool:
    """Check if a file matches a glob pattern using pathlib semantics.

    Supports ** (via fnmatch) and a simple one-level {a,b} brace expansion.

    Fix: the previous normalization used str.lstrip("./"), which strips any
    leading run of '.' and '/' characters (lstrip's argument is a character
    *set*, not a prefix), so names such as ".env/x" or "..config/y" were
    silently mangled. Leading "./" prefixes are now removed explicitly.
    """
    import re
    from pathlib import PurePosixPath

    # Normalize separators, then drop any leading "./" prefixes.
    file_path = file_path.replace("\\", "/")
    while file_path.startswith("./"):
        file_path = file_path[2:]
    while pattern.startswith("./"):
        pattern = pattern[2:]

    # Simple one-level brace expansion: foo.{ts,tsx} -> [foo.ts, foo.tsx]
    patterns = [pattern]
    m = re.search(r"\{([^{}]+)\}", pattern)
    if m:
        opts = [opt.strip() for opt in m.group(1).split(",")]
        pre, post = pattern[: m.start()], pattern[m.end() :]
        patterns = [f"{pre}{opt}{post}" for opt in opts]

    for pat in patterns:
        if "**" in pat:
            import fnmatch

            # fnmatch's "*" already crosses "/" boundaries, so collapsing
            # "**" to "*" yields any-depth matching for these patterns.
            fnmatch_pattern = pat.replace("**", "*")
            if fnmatch.fnmatch(file_path, fnmatch_pattern):
                return True
        else:
            # Non-** patterns: pathlib glob matching (anchored from the right).
            if PurePosixPath(file_path).match(pat):
                return True
    return False
def check_file_coverage(changed_files: list[str], filter_patterns: dict[str, list[str]]) -> tuple[list[str], list[str]]:
    """Check which files are covered by at least one pattern.

    Returns: (covered_files, uncovered_files)
    """
    # Category names are irrelevant for coverage; pool every pattern together.
    pooled = [pattern for patterns in filter_patterns.values() for pattern in patterns]

    covered: list[str] = []
    uncovered: list[str] = []
    for path in changed_files:
        bucket = covered if any(matches_pattern(path, pat) for pat in pooled) else uncovered
        bucket.append(path)
    return covered, uncovered
def main():
    """Entry point: read changed files from stdin and verify filter coverage.

    Exits 1 when the filter file is missing or any frontend file is uncovered.
    """
    # The filter file lives in .github/ relative to the repository root.
    repo_root = Path(__file__).parent.parent
    filter_file = repo_root / ".github" / "changes-filter.yaml"

    if not filter_file.exists():
        print(f"Error: Filter file not found at {filter_file}")
        sys.exit(1)

    patterns = load_filter_patterns(filter_file)
    changed = get_changed_files_from_stdin()

    if not changed:
        print("No changed files detected.")
        return

    print(f"Checking {len(changed)} changed file(s) against filter patterns...")
    print()

    covered, uncovered = check_file_coverage(changed, patterns)

    if not uncovered:
        print("✅ SUCCESS: All changed files are covered by patterns in changes-filter.yaml")
        print()
        print(f"Checked {len(changed)} file(s):")
        for path in sorted(covered):
            print(f"  ✓ {path}")
        return

    print("❌ FAILURE: The following files are NOT covered by any pattern in changes-filter.yaml:")
    print()
    for path in sorted(uncovered):
        print(f"  - {path}")
    print()
    print(f"Total: {len(uncovered)} uncovered file(s) out of {len(changed)}")
    print()
    print("Please update .github/changes-filter.yaml to include patterns for these files.")
    sys.exit(1)
if __name__ == "__main__":
    # Script entry point: file list arrives on stdin (see module docstring).
    main()
| {
"repo_id": "langflow-ai/langflow",
"file_path": "scripts/check_changes_filter.py",
"license": "MIT License",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langflow-ai/langflow:src/backend/base/langflow/api/utils/mcp/config_utils.py | import asyncio
import platform
from asyncio.subprocess import create_subprocess_exec
from datetime import datetime, timezone
from uuid import UUID
from fastapi import HTTPException
from lfx.base.mcp.constants import MAX_MCP_SERVER_NAME_LENGTH
from lfx.base.mcp.util import sanitize_mcp_name
from lfx.log import logger
from lfx.services.deps import get_settings_service
from sqlmodel import select
from langflow.api.v2.mcp import get_server_list, update_server
from langflow.services.auth.mcp_encryption import decrypt_auth_settings, encrypt_auth_settings
from langflow.services.database.models import Flow, Folder
from langflow.services.database.models.api_key.crud import create_api_key
from langflow.services.database.models.api_key.model import ApiKeyCreate
from langflow.services.database.models.folder.constants import DEFAULT_FOLDER_NAME
from langflow.services.database.models.user.model import User
from langflow.services.deps import get_storage_service
ALL_INTERFACES_HOST = "0.0.0.0" # noqa: S104
class MCPServerValidationResult:
"""Represents the result of an MCP server validation check.
This class encapsulates the outcome of checking whether an MCP server
configuration can be safely created or updated for a given project. The typical
sequence is as follows:
1. Initiation: An operation requiring an MCP server (e.g., creating a
new project with MCP enabled) triggers a validation check.
2. Validation: The validate_mcp_server_for_project function is called.
It generates the expected server name from the project name and checks
if a server with that name already exists.
3. Ownership Check: If a server exists, the function verifies if it
belongs to the current project by checking for the project's UUID in
the server's configuration.
4. Result: An instance of this class is returned, summarizing whether
the server exists and if the project ID matches.
5. Decision: The calling code uses the properties of this result
(has_conflict, should_skip, should_proceed) to determine the next
action, such as aborting on conflict, skipping if already configured,
or proceeding with the setup.
"""
def __init__(
self,
*,
server_exists: bool,
project_id_matches: bool,
server_name: str = "",
existing_config: dict | None = None,
conflict_message: str = "",
):
self.server_exists = server_exists
self.project_id_matches = project_id_matches
self.server_name = server_name
self.existing_config = existing_config
self.conflict_message = conflict_message
@property
def has_conflict(self) -> bool:
"""Returns True when an MCP server name collision occurs.
This indicates that another project is already using the desired server name.
"""
return self.server_exists and not self.project_id_matches
@property
def should_skip(self) -> bool:
"""Returns True when the MCP server configuration is already correct for this project.
This indicates that the server exists and is properly configured for the current project.
"""
return self.server_exists and self.project_id_matches
@property
def should_proceed(self) -> bool:
"""Returns True when MCP server setup can proceed safely without conflicts.
This indicates either no server exists (safe to create) or the existing server
belongs to the current project (safe to update).
"""
return not self.server_exists or self.project_id_matches
async def validate_mcp_server_for_project(
    project_id: UUID,
    project_name: str,
    user,
    session,
    storage_service,
    settings_service,
    operation: str = "create",
) -> MCPServerValidationResult:
    """Validate MCP server for a project operation.

    Args:
        project_id: The project UUID
        project_name: The project name
        user: The user performing the operation
        session: Database session
        storage_service: Storage service
        settings_service: Settings service
        operation: Operation type ("create", "update", "delete") — only
            affects the wording of the conflict message.

    Returns:
        MCPServerValidationResult with validation details. If the lookup
        itself fails, a "server does not exist" result is returned so the
        caller's operation is allowed to proceed (fail-open).
    """
    # Generate server name that would be used for this project.
    # The "lf-" prefix accounts for the 4 characters subtracted from the cap.
    server_name = f"lf-{sanitize_mcp_name(project_name)[: (MAX_MCP_SERVER_NAME_LENGTH - 4)]}"
    try:
        existing_servers = await get_server_list(user, session, storage_service, settings_service)
        if server_name not in existing_servers.get("mcpServers", {}):
            # Server doesn't exist
            return MCPServerValidationResult(
                project_id_matches=False,
                server_exists=False,
                server_name=server_name,
            )
        # Server exists - check if project ID matches by looking for this
        # project's UUID inside any URL found in the server's args.
        existing_server_config = existing_servers["mcpServers"][server_name]
        existing_args = existing_server_config.get("args", [])
        project_id_matches = False
        if existing_args:
            # SSE URL is typically the last argument
            # TODO: need a better way to determine the position of the SSE URL in the args
            existing_sse_urls = await extract_urls_from_strings(existing_args)
            for existing_sse_url in existing_sse_urls:
                if str(project_id) in existing_sse_url:
                    project_id_matches = True
                    break
        else:
            # Redundant (already False above) but kept for explicitness.
            project_id_matches = False
        # Generate appropriate conflict message based on operation
        conflict_message = ""
        if not project_id_matches:
            if operation == "create":
                conflict_message = (
                    f"MCP server name conflict: '{server_name}' already exists "
                    f"for a different project. Cannot create MCP server for project "
                    f"'{project_name}' (ID: {project_id})"
                )
            elif operation == "update":
                conflict_message = (
                    f"MCP server name conflict: '{server_name}' exists for a different project. "
                    f"Cannot update MCP server for project '{project_name}' (ID: {project_id})"
                )
            elif operation == "delete":
                conflict_message = (
                    f"MCP server '{server_name}' exists for a different project. "
                    f"Cannot delete MCP server for project '{project_name}' (ID: {project_id})"
                )
        return MCPServerValidationResult(
            server_exists=True,
            project_id_matches=project_id_matches,
            server_name=server_name,
            existing_config=existing_server_config,
            conflict_message=conflict_message,
        )
    except Exception as e:  # noqa: BLE001
        await logger.awarning(f"Could not validate MCP server for project {project_id}: {e}")
        # Return result allowing operation to proceed on validation failure
        return MCPServerValidationResult(
            project_id_matches=False,
            server_exists=False,
            server_name=server_name,
        )
async def get_url_by_os(host: str, port: int, url: str) -> str:
    """Get the URL by operating system.

    On WSL, localhost URLs are rewritten to the VM's first IP address so the
    endpoint is reachable for external access (per the debug message below);
    on all other platforms the URL is returned unchanged.
    """
    os_type = platform.system()
    # WSL kernels report "Linux" with "microsoft" embedded in the release string.
    is_wsl = os_type == "Linux" and "microsoft" in platform.uname().release.lower()
    if is_wsl and host in {"localhost", "127.0.0.1"}:
        try:
            # `hostname -I` prints the machine's IP addresses, first one first.
            proc = await create_subprocess_exec(
                "/usr/bin/hostname",
                "-I",
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )
            stdout, _ = await proc.communicate()
            if proc.returncode == 0 and stdout.strip():
                wsl_ip = stdout.decode().strip().split()[0]  # Get first IP address
                await logger.adebug("Using WSL IP for external access: %s", wsl_ip)
                # Replace the localhost with the WSL IP in the URL
                url = url.replace(f"http://{host}:{port}", f"http://{wsl_ip}:{port}")
        except OSError as e:
            # Best-effort: on failure, fall back to the unmodified URL.
            await logger.awarning("Failed to get WSL IP address: %s. Using default URL.", str(e))
    return url
async def _get_project_base_url_components() -> tuple[str, int]:
    """Return normalized host and port for building MCP URLs."""
    settings = get_settings_service().settings
    configured_host = getattr(settings, "host", "localhost")
    # Prefer the port detected at runtime, then the configured one, then 7860.
    resolved_port = (
        getattr(settings, "runtime_port", None)
        or getattr(settings, "port", None)
        or 7860
    )
    # 0.0.0.0 is a bind address, not a connect address — MCP clients must
    # dial localhost instead.
    connect_host = "localhost" if configured_host == ALL_INTERFACES_HOST else configured_host
    return connect_host, resolved_port
async def get_project_streamable_http_url(project_id: UUID) -> str:
    """Generate the Streamable HTTP endpoint for a project (no /sse suffix)."""
    host, port = await _get_project_base_url_components()
    root = f"http://{host}:{port}".rstrip("/")
    endpoint = f"{root}/api/v1/mcp/project/{project_id}/streamable"
    # Apply the WSL localhost rewrite when applicable.
    return await get_url_by_os(host, port, endpoint)
async def get_project_sse_url(project_id: UUID) -> str:
    """Generate the legacy SSE URL for a project, including WSL handling."""
    host, port = await _get_project_base_url_components()
    root = f"http://{host}:{port}".rstrip("/")
    endpoint = f"{root}/api/v1/mcp/project/{project_id}/sse"
    return await get_url_by_os(host, port, endpoint)
async def _get_mcp_composer_auth_config(project: Folder) -> dict:
    """Decrypt and return MCP Composer auth configuration for a project.

    Raises:
        ValueError: when the project has no auth settings or decryption
            yields nothing usable.
    """
    decrypted = decrypt_auth_settings(project.auth_settings) if project.auth_settings else None
    if not decrypted:
        error_message = "Auth config is missing. Please check your settings and try again."
        raise ValueError(error_message)
    return decrypted
async def get_composer_streamable_http_url(project: Folder) -> str:
    """Generate Streamable HTTP URL for the MCP Composer instance."""
    auth = await _get_mcp_composer_auth_config(project)
    oauth_host = auth.get("oauth_host")
    oauth_port = auth.get("oauth_port")
    if not (oauth_host and oauth_port):
        error_msg = "OAuth host and port are required to get the MCP Composer URL"
        raise ValueError(error_msg)
    composer_url = f"http://{oauth_host}:{oauth_port}"
    # Port may arrive as a string from the decrypted settings.
    return await get_url_by_os(oauth_host, int(oauth_port), composer_url)  # type: ignore[arg-type]
async def auto_configure_starter_projects_mcp(session):
    """Auto-configure MCP servers for starter projects for all users at startup.

    For every user: enables MCP on the flows in that user's own "Starter
    Projects" folder, validates that the derived server name is free (or
    already belongs to that folder), sets up folder auth and an API key, and
    registers an mcp-proxy server entry pointing at the folder's
    streamable-http endpoint. A failure for one user is logged and skipped so
    the remaining users are still configured; all DB changes are committed in
    one batch at the end.

    Args:
        session: Async database session used for every read and write here.
    """
    # Check if auto-configure is enabled
    settings_service = get_settings_service()
    await logger.adebug("Starting auto-configure starter projects MCP")
    if not settings_service.settings.add_projects_to_mcp_servers:
        await logger.adebug("Auto-Configure MCP servers disabled, skipping starter project MCP configuration")
        return
    await logger.adebug(
        f"Auto-configure settings: add_projects_to_mcp_servers="
        f"{settings_service.settings.add_projects_to_mcp_servers}, "
        f"create_starter_projects={settings_service.settings.create_starter_projects}, "
        f"update_starter_projects={settings_service.settings.update_starter_projects}"
    )
    try:
        # Get all users in the system
        users = (await session.exec(select(User))).all()
        await logger.adebug(f"Found {len(users)} users in the system")
        if not users:
            await logger.adebug("No users found, skipping starter project MCP configuration")
            return
        # Add starter projects to each user's MCP server configuration
        total_servers_added = 0
        for user in users:
            await logger.adebug(f"Processing user: {user.username} (ID: {user.id})")
            try:
                # First, let's see what folders this user has
                all_user_folders = (await session.exec(select(Folder).where(Folder.user_id == user.id))).all()
                folder_names = [f.name for f in all_user_folders]
                await logger.adebug(f"User {user.username} has folders: {folder_names}")
                # Find THIS USER'S own starter projects folder
                # Each user has their own "Starter Projects" folder with unique ID
                user_starter_folder = (
                    await session.exec(
                        select(Folder).where(
                            Folder.name == DEFAULT_FOLDER_NAME,
                            Folder.user_id == user.id,  # Each user has their own!
                        )
                    )
                ).first()
                if not user_starter_folder:
                    await logger.adebug(
                        f"No starter projects folder ('{DEFAULT_FOLDER_NAME}') found for user {user.username}, skipping"
                    )
                    # Log what folders this user does have for debugging
                    await logger.adebug(f"User {user.username} available folders: {folder_names}")
                    continue
                await logger.adebug(
                    f"Found starter folder '{user_starter_folder.name}' for {user.username}: "
                    f"ID={user_starter_folder.id}"
                )
                # Configure MCP settings for flows in THIS USER'S starter folder
                # (components are excluded; only runnable flows get MCP actions).
                flows_query = select(Flow).where(
                    Flow.folder_id == user_starter_folder.id,
                    Flow.is_component == False,  # noqa: E712
                )
                user_starter_flows = (await session.exec(flows_query)).all()
                # Enable MCP for starter flows if not already configured
                # (mcp_enabled is None only when never explicitly set).
                flows_configured = 0
                for flow in user_starter_flows:
                    if flow.mcp_enabled is None:
                        flow.mcp_enabled = True
                        if not flow.action_name:
                            flow.action_name = sanitize_mcp_name(flow.name)
                        if not flow.action_description:
                            flow.action_description = flow.description or f"Starter project: {flow.name}"
                        flow.updated_at = datetime.now(timezone.utc)
                        session.add(flow)
                        flows_configured += 1
                if flows_configured > 0:
                    await logger.adebug(f"Enabled MCP for {flows_configured} starter flows for user {user.username}")
                # Validate MCP server for this starter projects folder
                validation_result = await validate_mcp_server_for_project(
                    user_starter_folder.id,
                    DEFAULT_FOLDER_NAME,
                    user,
                    session,
                    get_storage_service(),
                    settings_service,
                    operation="create",
                )
                # Skip if server already exists for this starter projects folder
                if validation_result.should_skip:
                    await logger.adebug(
                        f"MCP server '{validation_result.server_name}' already exists for user "
                        f"{user.username}'s starter projects (project ID: "
                        f"{user_starter_folder.id}), skipping"
                    )
                    continue  # Skip this user since server already exists for the same project
                server_name = validation_result.server_name
                # Set up THIS USER'S starter folder authentication (same as new projects)
                # If AUTO_LOGIN is false, automatically enable API key authentication
                default_auth = {"auth_type": "none"}
                await logger.adebug(f"Settings service auth settings: {settings_service.auth_settings}")
                await logger.adebug(f"User starter folder auth settings: {user_starter_folder.auth_settings}")
                # NOTE(review): this first branch fires when AUTO_LOGIN is *true*
                # (and no SUPERUSER), which disagrees with the comment above —
                # confirm the intended gating.
                if (
                    not user_starter_folder.auth_settings
                    and settings_service.auth_settings.AUTO_LOGIN
                    and not settings_service.auth_settings.SUPERUSER
                ):
                    default_auth = {"auth_type": "apikey"}
                    user_starter_folder.auth_settings = encrypt_auth_settings(default_auth)
                    await logger.adebug(
                        "AUTO_LOGIN enabled without SUPERUSER; forcing API key auth for starter folder %s",
                        user.username,
                    )
                elif not settings_service.auth_settings.AUTO_LOGIN and not user_starter_folder.auth_settings:
                    default_auth = {"auth_type": "apikey"}
                    user_starter_folder.auth_settings = encrypt_auth_settings(default_auth)
                    await logger.adebug(f"Set up auth settings for user {user.username}'s starter folder")
                elif user_starter_folder.auth_settings:
                    # NOTE(review): this assigns the *encrypted* settings blob to
                    # default_auth; the .get("auth_type") lookups below may not see
                    # the decrypted auth type — verify against encrypt_auth_settings.
                    default_auth = user_starter_folder.auth_settings
                # Create API key for this user to access their own starter projects
                api_key_name = f"MCP Project {DEFAULT_FOLDER_NAME} - {user.username}"
                unmasked_api_key = await create_api_key(session, ApiKeyCreate(name=api_key_name), user.id)
                # Build connection URLs for THIS USER'S starter folder (unique ID per user)
                streamable_http_url = await get_project_streamable_http_url(user_starter_folder.id)
                # Prepare server config (similar to new project creation)
                if default_auth.get("auth_type", "none") == "apikey":
                    command = "uvx"
                    args = [
                        "mcp-proxy",
                        "--transport",
                        "streamablehttp",
                        "--headers",
                        "x-api-key",
                        unmasked_api_key.api_key,
                        streamable_http_url,
                    ]
                elif default_auth.get("auth_type", "none") == "oauth":
                    msg = "OAuth authentication is not yet implemented for MCP server creation during project creation."
                    logger.warning(msg)
                    raise HTTPException(status_code=501, detail=msg)
                else:  # default_auth_type == "none"
                    # No authentication - direct connection
                    command = "uvx"
                    args = [
                        "mcp-proxy",
                        "--transport",
                        "streamablehttp",
                        streamable_http_url,
                    ]
                server_config = {"command": command, "args": args}
                # Add to user's MCP servers configuration
                await logger.adebug(f"Adding MCP server '{server_name}' for user {user.username}")
                await update_server(
                    server_name,
                    server_config,
                    user,
                    session,
                    get_storage_service(),
                    settings_service,
                )
                total_servers_added += 1
                await logger.adebug(f"Added starter projects MCP server for user: {user.username}")
            except Exception as e:  # noqa: BLE001
                # If server already exists or other issues, just log and continue
                # (note this also swallows the 501 HTTPException raised above).
                await logger.aerror(f"Could not add starter projects MCP server for user {user.username}: {e}")
                continue
        await session.commit()
        if total_servers_added > 0:
            await logger.adebug(f"Added starter projects MCP servers for {total_servers_added} users")
        else:
            await logger.adebug("No new starter project MCP servers were added")
    except Exception as e:  # noqa: BLE001
        await logger.aerror(f"Failed to auto-configure starter projects MCP servers: {e}")
async def extract_urls_from_strings(strings: list[str]) -> list[str]:
    """Extract URLs from a list of strings.

    Args:
        strings: List of strings to search for URLs

    Returns:
        List of URLs found in the input strings (non-string items are skipped)
    """
    import re

    # URL pattern to match http/https URLs; the final character class keeps
    # trailing punctuation out of the match.
    url_pattern = r'https?://[^\s<>"{}|\\^`\[\]]+[^\s<>"{}|\\^`\[\].,;:!?]'
    matcher = re.compile(url_pattern)
    collected: list[str] = []
    for candidate in strings:
        if isinstance(candidate, str):
            collected.extend(matcher.findall(candidate))
    return collected
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/base/langflow/api/utils/mcp/config_utils.py",
"license": "MIT License",
"lines": 404,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langflow-ai/langflow:src/backend/tests/unit/api/utils/test_config_utils.py | from unittest.mock import patch
from uuid import uuid4
import pytest
from httpx import AsyncClient
from langflow.api.utils.mcp.config_utils import (
MCPServerValidationResult,
auto_configure_starter_projects_mcp,
validate_mcp_server_for_project,
)
from langflow.services.database.models.flow.model import Flow
from langflow.services.database.models.folder.constants import DEFAULT_FOLDER_NAME
from langflow.services.database.models.folder.model import Folder
from langflow.services.database.models.user.model import User
from langflow.services.deps import session_scope
from sqlmodel import select
def _build_server_config(base_url: str, project_id, transport: str):
"""Return URL and server config for a given transport."""
suffix = "streamable" if transport == "streamable" else "sse"
url = f"{base_url}/api/v1/mcp/project/{project_id}/{suffix}"
if transport == "streamable": # noqa: SIM108
args = ["mcp-proxy", "--transport", "streamablehttp", url]
else:
args = ["mcp-proxy", url]
return url, {"command": "uvx", "args": args}
class TestMCPServerValidationResult:
    """Unit tests for MCPServerValidationResult and its derived properties."""

    def test_init_defaults(self):
        """Optional fields fall back to empty/None defaults."""
        res = MCPServerValidationResult(server_exists=False, project_id_matches=False)
        assert res.server_exists is False
        assert res.project_id_matches is False
        assert res.server_name == ""
        assert res.existing_config is None
        assert res.conflict_message == ""

    def test_init_with_values(self):
        """All constructor arguments are stored verbatim."""
        server_cfg = {"command": "test", "args": ["arg1"]}
        res = MCPServerValidationResult(
            server_exists=True,
            project_id_matches=True,
            server_name="test-server",
            existing_config=server_cfg,
            conflict_message="Test conflict",
        )
        assert res.server_exists is True
        assert res.project_id_matches is True
        assert res.server_name == "test-server"
        assert res.existing_config == server_cfg
        assert res.conflict_message == "Test conflict"

    def test_has_conflict_property(self):
        """has_conflict is True only for an existing server owned by another project."""
        # Missing server: nothing to collide with.
        assert MCPServerValidationResult(server_exists=False, project_id_matches=False).has_conflict is False
        # Existing server that belongs to this project: no collision.
        assert MCPServerValidationResult(server_exists=True, project_id_matches=True).has_conflict is False
        # Existing server owned elsewhere: collision.
        assert MCPServerValidationResult(server_exists=True, project_id_matches=False).has_conflict is True

    def test_should_skip_property(self):
        """should_skip is True only when the server exists for this very project."""
        assert MCPServerValidationResult(server_exists=False, project_id_matches=False).should_skip is False
        assert MCPServerValidationResult(server_exists=True, project_id_matches=False).should_skip is False
        assert MCPServerValidationResult(server_exists=True, project_id_matches=True).should_skip is True

    def test_should_proceed_property(self):
        """should_proceed is False only on a name conflict with another project."""
        assert MCPServerValidationResult(server_exists=False, project_id_matches=False).should_proceed is True
        assert MCPServerValidationResult(server_exists=True, project_id_matches=False).should_proceed is False
        assert MCPServerValidationResult(server_exists=True, project_id_matches=True).should_proceed is True
class TestValidateMcpServerForProject:
"""Test the validate_mcp_server_for_project function using real API calls."""
    @pytest.fixture
    async def test_project(self, active_user, client: AsyncClient):  # noqa: ARG002
        """Create a test project for testing.

        Yields a Folder owned by active_user and deletes it afterwards.
        NOTE(review): `client` looks unused; presumably requested so the app
        and database fixtures are initialized first — confirm.
        """
        project_id = uuid4()
        async with session_scope() as session:
            project = Folder(
                id=project_id,
                name="Test Project",
                user_id=active_user.id,
                description="Test project for MCP validation",
            )
            session.add(project)
            await session.commit()
            await session.refresh(project)
            yield project
            # Cleanup
            await session.delete(project)
            await session.commit()
    @pytest.mark.asyncio
    async def test_validate_server_not_exists(self, active_user, test_project, client: AsyncClient):  # noqa: ARG002
        """Test validation when server doesn't exist."""
        from langflow.services.deps import get_settings_service, get_storage_service

        async with session_scope() as session:
            storage_service = get_storage_service()
            settings_service = get_settings_service()
            result = await validate_mcp_server_for_project(
                test_project.id, test_project.name, active_user, session, storage_service, settings_service
            )
            # No server registered yet -> safe-to-create result with no config.
            assert result.server_exists is False
            assert result.project_id_matches is False
            # Expected name is "lf-" + sanitized project name ("Test Project").
            assert result.server_name == "lf-test_project"
            assert result.existing_config is None
            assert result.conflict_message == ""
    @pytest.mark.asyncio
    @pytest.mark.parametrize("transport", ["streamable", "sse"])
    async def test_validate_server_exists_project_matches(
        self, active_user, test_project, created_api_key, client: AsyncClient, transport
    ):
        """Test validation when server exists and project ID matches."""
        # Server URL embeds test_project.id, so the validator should match it.
        _, server_config = _build_server_config(client.base_url, test_project.id, transport)
        # Create MCP server via API
        response = await client.post(
            "/api/v2/mcp/servers/lf-test_project", json=server_config, headers={"x-api-key": created_api_key.api_key}
        )
        assert response.status_code == 200
        from langflow.services.deps import get_settings_service, get_storage_service

        async with session_scope() as session:
            storage_service = get_storage_service()
            settings_service = get_settings_service()
            result = await validate_mcp_server_for_project(
                test_project.id, test_project.name, active_user, session, storage_service, settings_service
            )
            # Existing server owned by this project -> skip, not conflict.
            assert result.server_exists is True
            assert result.project_id_matches is True
            assert result.server_name == "lf-test_project"
            assert result.existing_config == server_config
            assert result.conflict_message == ""
        # Cleanup - delete the server
        await client.delete("/api/v2/mcp/servers/lf-test_project", headers={"x-api-key": created_api_key.api_key})
    @pytest.mark.asyncio
    @pytest.mark.parametrize("transport", ["streamable", "sse"])
    async def test_validate_server_exists_project_doesnt_match(
        self, active_user, test_project, created_api_key, client: AsyncClient, transport
    ):
        """Test validation when server exists but project ID doesn't match."""
        # Register the server under test_project's derived name but with a
        # URL pointing at a different (random) project id.
        other_project_id = uuid4()
        server_name = "lf-test_project"
        _, server_config = _build_server_config(client.base_url, other_project_id, transport)
        # Create MCP server with different project ID via API
        response = await client.post(
            f"/api/v2/mcp/servers/{server_name}", json=server_config, headers={"x-api-key": created_api_key.api_key}
        )
        assert response.status_code == 200
        from langflow.services.deps import get_settings_service, get_storage_service

        async with session_scope() as session:
            storage_service = get_storage_service()
            settings_service = get_settings_service()
            result = await validate_mcp_server_for_project(
                test_project.id, test_project.name, active_user, session, storage_service, settings_service, "create"
            )
            # Name taken by another project -> conflict with a "create" message.
            assert result.server_exists is True
            assert result.project_id_matches is False
            assert result.server_name == server_name
            assert result.existing_config == server_config
            assert "MCP server name conflict" in result.conflict_message
            assert str(test_project.id) in result.conflict_message
        # Cleanup - delete the server
        await client.delete(f"/api/v2/mcp/servers/{server_name}", headers={"x-api-key": created_api_key.api_key})
    @pytest.mark.asyncio
    @pytest.mark.parametrize("transport", ["streamable", "sse"])
    async def test_validate_server_different_operations_messages(
        self, active_user, test_project, created_api_key, client: AsyncClient, transport
    ):
        """Test different conflict messages for different operations.

        The same conflicting server should yield an operation-specific message
        for "create", "update", and "delete".
        """
        other_project_id = uuid4()
        server_name = "lf-test_project"
        _, server_config = _build_server_config(client.base_url, other_project_id, transport)
        # Create MCP server with different project ID via API
        response = await client.post(
            f"/api/v2/mcp/servers/{server_name}", json=server_config, headers={"x-api-key": created_api_key.api_key}
        )
        assert response.status_code == 200

        from langflow.services.deps import get_settings_service, get_storage_service

        async with session_scope() as session:
            storage_service = get_storage_service()
            settings_service = get_settings_service()
            # Test create operation
            result = await validate_mcp_server_for_project(
                test_project.id, test_project.name, active_user, session, storage_service, settings_service, "create"
            )
            assert "Cannot create MCP server" in result.conflict_message
            # Test update operation
            result = await validate_mcp_server_for_project(
                test_project.id, test_project.name, active_user, session, storage_service, settings_service, "update"
            )
            assert "Cannot update MCP server" in result.conflict_message
            # Test delete operation
            result = await validate_mcp_server_for_project(
                test_project.id, test_project.name, active_user, session, storage_service, settings_service, "delete"
            )
            assert "Cannot delete MCP server" in result.conflict_message

        # Cleanup - delete the server
        await client.delete(f"/api/v2/mcp/servers/{server_name}", headers={"x-api-key": created_api_key.api_key})
    @pytest.mark.asyncio
    async def test_validate_server_exception_handling(self, active_user, test_project, client: AsyncClient):  # noqa: ARG002
        """Test exception handling during validation.

        If listing servers blows up, validation must fail open: it returns a
        "no server" result so the caller's operation can still proceed.
        """
        from langflow.services.deps import get_settings_service, get_storage_service

        async with session_scope() as session:
            storage_service = get_storage_service()
            settings_service = get_settings_service()
            # Mock get_server_list to raise an exception
            with patch("langflow.api.utils.mcp.config_utils.get_server_list") as mock_get_server_list:
                mock_get_server_list.side_effect = Exception("Test error")
                result = await validate_mcp_server_for_project(
                    test_project.id, test_project.name, active_user, session, storage_service, settings_service
                )
                # Should return result allowing operation to proceed on validation failure
                assert result.server_exists is False
                assert result.project_id_matches is False
                assert result.server_name == "lf-test_project"
                assert result.existing_config is None
                assert result.conflict_message == ""
class TestAutoConfigureStarterProjectsMcp:
    """Test the auto_configure_starter_projects_mcp function using real API calls."""

    @pytest.fixture
    async def sample_user_with_starter_project(self, client: AsyncClient):  # noqa: ARG002
        """Create a sample user with starter project for testing.

        Yields (user, starter_folder, flow); rows are deleted again after the
        test in FK-safe order (flow -> folder -> user).
        """
        user_id = uuid4()
        project_id = uuid4()
        flow_id = uuid4()
        async with session_scope() as session:
            # Create user
            user = User(id=user_id, username=f"test_starter_user_{user_id}", password="hashed_password")  # noqa: S106
            session.add(user)
            # Create starter folder
            starter_folder = Folder(
                id=project_id, name=DEFAULT_FOLDER_NAME, user_id=user_id, description="My Collection"
            )
            session.add(starter_folder)
            # Create flow in starter folder
            flow = Flow(
                id=flow_id,
                name="Test Starter Flow",
                description="A test starter flow",
                folder_id=project_id,
                user_id=user_id,
                is_component=False,
                mcp_enabled=None,  # Explicitly set to None to bypass default False
                action_name=None,
                action_description=None,
            )
            session.add(flow)
            await session.commit()

        yield user, starter_folder, flow

        # Cleanup
        async with session_scope() as session:
            # Delete flow first (foreign key dependency)
            flow_to_delete = await session.get(Flow, flow_id)
            if flow_to_delete:
                await session.delete(flow_to_delete)
            # Delete folder
            folder_to_delete = await session.get(Folder, project_id)
            if folder_to_delete:
                await session.delete(folder_to_delete)
            # Delete user
            user_to_delete = await session.get(User, user_id)
            if user_to_delete:
                await session.delete(user_to_delete)
            await session.commit()

    @pytest.mark.asyncio
    async def test_auto_configure_disabled(self, client: AsyncClient):  # noqa: ARG002
        """Test auto-configure when add_projects_to_mcp_servers is disabled."""
        async with session_scope() as session:
            from langflow.services.deps import get_settings_service

            settings_service = get_settings_service()
            original_setting = settings_service.settings.add_projects_to_mcp_servers
            try:
                # Temporarily disable the setting
                settings_service.settings.add_projects_to_mcp_servers = False
                # This should return early without doing anything
                await auto_configure_starter_projects_mcp(session)
                # No assertions needed - just ensuring no exceptions are raised
            finally:
                # Restore original setting
                settings_service.settings.add_projects_to_mcp_servers = original_setting

    @pytest.mark.asyncio
    async def test_auto_configure_no_users(self, client: AsyncClient):  # noqa: ARG002
        """Test auto-configure when no users exist."""
        async with session_scope() as session:
            from langflow.services.deps import get_settings_service

            settings_service = get_settings_service()
            original_setting = settings_service.settings.add_projects_to_mcp_servers
            try:
                # Enable the setting
                settings_service.settings.add_projects_to_mcp_servers = True
                # Delete all users for this test
                users = (await session.exec(select(User))).all()
                for user in users:
                    await session.delete(user)
                await session.commit()
                # This should handle empty users list gracefully
                await auto_configure_starter_projects_mcp(session)
                # No assertions needed - just ensuring no exceptions are raised
            finally:
                # Restore original setting
                settings_service.settings.add_projects_to_mcp_servers = original_setting

    @pytest.mark.asyncio
    async def test_auto_configure_success(self, sample_user_with_starter_project, client: AsyncClient):  # noqa: ARG002
        """Test successful auto-configuration of starter projects."""
        _, starter_folder, flow = sample_user_with_starter_project
        async with session_scope() as session:
            from langflow.services.deps import get_settings_service

            settings_service = get_settings_service()
            original_setting = settings_service.settings.add_projects_to_mcp_servers
            try:
                # Enable the setting
                settings_service.settings.add_projects_to_mcp_servers = True
                await auto_configure_starter_projects_mcp(session)
                # Note: Due to database constraints, mcp_enabled defaults to False instead of None
                # This test verifies the function runs without error even when
                # flows are already configured (mcp_enabled=False).
                # TODO: add test to check the mcp_enabled is False/ True for flows and is that enabled in the MCP server
                updated_flow = await session.get(Flow, flow.id)
                assert updated_flow.mcp_enabled is False  # Remains unchanged due to database default
                # Verify starter folder exists
                updated_folder = await session.get(Folder, starter_folder.id)
                assert updated_folder is not None
            finally:
                # Restore original setting
                settings_service.settings.add_projects_to_mcp_servers = original_setting

    @pytest.mark.asyncio
    async def test_auto_configure_user_without_starter_folder(self, client: AsyncClient):  # noqa: ARG002
        """Test auto-configure for user without starter folder."""
        user_id = uuid4()
        async with session_scope() as session:
            from langflow.services.deps import get_settings_service

            settings_service = get_settings_service()
            original_setting = settings_service.settings.add_projects_to_mcp_servers
            try:
                # Enable the setting
                settings_service.settings.add_projects_to_mcp_servers = True
                # Create user without starter folder
                user = User(id=user_id, username="user_no_starter", password="hashed_password")  # noqa: S106
                session.add(user)
                await session.commit()
                # This should handle missing starter folder gracefully
                await auto_configure_starter_projects_mcp(session)
                # No assertions needed - just ensuring no exceptions are raised
            finally:
                # Restore original setting
                settings_service.settings.add_projects_to_mcp_servers = original_setting
                # Cleanup
                user_to_delete = await session.get(User, user_id)
                if user_to_delete:
                    await session.delete(user_to_delete)
                await session.commit()
class TestMultiUserMCPServerAccess:
    """Test multi-user access control for MCP servers."""

    @pytest.mark.asyncio
    @pytest.mark.parametrize("transport", ["streamable", "sse"])
    async def test_cross_user_mcp_server_access_prevention(
        self,
        client: AsyncClient,
        user_one_api_key: str,
        user_two_api_key: str,
        transport,
    ):
        """Verify that users cannot access or modify each other's MCP servers, even if they have the same name.

        Walks the full lifecycle (create / read / update / delete) for two users
        sharing one server name and checks each step is isolated per user.
        """
        server_name = f"shared-server-name-{uuid4()}"
        if transport == "streamable":
            config_one = {
                "command": "uvx",
                "args": ["mcp-proxy", "--transport", "streamablehttp", f"url-one-{uuid4()}"],
            }
            config_two = {
                "command": "uvx",
                "args": ["mcp-proxy", "--transport", "streamablehttp", f"url-two-{uuid4()}"],
            }
            updated_config_one = {
                "command": "uvx",
                "args": ["mcp-proxy", "--transport", "streamablehttp", f"updated-url-one-{uuid4()}"],
            }
        else:
            config_one = {"command": "uvx", "args": ["mcp-proxy", f"url-one-{uuid4()}"]}
            config_two = {"command": "uvx", "args": ["mcp-proxy", f"url-two-{uuid4()}"]}
            updated_config_one = {"command": "uvx", "args": ["mcp-proxy", f"updated-url-one-{uuid4()}"]}
        # User One creates a server
        response = await client.post(
            f"/api/v2/mcp/servers/{server_name}",
            json=config_one,
            headers={"x-api-key": user_one_api_key},
        )
        assert response.status_code == 200
        assert response.json() == config_one
        # User Two creates a server with the same name
        response = await client.post(
            f"/api/v2/mcp/servers/{server_name}",
            json=config_two,
            headers={"x-api-key": user_two_api_key},
        )
        assert response.status_code == 200
        assert response.json() == config_two
        # Verify each user gets their own server config
        response_one = await client.get(f"/api/v2/mcp/servers/{server_name}", headers={"x-api-key": user_one_api_key})
        assert response_one.status_code == 200
        assert response_one.json() == config_one
        response_two = await client.get(f"/api/v2/mcp/servers/{server_name}", headers={"x-api-key": user_two_api_key})
        assert response_two.status_code == 200
        assert response_two.json() == config_two
        # User One updates their server
        response = await client.patch(
            f"/api/v2/mcp/servers/{server_name}",
            json=updated_config_one,
            headers={"x-api-key": user_one_api_key},
        )
        assert response.status_code == 200
        assert response.json() == updated_config_one
        # Verify User One's server is updated and User Two's is not
        response_one = await client.get(f"/api/v2/mcp/servers/{server_name}", headers={"x-api-key": user_one_api_key})
        assert response_one.status_code == 200
        assert response_one.json() == updated_config_one
        response_two = await client.get(f"/api/v2/mcp/servers/{server_name}", headers={"x-api-key": user_two_api_key})
        assert response_two.status_code == 200
        assert response_two.json() == config_two, "User Two's server should not be affected by User One's update"
        # User One tries to delete User Two's server (should only delete their own)
        response = await client.delete(f"/api/v2/mcp/servers/{server_name}", headers={"x-api-key": user_one_api_key})
        assert response.status_code == 200
        # Verify User One's server is deleted
        response_one = await client.get(f"/api/v2/mcp/servers/{server_name}", headers={"x-api-key": user_one_api_key})
        assert response_one.json() is None
        # Verify User Two's server still exists
        response_two = await client.get(f"/api/v2/mcp/servers/{server_name}", headers={"x-api-key": user_two_api_key})
        assert response_two.status_code == 200
        assert response_two.json() == config_two, "User Two's server should not be affected by User One's delete"
        # Cleanup: User Two deletes their server
        response = await client.delete(f"/api/v2/mcp/servers/{server_name}", headers={"x-api-key": user_two_api_key})
        assert response.status_code == 200
        response_two = await client.get(f"/api/v2/mcp/servers/{server_name}", headers={"x-api-key": user_two_api_key})
        assert response_two.json() is None
class TestMCPWithDefaultFolderName:
    """Test MCP configuration with different DEFAULT_FOLDER_NAME values."""

    @pytest.mark.asyncio
    async def test_mcp_finds_default_folder_standard_mode(self, client: AsyncClient):  # noqa: ARG002
        """Test that MCP finds the correct folder in standard mode (Starter Project)."""
        user_id = uuid4()
        project_id = uuid4()
        flow_id = uuid4()
        async with session_scope() as session:
            # Create user
            user = User(id=user_id, username=f"test_default_folder_{user_id}", password="hashed_password")  # noqa: S106
            session.add(user)
            # Create folder with DEFAULT_FOLDER_NAME (should match current setting)
            folder = Folder(id=project_id, name=DEFAULT_FOLDER_NAME, user_id=user_id, description="Test folder")
            session.add(folder)
            # Create flow in folder
            flow = Flow(
                id=flow_id,
                name="Test Flow",
                description="A test flow",
                folder_id=project_id,
                user_id=user_id,
                is_component=False,
                mcp_enabled=None,
            )
            session.add(flow)
            await session.commit()
        try:
            async with session_scope() as session:
                from langflow.services.deps import get_settings_service

                settings_service = get_settings_service()
                original_setting = settings_service.settings.add_projects_to_mcp_servers
                try:
                    settings_service.settings.add_projects_to_mcp_servers = True
                    await auto_configure_starter_projects_mcp(session)
                    # Verify folder was found and processed
                    updated_folder = await session.get(Folder, project_id)
                    assert updated_folder is not None
                    assert updated_folder.name == DEFAULT_FOLDER_NAME
                finally:
                    settings_service.settings.add_projects_to_mcp_servers = original_setting
        finally:
            # Cleanup (FK-safe order: flow -> folder -> user)
            async with session_scope() as session:
                flow_to_delete = await session.get(Flow, flow_id)
                if flow_to_delete:
                    await session.delete(flow_to_delete)
                folder_to_delete = await session.get(Folder, project_id)
                if folder_to_delete:
                    await session.delete(folder_to_delete)
                user_to_delete = await session.get(User, user_id)
                if user_to_delete:
                    await session.delete(user_to_delete)
                await session.commit()

    @pytest.mark.asyncio
    async def test_mcp_with_legacy_folder_after_migration(self, client: AsyncClient):  # noqa: ARG002
        """Test that MCP finds migrated folders after setting custom DEFAULT_FOLDER_NAME."""
        user_id = uuid4()
        project_id = uuid4()
        flow_id = uuid4()
        # Only run this test when DEFAULT_FOLDER_NAME is set to custom value (e.g., "OpenRAG")
        if DEFAULT_FOLDER_NAME in ["Starter Project", "My Collection"]:
            pytest.skip("Test only applicable when DEFAULT_FOLDER_NAME is set to custom value")
        async with session_scope() as session:
            # Create user
            user = User(id=user_id, username=f"test_migrated_{user_id}", password="hashed_password")  # noqa: S106
            session.add(user)
            # Create folder with legacy name that will be migrated
            legacy_folder = Folder(id=project_id, name="Starter Project", user_id=user_id, description="Legacy folder")
            session.add(legacy_folder)
            # Create flow in folder
            flow = Flow(
                id=flow_id,
                name="Test Flow in Legacy Folder",
                description="A test flow",
                folder_id=project_id,
                user_id=user_id,
                is_component=False,
                mcp_enabled=None,
            )
            session.add(flow)
            await session.commit()
        try:
            # Trigger migration by calling get_or_create_default_folder
            from langflow.initial_setup.setup import get_or_create_default_folder

            async with session_scope() as session:
                migrated_folder = await get_or_create_default_folder(session, user_id)
                assert migrated_folder.name == DEFAULT_FOLDER_NAME
                assert migrated_folder.id == project_id  # Same folder, renamed
            # Now test that MCP can find the migrated folder
            async with session_scope() as session:
                from langflow.services.deps import get_settings_service

                settings_service = get_settings_service()
                original_setting = settings_service.settings.add_projects_to_mcp_servers
                try:
                    settings_service.settings.add_projects_to_mcp_servers = True
                    await auto_configure_starter_projects_mcp(session)
                    # Verify MCP found the migrated folder
                    updated_folder = await session.get(Folder, project_id)
                    assert updated_folder is not None
                    assert updated_folder.name == DEFAULT_FOLDER_NAME
                finally:
                    settings_service.settings.add_projects_to_mcp_servers = original_setting
        finally:
            # Cleanup (FK-safe order: flow -> folder -> user)
            async with session_scope() as session:
                flow_to_delete = await session.get(Flow, flow_id)
                if flow_to_delete:
                    await session.delete(flow_to_delete)
                folder_to_delete = await session.get(Folder, project_id)
                if folder_to_delete:
                    await session.delete(folder_to_delete)
                user_to_delete = await session.get(User, user_id)
                if user_to_delete:
                    await session.delete(user_to_delete)
                await session.commit()

    @pytest.mark.asyncio
    async def test_mcp_skips_wrong_folder_name(self, client: AsyncClient):  # noqa: ARG002
        """Test that MCP skips folders that don't match DEFAULT_FOLDER_NAME."""
        user_id = uuid4()
        project_id = uuid4()
        flow_id = uuid4()
        async with session_scope() as session:
            # Create user
            user = User(id=user_id, username=f"test_wrong_folder_{user_id}", password="hashed_password")  # noqa: S106
            session.add(user)
            # Create folder with different name
            folder = Folder(id=project_id, name="Some Other Folder", user_id=user_id, description="Wrong folder")
            session.add(folder)
            # Create flow in folder
            flow = Flow(
                id=flow_id,
                name="Test Flow",
                description="A test flow",
                folder_id=project_id,
                user_id=user_id,
                is_component=False,
                mcp_enabled=None,
            )
            session.add(flow)
            await session.commit()
        try:
            async with session_scope() as session:
                from langflow.services.deps import get_settings_service

                settings_service = get_settings_service()
                original_setting = settings_service.settings.add_projects_to_mcp_servers
                try:
                    settings_service.settings.add_projects_to_mcp_servers = True
                    # Should not raise an error, just skip this user
                    await auto_configure_starter_projects_mcp(session)
                    # Verify flow was NOT configured (still None or False)
                    updated_flow = await session.get(Flow, flow_id)
                    assert updated_flow.mcp_enabled in [None, False]
                finally:
                    settings_service.settings.add_projects_to_mcp_servers = original_setting
        finally:
            # Cleanup (FK-safe order: flow -> folder -> user)
            async with session_scope() as session:
                flow_to_delete = await session.get(Flow, flow_id)
                if flow_to_delete:
                    await session.delete(flow_to_delete)
                folder_to_delete = await session.get(Folder, project_id)
                if folder_to_delete:
                    await session.delete(folder_to_delete)
                user_to_delete = await session.get(User, user_id)
                if user_to_delete:
                    await session.delete(user_to_delete)
                await session.commit()
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/api/utils/test_config_utils.py",
"license": "MIT License",
"lines": 611,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/lfx/tests/unit/utils/test_container_utils.py | """Test container detection and URL transformation utilities."""
import socket
from pathlib import Path
from unittest.mock import mock_open, patch
from lfx.utils.util import detect_container_environment, get_container_host, transform_localhost_url
class TestDetectContainerEnvironment:
    """Test the detect_container_environment function.

    Detection is exercised through three mocked signals: the /.dockerenv
    marker file, the contents of /proc/self/cgroup, and the ``container``
    environment variable.
    """

    def test_detects_docker_via_dockerenv(self):
        """Test detection of Docker via .dockerenv file."""
        with patch.object(Path, "exists", return_value=True):
            result = detect_container_environment()
            assert result == "docker"

    def test_detects_docker_via_cgroup(self):
        """Test detection of Docker via /proc/self/cgroup."""
        mock_cgroup_content = """12:cpuset:/docker/abc123
11:memory:/docker/abc123
10:devices:/docker/abc123"""
        mock_file = mock_open(read_data=mock_cgroup_content)
        with (
            patch.object(Path, "exists", return_value=False),
            patch.object(Path, "open", mock_file),
        ):
            result = detect_container_environment()
            assert result == "docker"

    def test_detects_podman_via_cgroup(self):
        """Test detection of Podman via /proc/self/cgroup."""
        mock_cgroup_content = """12:cpuset:/podman/xyz789
11:memory:/podman/xyz789"""
        mock_file = mock_open(read_data=mock_cgroup_content)
        with (
            patch.object(Path, "exists", return_value=False),
            patch.object(Path, "open", mock_file),
        ):
            result = detect_container_environment()
            assert result == "podman"

    def test_detects_podman_via_env_var(self):
        """Test detection of Podman via container environment variable."""
        with (
            patch.object(Path, "exists", return_value=False),
            patch("builtins.open", side_effect=FileNotFoundError),
            patch("os.getenv", return_value="podman"),
        ):
            result = detect_container_environment()
            assert result == "podman"

    def test_returns_none_when_not_in_container(self):
        """Test returns None when not running in a container."""
        # cgroup paths of "/" are the host pattern, not a container.
        mock_cgroup_content = """12:cpuset:/
11:memory:/"""
        mock_file = mock_open(read_data=mock_cgroup_content)
        with (
            patch.object(Path, "exists", return_value=False),
            patch.object(Path, "open", mock_file),
            patch("os.getenv", return_value=None),
        ):
            result = detect_container_environment()
            assert result is None

    def test_handles_missing_cgroup_file(self):
        """Test gracefully handles missing /proc/self/cgroup file."""
        with (
            patch.object(Path, "exists", return_value=False),
            patch.object(Path, "open", side_effect=FileNotFoundError),
            patch("os.getenv", return_value=None),
        ):
            result = detect_container_environment()
            assert result is None

    def test_handles_permission_error_on_cgroup(self):
        """Test gracefully handles permission error on /proc/self/cgroup."""
        with (
            patch.object(Path, "exists", return_value=False),
            patch.object(Path, "open", side_effect=PermissionError),
            patch("os.getenv", return_value=None),
        ):
            result = detect_container_environment()
            assert result is None
class TestGetContainerHost:
    """Test the get_container_host function.

    Resolution order under test: host.docker.internal, then
    host.containers.internal, then the default gateway from /proc/net/route.
    """

    def test_returns_none_when_not_in_container(self):
        """Test returns None when not in a container."""
        with patch("lfx.utils.util.detect_container_environment", return_value=None):
            result = get_container_host()
            assert result is None

    def test_returns_docker_internal_when_resolvable(self):
        """Test returns host.docker.internal when it resolves (Docker Desktop)."""
        with (
            patch("lfx.utils.util.detect_container_environment", return_value="docker"),
            patch("socket.getaddrinfo") as mock_getaddrinfo,
        ):
            # First call succeeds (host.docker.internal resolves)
            mock_getaddrinfo.return_value = [("dummy", "data")]
            result = get_container_host()
            assert result == "host.docker.internal"
            mock_getaddrinfo.assert_called_once_with("host.docker.internal", None)

    def test_returns_containers_internal_when_docker_internal_fails(self):
        """Test returns host.containers.internal when host.docker.internal doesn't resolve."""
        with (
            patch("lfx.utils.util.detect_container_environment", return_value="podman"),
            patch("socket.getaddrinfo") as mock_getaddrinfo,
        ):
            # First call fails (host.docker.internal doesn't resolve)
            # Second call succeeds (host.containers.internal resolves)
            def side_effect(hostname, _port):
                msg = "Name or service not known"
                if hostname == "host.docker.internal":
                    raise socket.gaierror(msg)
                return [("dummy", "data")]

            mock_getaddrinfo.side_effect = side_effect
            result = get_container_host()
            assert result == "host.containers.internal"

    def test_returns_gateway_ip_when_no_special_hosts_resolve(self):
        """Test returns gateway IP from routing table when special hostnames don't resolve (Linux)."""
        # Mock routing table whose default-route gateway decodes to 192.168.17.1
        # Gateway hex 0111A8C0 = 01 11 A8 C0 in little-endian = C0.A8.11.01 = 192.168.17.1
        # Format: Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT
        mock_route_content = """Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT
eth0 00000000 0111A8C0 0003 0 0 0 00000000 0 0 0
eth0 0011A8C0 00000000 0001 0 0 0 00FFFFFF 0 0 0"""
        mock_file = mock_open(read_data=mock_route_content)
        with (
            patch("lfx.utils.util.detect_container_environment", return_value="docker"),
            patch("socket.getaddrinfo", side_effect=socket.gaierror),
            patch.object(Path, "open", mock_file),
        ):
            result = get_container_host()
            # 0111A8C0 reversed in pairs: C0.A8.11.01 = 192.168.17.1
            assert result == "192.168.17.1"

    def test_returns_none_when_all_methods_fail(self):
        """Test returns None when all detection methods fail."""
        with (
            patch("lfx.utils.util.detect_container_environment", return_value="docker"),
            patch("socket.getaddrinfo", side_effect=socket.gaierror),
            patch.object(Path, "open", side_effect=FileNotFoundError),
        ):
            result = get_container_host()
            assert result is None

    def test_handles_malformed_routing_table(self):
        """Test gracefully handles malformed routing table."""
        mock_route_content = """invalid data here"""
        mock_file = mock_open(read_data=mock_route_content)
        with (
            patch("lfx.utils.util.detect_container_environment", return_value="docker"),
            patch("socket.getaddrinfo", side_effect=socket.gaierror),
            patch.object(Path, "open", mock_file),
        ):
            result = get_container_host()
            assert result is None
class TestTransformLocalhostUrl:
    """Unit tests for transform_localhost_url.

    Each case pins the container host that get_container_host reports and
    checks how localhost/127.0.0.1 URLs are (or are not) rewritten.
    """

    @staticmethod
    def _transform(url, container_host):
        """Run transform_localhost_url while get_container_host reports *container_host*."""
        with patch("lfx.utils.util.get_container_host", return_value=container_host):
            return transform_localhost_url(url)

    def test_returns_original_url_when_not_in_container(self):
        """Outside a container the URL passes through untouched."""
        original = "http://localhost:5001/api"
        assert self._transform(original, None) == original

    def test_transforms_localhost_to_docker_internal(self):
        """localhost is rewritten to host.docker.internal under Docker."""
        rewritten = self._transform("http://localhost:5001/api", "host.docker.internal")
        assert rewritten == "http://host.docker.internal:5001/api"

    def test_transforms_127001_to_docker_internal(self):
        """127.0.0.1 is rewritten to host.docker.internal under Docker."""
        rewritten = self._transform("http://127.0.0.1:5001/api", "host.docker.internal")
        assert rewritten == "http://host.docker.internal:5001/api"

    def test_transforms_localhost_to_containers_internal(self):
        """localhost is rewritten to host.containers.internal under Podman."""
        rewritten = self._transform("http://localhost:5001/api", "host.containers.internal")
        assert rewritten == "http://host.containers.internal:5001/api"

    def test_transforms_127001_to_containers_internal(self):
        """127.0.0.1 is rewritten to host.containers.internal under Podman."""
        rewritten = self._transform("http://127.0.0.1:5001/api", "host.containers.internal")
        assert rewritten == "http://host.containers.internal:5001/api"

    def test_transforms_localhost_to_gateway_ip_on_linux(self):
        """localhost is rewritten to the gateway IP in plain Linux containers."""
        rewritten = self._transform("http://localhost:5001/api", "172.17.0.1")
        assert rewritten == "http://172.17.0.1:5001/api"

    def test_transforms_127001_to_gateway_ip_on_linux(self):
        """127.0.0.1 is rewritten to the gateway IP in plain Linux containers."""
        rewritten = self._transform("http://127.0.0.1:5001/api", "172.17.0.1")
        assert rewritten == "http://172.17.0.1:5001/api"

    def test_does_not_transform_non_localhost_urls(self):
        """Hosts other than localhost/127.0.0.1 are left alone."""
        original = "http://example.com:5001/api"
        assert self._transform(original, "host.docker.internal") == original

    def test_transforms_url_without_path(self):
        """Rewriting works when the URL carries no path component."""
        rewritten = self._transform("http://localhost:5001", "host.docker.internal")
        assert rewritten == "http://host.docker.internal:5001"

    def test_transforms_url_with_complex_path(self):
        """Path and query string survive the host rewrite intact."""
        rewritten = self._transform(
            "http://localhost:5001/api/v1/convert?format=json&timeout=30", "host.docker.internal"
        )
        assert rewritten == "http://host.docker.internal:5001/api/v1/convert?format=json&timeout=30"

    def test_transforms_https_url(self):
        """The https scheme is preserved through the rewrite."""
        rewritten = self._transform("https://localhost:5001/api", "host.docker.internal")
        assert rewritten == "https://host.docker.internal:5001/api"

    def test_transforms_url_without_port(self):
        """Rewriting works when the URL has no explicit port."""
        rewritten = self._transform("http://localhost/api", "host.docker.internal")
        assert rewritten == "http://host.docker.internal/api"

    def test_handles_none_url_gracefully(self):
        """A None URL comes back as None rather than raising TypeError."""
        assert self._transform(None, "host.docker.internal") is None

    def test_handles_empty_string_url_gracefully(self):
        """An empty-string URL comes back unchanged."""
        assert self._transform("", "host.docker.internal") == ""

    def test_handles_none_url_when_not_in_container(self):
        """A None URL is also returned as-is outside a container."""
        assert self._transform(None, None) is None

    def test_handles_empty_string_url_when_not_in_container(self):
        """An empty-string URL is also returned as-is outside a container."""
        assert self._transform("", None) == ""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/tests/unit/utils/test_container_utils.py",
"license": "MIT License",
"lines": 239,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/lfx/tests/unit/base/data/test_base_file.py | """Tests for BaseFileComponent.load_files_message method."""
import json
import tempfile
from pathlib import Path
from lfx.base.data.base_file import BaseFileComponent
from lfx.schema.data import Data
from lfx.schema.message import Message
class TestFileComponent(BaseFileComponent):
    """Test implementation of BaseFileComponent for testing."""

    # Extensions this test component accepts; others are ignored/rejected by the base class.
    VALID_EXTENSIONS = ["txt", "json", "csv"]

    def __init__(self, **data):
        """Initialize with proper component setup."""
        super().__init__(**data)
        # Initialize the inputs to avoid AttributeError
        self.set_attributes(
            {
                "path": [],
                "file_path": None,
                "separator": "\n\n",
                "silent_errors": False,
                "delete_server_file_after_processing": True,
                "ignore_unsupported_extensions": True,
                "ignore_unspecified_files": False,
            }
        )

    def process_files(self, file_list):
        """Test implementation that creates Data objects from file content.

        For each existing file: .json files are parsed into structured Data
        (falling back to raw text on JSONDecodeError); all other extensions
        are wrapped as {"text": ..., "file_path": ...}. Missing files are
        silently dropped from the result.
        """
        processed_files = []
        for file in file_list:
            if file.path.exists():
                content = file.path.read_text(encoding="utf-8")
                # Create Data objects based on file extension
                if file.path.suffix == ".json":
                    try:
                        json_data = json.loads(content)
                        data = Data(data=json_data)
                    except json.JSONDecodeError:
                        # Invalid JSON: fall back to treating it as plain text.
                        data = Data(data={"text": content, "file_path": str(file.path)})
                else:
                    data = Data(data={"text": content, "file_path": str(file.path)})
                file.data = [data]
                processed_files.append(file)
        return processed_files
class TestLoadFilesMessage:
    """Test cases for BaseFileComponent.load_files_message method.

    Each test writes real files into a per-test temporary directory
    (created in setup_method, removed in teardown_method) and asserts on
    the Message text produced by load_files_message.
    """
    def setup_method(self):
        """Set up test fixtures."""
        # Fresh component and isolated temp directory for every test.
        self.component = TestFileComponent()
        self.temp_dir = tempfile.TemporaryDirectory()
        self.temp_path = Path(self.temp_dir.name)
    def teardown_method(self):
        """Clean up test fixtures."""
        self.temp_dir.cleanup()
    def test_load_files_message_empty_data(self):
        """Test load_files_message with no files returns empty Message."""
        # Set empty path
        self.component.path = []
        result = self.component.load_files_message()
        assert isinstance(result, Message)
        # When no files are provided, load_files_core returns [Data()] which has data={}
        # When get_text() returns None/empty, the method falls back to orjson.dumps({})
        assert result.text in {"{}", ""}
    def test_load_files_message_with_simple_text_file(self):
        """Test load_files_message with a simple text file."""
        # Create a simple text file
        text_file = self.temp_path / "simple.txt"
        text_file.write_text("Hello world", encoding="utf-8")
        self.component.path = [str(text_file)]
        result = self.component.load_files_message()
        assert isinstance(result, Message)
        assert result.text == "Hello world"
    def test_load_files_message_with_json_dict_content(self):
        """Test load_files_message with JSON file containing dict (simulates get_text() returning dict)."""
        # Create JSON file with dict content
        json_content = {"content": "dict content", "metadata": "extra info", "type": "test"}
        json_file = self.temp_path / "test.json"
        json_file.write_text(json.dumps(json_content), encoding="utf-8")
        self.component.path = [str(json_file)]
        result = self.component.load_files_message()
        assert isinstance(result, Message)
        # Should contain the JSON content as string
        result_text = result.text
        assert "content" in result_text
        assert "dict content" in result_text
        assert "metadata" in result_text
    def test_load_files_message_with_multiple_files(self):
        """Test load_files_message with multiple files."""
        # Create multiple text files
        file1 = self.temp_path / "first.txt"
        file1.write_text("First text", encoding="utf-8")
        file2 = self.temp_path / "second.txt"
        file2.write_text("Second text", encoding="utf-8")
        self.component.path = [str(file1), str(file2)]
        result = self.component.load_files_message()
        assert isinstance(result, Message)
        assert "First text" in result.text
        assert "Second text" in result.text
        assert "\n\n" in result.text  # Default separator
    def test_load_files_message_with_custom_separator(self):
        """Test load_files_message with custom separator."""
        self.component.separator = " | "
        # Create two text files
        file1 = self.temp_path / "first.txt"
        file1.write_text("First", encoding="utf-8")
        file2 = self.temp_path / "second.txt"
        file2.write_text("Second", encoding="utf-8")
        self.component.path = [str(file1), str(file2)]
        result = self.component.load_files_message()
        assert result.text == "First | Second"
    def test_load_files_message_with_json_complex_structure(self):
        """Test load_files_message with complex JSON structure."""
        complex_data = {
            "metadata": {"type": "document", "version": 1},
            "properties": {"author": "test", "date": "2024-01-01"},
            "content": "This should be extracted",
        }
        json_file = self.temp_path / "complex.json"
        json_file.write_text(json.dumps(complex_data), encoding="utf-8")
        self.component.path = [str(json_file)]
        result = self.component.load_files_message()
        assert isinstance(result, Message)
        # Should contain the extracted content field
        assert "This should be extracted" in result.text
    def test_load_files_message_with_json_no_common_fields(self):
        """Test with JSON that has no common text fields (should use orjson.dumps)."""
        complex_data = {
            "metadata": {"type": "document", "version": 1},
            "properties": {"author": "test", "date": "2024-01-01"},
            # No "text", "content", "value", or "message" fields
        }
        json_file = self.temp_path / "no_text_fields.json"
        json_file.write_text(json.dumps(complex_data), encoding="utf-8")
        self.component.path = [str(json_file)]
        result = self.component.load_files_message()
        assert isinstance(result, Message)
        # Should contain JSON representation since no common text fields found
        assert "metadata" in result.text
        assert "properties" in result.text
        assert "author" in result.text
    def test_load_files_message_with_none_separator(self):
        r"""Test load_files_message when separator is None (should default to \\n\\n)."""
        self.component.separator = None
        file1 = self.temp_path / "first.txt"
        file1.write_text("First", encoding="utf-8")
        file2 = self.temp_path / "second.txt"
        file2.write_text("Second", encoding="utf-8")
        self.component.path = [str(file1), str(file2)]
        result = self.component.load_files_message()
        # Should default to "\n\n" when separator is None
        assert result.text == "First\n\nSecond"
    def test_load_files_message_ensures_all_parts_are_strings(self):
        """Test that the method never tries to join non-string elements (core bug test)."""
        # Create a mixed content scenario - JSON with dict content
        dict_content = {"nested": {"data": "value"}, "another": "dict"}
        json_file = self.temp_path / "mixed_content.json"
        json_file.write_text(json.dumps(dict_content), encoding="utf-8")
        self.component.path = [str(json_file)]
        # This should not raise "sequence item 0: expected str instance, dict found"
        result = self.component.load_files_message()
        assert isinstance(result, Message)
        assert isinstance(result.text, str)
        # Verify the content was properly converted to string
        assert len(result.text) > 0
        assert "nested" in result.text or "another" in result.text
    def test_load_files_message_extract_common_text_fields(self):
        """Test extraction of common text fields like 'content', 'value', 'message'."""
        # Each case pairs an input dict with the text expected to be extracted.
        test_cases = [
            ({"content": "Content text"}, "Content text"),
            ({"value": "Value text"}, "Value text"),
            ({"message": "Message text"}, "Message text"),
            ({"some_field": "ignored", "content": "Content wins"}, "Content wins"),
        ]
        for i, (data_dict, expected_text) in enumerate(test_cases):
            json_file = self.temp_path / f"test_field_{i}.json"
            json_file.write_text(json.dumps(data_dict), encoding="utf-8")
            self.component.path = [str(json_file)]
            result = self.component.load_files_message()
            assert isinstance(result, Message)
            assert expected_text in result.text
    def test_load_files_message_mixed_file_types(self):
        """Test mixed scenarios with text files and JSON files."""
        # Create text file
        text_file = self.temp_path / "text_response.txt"
        text_file.write_text("String response", encoding="utf-8")
        # Create JSON file with dict content
        json_file = self.temp_path / "json_response.json"
        json_file.write_text(json.dumps({"parsed": "Dict content"}), encoding="utf-8")
        # Create JSON file with content field
        content_file = self.temp_path / "content_response.json"
        content_file.write_text(json.dumps({"content": "Field extraction"}), encoding="utf-8")
        self.component.path = [str(text_file), str(json_file), str(content_file)]
        result = self.component.load_files_message()
        assert isinstance(result, Message)
        result_text = result.text
        assert "String response" in result_text
        assert "Field extraction" in result_text
        # JSON content should be present in some form
        assert "parsed" in result_text or "Dict content" in result_text
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/tests/unit/base/data/test_base_file.py",
"license": "MIT License",
"lines": 198,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/lfx/tests/unit/graph/vertex/test_param_handler.py | """Tests for parameter handler table load_from_db functionality."""
from unittest.mock import MagicMock
import pytest
from lfx.graph.vertex.param_handler import ParameterHandler
from lfx.schema.table import Column
class TestParameterHandlerTableLoadFromDb:
    """Tests for table load_from_db functionality in ParameterHandler.

    The vertex is a MagicMock whose ``data`` dict mimics a node template
    with a single "table" field; tests mutate the template's table_schema
    and re-create the handler to exercise each schema shape.
    """
    def setup_method(self):
        """Set up test fixtures."""
        # Create mock vertex
        self.mock_vertex = MagicMock()
        self.mock_vertex.data = {
            "node": {
                "template": {
                    "table_field": {
                        "type": "table",
                        "table_schema": [
                            Column(name="username", load_from_db=True, default="admin"),
                            Column(name="email", load_from_db=True, default="user@example.com"),
                            Column(name="role", load_from_db=False, default="user"),
                            Column(name="active", load_from_db=False, default=True),
                        ],
                    }
                }
            }
        }
        # Create parameter handler
        self.handler = ParameterHandler(self.mock_vertex, storage_service=None)
    def test_handle_table_field_with_load_from_db_columns(self):
        """Test _handle_table_field identifies load_from_db columns correctly."""
        # Test data
        table_data = [
            {"username": "ADMIN_USER", "email": "ADMIN_EMAIL", "role": "admin", "active": True},
            {"username": "USER1", "email": "USER1_EMAIL", "role": "user", "active": False},
        ]
        params = {}
        # Call the method
        result_params = self.handler._handle_table_field("table_field", table_data, params)
        # Check that table data is preserved
        assert result_params["table_field"] == table_data
        # Check that load_from_db columns are identified
        assert "table_field_load_from_db_columns" in result_params
        load_from_db_columns = result_params["table_field_load_from_db_columns"]
        assert set(load_from_db_columns) == {"username", "email"}
        # Check that table field is added to load_from_db_fields
        assert "table:table_field" in self.handler.load_from_db_fields
    def test_handle_table_field_with_no_load_from_db_columns(self):
        """Test _handle_table_field when no columns have load_from_db=True."""
        # Update template to have no load_from_db columns
        self.mock_vertex.data["node"]["template"]["table_field"]["table_schema"] = [
            Column(name="field1", load_from_db=False, default="value1"),
            Column(name="field2", load_from_db=False, default="value2"),
        ]
        # Recreate handler with updated template
        self.handler = ParameterHandler(self.mock_vertex, storage_service=None)
        table_data = [{"field1": "val1", "field2": "val2"}]
        params = {}
        # Call the method
        result_params = self.handler._handle_table_field("table_field", table_data, params)
        # Check that table data is preserved
        assert result_params["table_field"] == table_data
        # Check that no metadata is added
        assert "table_field_load_from_db_columns" not in result_params
        assert "table:table_field" not in self.handler.load_from_db_fields
    def test_handle_table_field_with_dict_schema(self):
        """Test _handle_table_field with dictionary-based schema."""
        # Update template to use dict schema instead of Column objects
        self.mock_vertex.data["node"]["template"]["table_field"]["table_schema"] = [
            {"name": "api_key", "load_from_db": True},
            {"name": "timeout", "load_from_db": False},
        ]
        # Recreate handler with updated template
        self.handler = ParameterHandler(self.mock_vertex, storage_service=None)
        table_data = [{"api_key": "MY_API_KEY", "timeout": 30}]  # pragma: allowlist secret
        params = {}
        # Call the method
        result_params = self.handler._handle_table_field("table_field", table_data, params)
        # Check that load_from_db columns are identified correctly
        load_from_db_columns = result_params["table_field_load_from_db_columns"]
        assert load_from_db_columns == ["api_key"]
        assert "table:table_field" in self.handler.load_from_db_fields
    def test_handle_table_field_with_none_value(self):
        """Test _handle_table_field with None table value."""
        params = {}
        # Call the method with None
        result_params = self.handler._handle_table_field("table_field", None, params)
        # Should return empty list
        assert result_params["table_field"] == []
        # Should not add any metadata since no schema processing occurs
        assert "table_field_load_from_db_columns" not in result_params
        assert "table:table_field" not in self.handler.load_from_db_fields
    def test_handle_table_field_with_invalid_data_type(self):
        """Test _handle_table_field with invalid data type raises ValueError."""
        params = {}
        # Test with string (invalid for table)
        with pytest.raises(ValueError, match=r"Invalid value type.*for table field"):
            self.handler._handle_table_field("table_field", "invalid_data", params)
        # Test with list of non-dicts (invalid for table)
        with pytest.raises(ValueError, match=r"Invalid value type.*for table field"):
            self.handler._handle_table_field("table_field", ["string1", "string2"], params)
    def test_handle_table_field_with_empty_table_schema(self):
        """Test _handle_table_field when table_schema is empty."""
        # Update template to have empty schema
        self.mock_vertex.data["node"]["template"]["table_field"]["table_schema"] = []
        # Recreate handler with updated template
        self.handler = ParameterHandler(self.mock_vertex, storage_service=None)
        table_data = [{"field1": "value1"}]
        params = {}
        # Call the method
        result_params = self.handler._handle_table_field("table_field", table_data, params)
        # Should preserve table data but not add metadata
        assert result_params["table_field"] == table_data
        assert "table_field_load_from_db_columns" not in result_params
        assert "table:table_field" not in self.handler.load_from_db_fields
    def test_handle_table_field_with_missing_table_schema(self):
        """Test _handle_table_field when table_schema key is missing."""
        # Update template to not have table_schema
        self.mock_vertex.data["node"]["template"]["table_field"] = {"type": "table"}
        # Recreate handler with updated template
        self.handler = ParameterHandler(self.mock_vertex, storage_service=None)
        table_data = [{"field1": "value1"}]
        params = {}
        # Call the method
        result_params = self.handler._handle_table_field("table_field", table_data, params)
        # Should preserve table data but not add metadata
        assert result_params["table_field"] == table_data
        assert "table_field_load_from_db_columns" not in result_params
        assert "table:table_field" not in self.handler.load_from_db_fields
    def test_handle_table_field_with_mixed_schema_types(self):
        """Test _handle_table_field with mixed Column objects and dicts."""
        # Update template to have mixed schema types
        self.mock_vertex.data["node"]["template"]["table_field"]["table_schema"] = [
            Column(name="col1", load_from_db=True),  # Column object
            {"name": "col2", "load_from_db": True},  # Dict
            Column(name="col3", load_from_db=False),  # Column object
            {"name": "col4", "load_from_db": False},  # Dict
        ]
        # Recreate handler with updated template
        self.handler = ParameterHandler(self.mock_vertex, storage_service=None)
        table_data = [{"col1": "val1", "col2": "val2", "col3": "val3", "col4": "val4"}]
        params = {}
        # Call the method
        result_params = self.handler._handle_table_field("table_field", table_data, params)
        # Should identify both types of load_from_db columns
        load_from_db_columns = result_params["table_field_load_from_db_columns"]
        assert set(load_from_db_columns) == {"col1", "col2"}
        assert "table:table_field" in self.handler.load_from_db_fields
class TestParameterHandlerDictField:
    """Behavioral tests for ParameterHandler._handle_dict_field."""

    def setup_method(self):
        """Build a handler around a mock vertex with an empty template."""
        vertex = MagicMock()
        vertex.data = {"node": {"template": {}}}
        self.mock_vertex = vertex
        self.handler = ParameterHandler(vertex, storage_service=None)

    def test_handle_dict_field_with_key_value_list(self):
        """A list of {"key": k, "value": v} rows collapses into one flat dict."""
        rows = [
            {"key": "header1", "value": "value1"},
            {"key": "header2", "value": "value2"},
        ]
        out = self.handler._handle_dict_field("headers", rows, {})
        assert out["headers"] == {"header1": "value1", "header2": "value2"}

    def test_handle_dict_field_with_single_key_value_item(self):
        """A single key/value row produces a one-entry dict."""
        out = self.handler._handle_dict_field("headers", [{"key": "auth", "value": "token123"}], {})
        assert out["headers"] == {"auth": "token123"}

    def test_handle_dict_field_with_flat_dict(self):
        """A plain dict is passed through unmodified."""
        mapping = {"header1": "value1", "header2": "value2"}
        out = self.handler._handle_dict_field("headers", mapping, {})
        assert out["headers"] == {"header1": "value1", "header2": "value2"}

    def test_handle_dict_field_with_empty_list(self):
        """An empty list yields an empty dict."""
        out = self.handler._handle_dict_field("headers", [], {})
        assert out["headers"] == {}

    def test_handle_dict_field_with_generic_list_of_dicts(self):
        """Dicts without the key/value pattern are merged into one mapping."""
        out = self.handler._handle_dict_field("data", [{"a": 1}, {"b": 2}], {})
        assert out["data"] == {"a": 1, "b": 2}
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/tests/unit/graph/vertex/test_param_handler.py",
"license": "MIT License",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/services/telemetry/test_telemetry_schema.py | """Comprehensive unit tests for telemetry schema classes.
Testing library and framework: pytest
"""
import re
import pytest
from langflow.services.telemetry.schema import (
ComponentPayload,
EmailPayload,
PlaygroundPayload,
RunPayload,
ShutdownPayload,
VersionPayload,
)
class TestRunPayload:
    """Behavioral tests for the RunPayload telemetry model."""

    def test_run_payload_initialization_with_valid_data(self):
        """Every explicitly supplied field is stored as given."""
        kwargs = {
            "run_is_webhook": True,
            "run_seconds": 120,
            "run_success": True,
            "run_error_message": "",
            "client_type": "oss",
            "run_id": None,
        }
        p = RunPayload(**kwargs)
        assert p.run_is_webhook is True
        assert (p.run_seconds, p.run_success, p.run_error_message) == (120, True, "")
        assert p.client_type == "oss"
        assert p.run_id is None

    def test_run_payload_initialization_with_defaults(self):
        """Omitted fields fall back to their declared defaults."""
        p = RunPayload(run_seconds=60, run_success=False, run_error_message="Test error", run_id=None)
        assert p.run_is_webhook is False  # default
        assert p.client_type is None  # default
        assert (p.run_seconds, p.run_success, p.run_error_message, p.run_id) == (60, False, "Test error", None)

    def test_run_payload_serialization(self):
        """model_dump(by_alias=True) emits camelCase keys."""
        p = RunPayload(
            run_is_webhook=True,
            run_seconds=180,
            run_success=True,
            run_error_message="",
            client_type="desktop",
            run_id=None,
        )
        dumped = p.model_dump(by_alias=True)
        expected = {
            "runIsWebhook": True,
            "runSeconds": 180,
            "runSuccess": True,
            "runErrorMessage": "",
            "clientType": "desktop",
            "runId": None,
        }
        for key, value in expected.items():
            assert dumped[key] == value

    def test_run_payload_with_negative_seconds(self):
        """The schema applies no range validation to run_seconds."""
        p = RunPayload(run_seconds=-10, run_success=True, run_error_message="", run_id=None)
        assert p.run_seconds == -10
        assert p.run_success is True

    def test_run_payload_with_long_error_message(self):
        """A very long error message is stored verbatim."""
        message = "x" * 1000
        p = RunPayload(
            run_seconds=30,
            run_success=False,
            run_error_message=message,
            client_type="oss",
            run_id=None,
        )
        assert p.run_error_message == message
        assert len(p.run_error_message) == 1000
class TestShutdownPayload:
    """Behavioral tests for the ShutdownPayload telemetry model."""

    def test_shutdown_payload_initialization(self):
        """Both fields are stored when supplied."""
        p = ShutdownPayload(time_running=3600, client_type="oss")
        assert (p.time_running, p.client_type) == (3600, "oss")

    def test_shutdown_payload_initialization_without_client_type(self):
        """client_type defaults to None when omitted."""
        p = ShutdownPayload(time_running=1800)
        assert p.time_running == 1800
        assert p.client_type is None

    def test_shutdown_payload_serialization(self):
        """Aliased dump uses camelCase keys."""
        dumped = ShutdownPayload(time_running=7200, client_type="desktop").model_dump(by_alias=True)
        assert dumped["timeRunning"] == 7200
        assert dumped["clientType"] == "desktop"

    def test_shutdown_payload_with_negative_time(self):
        """The schema applies no range validation to time_running."""
        assert ShutdownPayload(time_running=-100).time_running == -100

    def test_shutdown_payload_with_zero_time(self):
        """Zero is an accepted value for time_running."""
        assert ShutdownPayload(time_running=0).time_running == 0
class TestEmailPayload:
    """Behavioral tests for the EmailPayload telemetry model."""

    CLIENT_TYPE_DESKTOP: str = "desktop"
    TEST_EMAIL_1: str = "test@ibm.com"

    def test_email_payload_initialization(self):
        """Both fields round-trip through the model unchanged."""
        p = EmailPayload(client_type=self.CLIENT_TYPE_DESKTOP, email=self.TEST_EMAIL_1)
        assert (p.client_type, p.email) == (self.CLIENT_TYPE_DESKTOP, self.TEST_EMAIL_1)

    def test_email_payload_initialization_without_client_type(self):
        """client_type defaults to None when omitted."""
        p = EmailPayload(email=self.TEST_EMAIL_1)
        assert p.client_type is None
        assert p.email == self.TEST_EMAIL_1

    def test_email_payload_initialization_with_invalid_email(self):
        """An address whose domain lacks a period is rejected by validation."""
        payload = None
        error_message: str = (
            "value is not a valid email address: The part after the @-sign is not valid. It should have a period."
        )
        with pytest.raises(ValueError, match=re.escape(error_message)):
            payload = EmailPayload(client_type=self.CLIENT_TYPE_DESKTOP, email="test@ibm")
        # Construction raised, so the assignment never completed.
        assert payload is None

    def test_email_payload_serialization(self):
        """Aliased dump uses the camelCase clientType key."""
        dumped = EmailPayload(
            client_type=self.CLIENT_TYPE_DESKTOP,
            email=self.TEST_EMAIL_1,
        ).model_dump(by_alias=True)
        assert dumped["clientType"] == self.CLIENT_TYPE_DESKTOP
        assert dumped["email"] == self.TEST_EMAIL_1
class TestVersionPayload:
    """Behavioral tests for the VersionPayload telemetry model."""

    @staticmethod
    def _base_kwargs(**overrides):
        """Return a full set of required constructor kwargs, with overrides applied."""
        kwargs = {
            "package": "langflow",
            "version": "1.0.0",
            "platform": "Linux-5.4.0",
            "python": "3.9",
            "arch": "x86_64",
            "auto_login": False,
            "cache_type": "memory",
            "backend_only": False,
        }
        kwargs.update(overrides)
        return kwargs

    def test_version_payload_initialization(self):
        """Every supplied field is stored as given."""
        p = VersionPayload(**self._base_kwargs(client_type="oss"))
        assert p.package == "langflow"
        assert p.version == "1.0.0"
        assert p.platform == "Linux-5.4.0"
        assert p.python == "3.9"
        assert p.arch == "x86_64"
        assert p.auto_login is False
        assert p.cache_type == "memory"
        assert p.backend_only is False
        assert p.client_type == "oss"

    def test_version_payload_initialization_with_all_required_fields(self):
        """client_type defaults to None when only required fields are given."""
        p = VersionPayload(
            **self._base_kwargs(
                platform="Windows", python="3.8", auto_login=True, cache_type="redis", backend_only=True
            )
        )
        assert p.package == "langflow"
        assert p.version == "1.0.0"
        assert p.client_type is None  # default

    def test_version_payload_serialization(self):
        """Aliased dump emits camelCase keys for multi-word fields."""
        p = VersionPayload(
            **self._base_kwargs(
                version="1.5.2",
                platform="macOS-12.0",
                python="3.10",
                arch="arm64",
                auto_login=True,
                cache_type="redis",
                backend_only=True,
                client_type="desktop",
            )
        )
        dumped = p.model_dump(by_alias=True)
        expected = {
            "package": "langflow",
            "version": "1.5.2",
            "platform": "macOS-12.0",
            "python": "3.10",
            "arch": "arm64",
            "autoLogin": True,
            "cacheType": "redis",
            "backendOnly": True,
            "clientType": "desktop",
        }
        for key, value in expected.items():
            assert dumped[key] == value

    def test_version_payload_with_special_characters(self):
        """Hyphens, dots, and spaces in string fields are preserved."""
        p = VersionPayload(
            **self._base_kwargs(
                package="langflow-dev", version="1.0.0-beta.1", platform="Windows 10 Pro", python="3.9.7"
            )
        )
        assert p.package == "langflow-dev"
        assert p.version == "1.0.0-beta.1"
        assert p.platform == "Windows 10 Pro"
class TestPlaygroundPayload:
    """Behavioral tests for the PlaygroundPayload telemetry model."""

    def test_playground_payload_initialization(self):
        """Explicitly supplied fields are stored as given."""
        p = PlaygroundPayload(
            playground_seconds=45,
            playground_component_count=5,
            playground_success=True,
            playground_error_message="",
            client_type="oss",
            playground_run_id=None,
        )
        assert (p.playground_seconds, p.playground_component_count) == (45, 5)
        assert p.playground_success is True
        assert p.playground_error_message == ""
        assert p.client_type == "oss"
        assert p.playground_run_id is None

    def test_playground_payload_initialization_with_none_component_count(self):
        """A None component count is stored without coercion."""
        p = PlaygroundPayload(
            playground_seconds=30,
            playground_component_count=None,
            playground_success=True,
            playground_error_message="",
            playground_run_id=None,
        )
        assert p.playground_seconds == 30
        assert p.playground_component_count is None
        assert p.playground_success is True

    def test_playground_payload_serialization(self):
        """Aliased dump emits camelCase keys."""
        p = PlaygroundPayload(
            playground_seconds=60,
            playground_component_count=10,
            playground_success=False,
            playground_error_message="Component failed",
            client_type="desktop",
            playground_run_id=None,
        )
        dumped = p.model_dump(by_alias=True)
        expected = {
            "playgroundSeconds": 60,
            "playgroundComponentCount": 10,
            "playgroundSuccess": False,
            "playgroundErrorMessage": "Component failed",
            "clientType": "desktop",
            "playgroundRunId": None,
        }
        for key, value in expected.items():
            assert dumped[key] == value

    def test_playground_payload_with_negative_seconds(self):
        """The schema applies no range validation to playground_seconds."""
        p = PlaygroundPayload(
            playground_seconds=-10,
            playground_component_count=0,
            playground_success=True,
            playground_error_message="",
            playground_run_id=None,
        )
        assert p.playground_seconds == -10
        assert p.playground_success is True

    def test_playground_payload_with_negative_component_count(self):
        """The schema applies no range validation to playground_component_count."""
        p = PlaygroundPayload(
            playground_seconds=30,
            playground_component_count=-5,
            playground_success=True,
            playground_error_message="",
            playground_run_id=None,
        )
        assert p.playground_component_count == -5
        assert p.playground_success is True

    def test_playground_payload_with_failed_execution(self):
        """Failure state and its error message are both preserved."""
        p = PlaygroundPayload(
            playground_seconds=15,
            playground_component_count=3,
            playground_success=False,
            playground_error_message="Timeout occurred",
            client_type="oss",
            playground_run_id=None,
        )
        assert p.playground_success is False
        assert p.playground_error_message == "Timeout occurred"
class TestComponentPayload:
    """Behavioral tests for the ComponentPayload telemetry model."""

    @staticmethod
    def _make(**overrides):
        """Build a ComponentPayload from baseline kwargs with overrides applied."""
        kwargs = {
            "component_name": "TextInput",
            "component_id": "comp-123",
            "component_seconds": 2,
            "component_success": True,
            "component_error_message": None,
            "component_run_id": None,
        }
        kwargs.update(overrides)
        return ComponentPayload(**kwargs)

    def test_component_payload_initialization(self):
        """All supplied fields are stored as given."""
        p = self._make(client_type="oss")
        assert p.component_name == "TextInput"
        assert p.component_seconds == 2
        assert p.component_success is True
        assert p.component_error_message is None
        assert p.client_type == "oss"
        assert p.component_run_id is None

    def test_component_payload_initialization_with_error(self):
        """An error message and failure flag are both preserved."""
        p = self._make(
            component_name="LLMChain",
            component_id="comp-456",
            component_seconds=5,
            component_success=False,
            component_error_message="API rate limit exceeded",
            client_type="desktop",
        )
        assert p.component_name == "LLMChain"
        assert p.component_seconds == 5
        assert p.component_success is False
        assert p.component_error_message == "API rate limit exceeded"
        assert p.client_type == "desktop"
        assert p.component_run_id is None

    def test_component_payload_serialization(self):
        """Aliased dump emits camelCase keys."""
        p = self._make(component_name="OpenAI", component_id="comp-789", component_seconds=3, client_type="oss")
        dumped = p.model_dump(by_alias=True)
        expected = {
            "componentName": "OpenAI",
            "componentSeconds": 3,
            "componentSuccess": True,
            "componentErrorMessage": None,
            "clientType": "oss",
            "componentRunId": None,
        }
        for key, value in expected.items():
            assert dumped[key] == value

    def test_component_payload_with_negative_seconds(self):
        """The schema applies no range validation to component_seconds."""
        p = self._make(component_name="TestComponent", component_id="comp-neg", component_seconds=-1)
        assert p.component_seconds == -1
        assert p.component_success is True

    def test_component_payload_with_empty_name(self):
        """An empty component name is accepted."""
        p = self._make(component_name="", component_id="comp-empty", component_seconds=1)
        assert p.component_name == ""
        assert p.component_success is True

    def test_component_payload_with_special_characters_in_name(self):
        """Hyphens, underscores, and dots in the name are preserved."""
        p = self._make(component_name="Custom-Component_v1.0", component_id="comp-special", component_seconds=1)
        assert p.component_name == "Custom-Component_v1.0"
class TestPayloadEdgeCases:
    """Test edge cases and boundary conditions for all payloads."""

    def test_run_payload_with_extremely_large_values(self):
        """Test RunPayload with extremely large values."""
        large_seconds = 2**31 - 1  # Max int32 value
        payload = RunPayload(
            run_seconds=large_seconds,
            run_success=True,
            run_error_message="x" * 10000,  # very long error message should be accepted
            run_id=None,
        )
        assert payload.run_seconds == large_seconds
        assert len(payload.run_error_message) == 10000

    def test_shutdown_payload_with_maximum_time(self):
        """Test ShutdownPayload with maximum time value."""
        max_time = 2**31 - 1  # Max int32 value
        payload = ShutdownPayload(time_running=max_time)
        assert payload.time_running == max_time

    def test_version_payload_with_unicode_strings(self):
        """Test VersionPayload with unicode strings."""
        # Non-ASCII (emoji / CJK) values must survive model construction unchanged.
        payload = VersionPayload(
            package="langflow-🚀",
            version="1.0.0-测试",
            platform="Linux-测试系统",
            python="3.9",
            arch="x86_64",
            auto_login=False,
            cache_type="memory",
            backend_only=False,
        )
        assert payload.package == "langflow-🚀"
        assert payload.version == "1.0.0-测试"
        assert payload.platform == "Linux-测试系统"

    def test_playground_payload_with_zero_values(self):
        """Test PlaygroundPayload with zero values."""
        payload = PlaygroundPayload(
            playground_seconds=0,
            playground_component_count=0,
            playground_success=True,
            playground_error_message="",
            playground_run_id=None,
        )
        assert payload.playground_seconds == 0
        assert payload.playground_component_count == 0

    def test_component_payload_with_very_long_name(self):
        """Test ComponentPayload with very long component name."""
        long_name = "x" * 1000
        payload = ComponentPayload(
            component_name=long_name,
            component_id="comp-long",
            component_seconds=1,
            component_success=True,
            component_error_message=None,
            component_run_id=None,
        )
        assert payload.component_name == long_name
        assert len(payload.component_name) == 1000

    @pytest.mark.parametrize("client_type", ["oss", "desktop", "cloud", "enterprise", "custom"])
    def test_all_payloads_with_different_client_types(self, client_type):
        """Test all payloads with different client types."""
        # Each payload model should accept and expose the same client_type value.
        # Test RunPayload
        run_payload = RunPayload(
            run_seconds=60,
            run_success=True,
            client_type=client_type,
            run_error_message="",
            run_id=None,
        )
        assert run_payload.client_type == client_type
        # Test ShutdownPayload
        shutdown_payload = ShutdownPayload(time_running=3600, client_type=client_type)
        assert shutdown_payload.client_type == client_type
        # Test VersionPayload
        version_payload = VersionPayload(
            package="langflow",
            version="1.0.0",
            platform="Linux",
            python="3.9",
            arch="x86_64",
            auto_login=False,
            cache_type="memory",
            backend_only=False,
            client_type=client_type,
        )
        assert version_payload.client_type == client_type
        # Test PlaygroundPayload
        playground_payload = PlaygroundPayload(
            playground_seconds=30,
            playground_component_count=0,
            playground_success=True,
            client_type=client_type,
            playground_error_message="",
            playground_run_id=None,
        )
        assert playground_payload.client_type == client_type
        # Test ComponentPayload
        component_payload = ComponentPayload(
            component_name="TestComponent",
            component_id="comp-test",
            component_seconds=1,
            component_success=True,
            component_error_message=None,
            client_type=client_type,
            component_run_id=None,
        )
        assert component_payload.client_type == client_type

    def test_payload_serialization_with_none_values(self):
        """Test payload serialization when optional fields are None."""
        # Test with client_type as None
        run_payload = RunPayload(run_seconds=60, run_success=True, client_type=None, run_error_message="", run_id=None)
        data = run_payload.model_dump(by_alias=True, exclude_none=True)
        assert "clientType" not in data  # Should be excluded when None

    def test_payload_serialization_with_empty_strings(self):
        """Test payload serialization with empty strings."""
        run_payload = RunPayload(
            run_seconds=60,
            run_success=True,
            run_error_message="",  # empty string (not None) must round-trip through serialization
            client_type="oss",
            run_id=None,
        )
        data = run_payload.model_dump(by_alias=True)
        assert data["runErrorMessage"] == ""

    def test_payload_validation_with_invalid_types(self):
        """Test payload validation with invalid data types."""
        # Test RunPayload with string instead of int
        with pytest.raises((ValueError, TypeError)):
            RunPayload(
                run_seconds="not_a_number",  # type: ignore # noqa: PGH003
                run_success=True,
                run_error_message="",
                run_id=None,
            )
        # Test with boolean instead of string for error message
        with pytest.raises((ValueError, TypeError)):
            RunPayload(
                run_seconds=60,
                run_success=True,
                run_error_message=True,  # type: ignore # noqa: PGH003
                run_id=None,
            )
class TestPayloadIntegration:
    """Integration tests for payload interactions."""

    def test_payload_workflow_simulation(self):
        """Simulate a complete telemetry workflow with all payload types."""
        # 1. Version payload (startup)
        version_payload = VersionPayload(
            package="langflow",
            version="1.0.0",
            platform="Linux",
            python="3.9",
            arch="x86_64",
            auto_login=False,
            cache_type="memory",
            backend_only=False,
            client_type="oss",
        )
        # 2. Run payload (flow execution)
        run_payload = RunPayload(
            run_seconds=120,
            run_success=True,
            client_type="oss",
            run_error_message="",
            run_id=None,
        )
        # 3. Component payloads (individual components)
        component_payloads = [
            ComponentPayload(
                component_name="TextInput",
                component_id="comp-input",
                component_seconds=1,
                component_success=True,
                component_error_message=None,
                client_type="oss",
                component_run_id=None,
            ),
            ComponentPayload(
                component_name="OpenAI",
                component_id="comp-openai",
                component_seconds=5,
                component_success=True,
                component_error_message=None,
                client_type="oss",
                component_run_id=None,
            ),
            ComponentPayload(
                component_name="TextOutput",
                component_id="comp-output",
                component_seconds=1,
                component_success=True,
                component_error_message=None,
                client_type="oss",
                component_run_id=None,
            ),
        ]
        # 4. Playground payload (testing)
        playground_payload = PlaygroundPayload(
            playground_seconds=30,
            playground_component_count=3,
            playground_success=True,
            client_type="oss",
            playground_error_message="",
            playground_run_id=None,
        )
        # 5. Shutdown payload (cleanup)
        shutdown_payload = ShutdownPayload(time_running=3600, client_type="oss")
        # Verify all payloads have consistent client_type
        all_payloads = [version_payload, run_payload, playground_payload, shutdown_payload, *component_payloads]
        client_types = [p.client_type for p in all_payloads]
        assert all(ct == "oss" for ct in client_types)
        # Verify timing consistency: total component time cannot exceed the run time
        total_component_time = sum(cp.component_seconds for cp in component_payloads)
        assert total_component_time <= run_payload.run_seconds

    def test_error_propagation_workflow(self):
        """Test error propagation through different payload types."""
        # Component that fails
        failed_component = ComponentPayload(
            component_name="OpenAI",
            component_id="comp-failed",
            component_seconds=5,
            component_success=False,
            component_error_message="API rate limit exceeded",
            client_type="oss",
            component_run_id=None,
        )
        # Run fails due to component failure
        failed_run = RunPayload(
            run_seconds=10,
            run_success=False,
            run_error_message="Component 'OpenAI' failed: API rate limit exceeded",
            client_type="oss",
            run_id=None,
        )
        # Playground fails
        failed_playground = PlaygroundPayload(
            playground_seconds=30,
            playground_component_count=1,
            playground_success=False,
            playground_error_message="Test failed due to component error",
            client_type="oss",
            playground_run_id=None,
        )
        # Verify error consistency: the component's error message should
        # propagate into the run-level error message.
        assert not failed_component.component_success
        assert not failed_run.run_success
        assert not failed_playground.playground_success
        assert "API rate limit exceeded" in (failed_component.component_error_message or "")
        assert "API rate limit exceeded" in failed_run.run_error_message
# Test configuration and fixtures
@pytest.fixture
def sample_run_payload():
    """Provide a canonical successful RunPayload for tests."""
    payload_kwargs = {
        "run_is_webhook": False,
        "run_seconds": 120,
        "run_success": True,
        "run_error_message": "",
        "client_type": "oss",
        "run_id": None,
    }
    return RunPayload(**payload_kwargs)
@pytest.fixture
def sample_shutdown_payload():
    """Provide a canonical ShutdownPayload (one hour of uptime) for tests."""
    one_hour_seconds = 3600
    return ShutdownPayload(time_running=one_hour_seconds, client_type="oss")
@pytest.fixture
def sample_version_payload():
    """Provide a canonical VersionPayload describing a Linux OSS install."""
    payload_kwargs = {
        "package": "langflow",
        "version": "1.0.0",
        "platform": "Linux-5.4.0",
        "python": "3.9",
        "arch": "x86_64",
        "auto_login": False,
        "cache_type": "memory",
        "backend_only": False,
        "client_type": "oss",
    }
    return VersionPayload(**payload_kwargs)
@pytest.fixture
def sample_playground_payload():
    """Provide a canonical successful PlaygroundPayload for tests."""
    payload_kwargs = {
        "playground_seconds": 45,
        "playground_component_count": 5,
        "playground_success": True,
        "playground_error_message": "",
        "client_type": "oss",
        "playground_run_id": None,
    }
    return PlaygroundPayload(**payload_kwargs)
@pytest.fixture
def sample_component_payload():
    """Provide a canonical successful ComponentPayload for tests."""
    payload_kwargs = {
        "component_name": "TextInput",
        "component_id": "comp-fixture",
        "component_seconds": 2,
        "component_success": True,
        "component_error_message": None,
        "client_type": "oss",
        "component_run_id": None,
    }
    return ComponentPayload(**payload_kwargs)
# Performance and stress tests
class TestPayloadPerformance:
    """Performance tests for payload operations."""

    def test_payload_creation_performance(self):
        """Creating 1000 RunPayload objects should take well under a second."""
        import time

        # Use perf_counter: it is monotonic and high-resolution. time.time()
        # is wall-clock and can jump (NTP adjustments), which makes elapsed
        # measurements — and therefore this assertion — flaky.
        start_time = time.perf_counter()
        # Create 1000 payload objects
        payloads = [
            RunPayload(run_seconds=i, run_success=True, client_type="oss", run_id=None, run_error_message="")
            for i in range(1000)
        ]
        creation_time = time.perf_counter() - start_time
        # Should create 1000 objects reasonably quickly (under 1 second)
        assert creation_time < 1.0
        assert len(payloads) == 1000

    def test_payload_serialization_performance(self):
        """Serializing one VersionPayload 1000 times should take under 2 seconds."""
        import time

        # Create complex payload
        payload = VersionPayload(
            package="langflow",
            version="1.0.0",
            platform="Linux-5.4.0-x86_64-with-glibc2.31",
            python="3.9.7",
            arch="x86_64",
            auto_login=True,
            cache_type="redis",
            backend_only=False,
            client_type="oss",
        )
        start_time = time.perf_counter()
        # Serialize 1000 times; also sanity-check the output is non-empty.
        for _ in range(1000):
            data = payload.model_dump(by_alias=True)
            assert len(data) > 0
        serialization_time = time.perf_counter() - start_time
        # Should serialize 1000 objects reasonably quickly
        assert serialization_time < 2.0
if __name__ == "__main__":
    # Allow running this test module directly without invoking the full suite.
    pytest.main([__file__])
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/services/telemetry/test_telemetry_schema.py",
"license": "MIT License",
"lines": 739,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/base/langflow/alembic/versions/d37bc4322900_drop_single_constraint_on_files_name_.py | """Drop single constraint on file.name column
Revision ID: d37bc4322900
Revises: 0882f9657f22
Create Date: 2025-09-15 11:11:37.610294
"""
import logging
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
logger = logging.getLogger(__name__)

# revision identifiers, used by Alembic.
revision: str = "d37bc4322900"
down_revision: Union[str, None] = "0882f9657f22"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Remove single UNIQUE constraint on name column while preserving composite constraint.

    SQLite: rebuilds the table (SQLite cannot drop constraints in place).
    PostgreSQL: drops any single-column unique constraint/index on file.name.
    Other dialects: raises ValueError.
    """
    conn = op.get_bind()
    inspector = sa.inspect(conn)
    # Check if file table exists — nothing to migrate on a fresh database.
    table_names = inspector.get_table_names()
    if "file" not in table_names:
        logger.info("file table does not exist, skipping")
        return
    db_dialect = conn.dialect.name
    logger.info(f"Running migration on {db_dialect} database")
    try:
        if db_dialect == "sqlite":
            # SQLite: Recreate table without single UNIQUE constraint
            logger.info("SQLite: Recreating table to remove single UNIQUE constraint on name")
            # Guard against schema drift: ensure expected columns before destructive rebuild
            res = conn.execute(sa.text('PRAGMA table_info("file")'))
            cols = [row[1] for row in res]  # PRAGMA table_info row[1] is the column name
            expected = ['id', 'user_id', 'name', 'path', 'size', 'provider', 'created_at', 'updated_at']
            if set(cols) != set(expected):
                raise RuntimeError(f"SQLite: Unexpected columns on file table: {cols}. Aborting migration to avoid data loss.")
            # Create the new table without the single UNIQUE(name) constraint;
            # the composite UNIQUE (name, user_id) is kept.
            op.execute("""
                CREATE TABLE file_new (
                    id CHAR(32) NOT NULL,
                    user_id CHAR(32) NOT NULL,
                    name VARCHAR NOT NULL,
                    path VARCHAR NOT NULL,
                    size INTEGER NOT NULL,
                    provider VARCHAR,
                    created_at DATETIME NOT NULL,
                    updated_at DATETIME NOT NULL,
                    PRIMARY KEY (id),
                    CONSTRAINT file_name_user_id_key UNIQUE (name, user_id),
                    FOREIGN KEY(user_id) REFERENCES user (id)
                )
            """)
            # Copy data from old table to new table
            op.execute("""
                INSERT INTO file_new (id, user_id, name, path, size, provider, created_at, updated_at)
                SELECT id, user_id, name, path, size, provider, created_at, updated_at
                FROM file
            """)
            # Drop old table and rename new table.
            # FK enforcement is disabled around the swap so the DROP/RENAME
            # does not trip references, and re-enabled in finally.
            op.execute("PRAGMA foreign_keys=OFF")
            try:
                op.execute("DROP TABLE file")
                op.execute("ALTER TABLE file_new RENAME TO file")
            finally:
                op.execute("PRAGMA foreign_keys=ON")
            logger.info("SQLite: Successfully recreated file table without single UNIQUE constraint on name")
        elif db_dialect == "postgresql":
            # PostgreSQL: Find and drop single-column unique constraints on 'name'
            logger.info("PostgreSQL: Finding and dropping single UNIQUE constraints and indexes on name")
            # Determine target schema
            schema = sa.inspect(conn).default_schema_name or "public"
            # Get constraint names that are single-column unique on 'name'.
            # conkey holds the constrained column numbers; a length-1 conkey
            # whose attribute is 'name' is exactly the constraint to drop.
            result = conn.execute(sa.text("""
                SELECT conname
                FROM pg_constraint c
                JOIN pg_class t ON c.conrelid = t.oid
                JOIN pg_namespace n ON t.relnamespace = n.oid
                WHERE t.relname = 'file'
                AND n.nspname = :schema
                AND c.contype = 'u'
                AND array_length(c.conkey, 1) = 1
                AND EXISTS (
                    SELECT 1 FROM pg_attribute a
                    WHERE a.attrelid = t.oid
                    AND a.attnum = c.conkey[1]
                    AND a.attname = 'name'
                )
            """), {"schema": schema})
            constraints_to_drop = [row[0] for row in result.fetchall()]
            if constraints_to_drop:
                for constraint_name in constraints_to_drop:
                    op.drop_constraint(constraint_name, "file", type_="unique", schema=schema)
                    logger.info(f"PostgreSQL: Dropped constraint {constraint_name}")
            else:
                logger.info("PostgreSQL: No single UNIQUE constraints found on name column")
            # Also drop any single-column UNIQUE indexes on name not backed by constraints.
            # NOTE(review): ix.indkey is an int2vector, not a plain array — confirm
            # array_length(ix.indkey, 1) behaves as intended on all supported
            # PostgreSQL versions; ix.indnkeyatts may be the safer check.
            idx_result = conn.execute(sa.text("""
                SELECT i.relname
                FROM pg_class t
                JOIN pg_namespace n ON n.oid = t.relnamespace
                JOIN pg_index ix ON ix.indrelid = t.oid
                JOIN pg_class i ON i.oid = ix.indexrelid
                WHERE t.relname = 'file'
                AND n.nspname = :schema
                AND ix.indisunique = TRUE
                AND array_length(ix.indkey, 1) = 1
                AND NOT EXISTS (SELECT 1 FROM pg_constraint c WHERE c.conindid = ix.indexrelid)
                AND (SELECT a.attname FROM pg_attribute a
                     WHERE a.attrelid = t.oid AND a.attnum = ix.indkey[1]) = 'name'
            """), {"schema": schema})
            for (index_name,) in idx_result.fetchall():
                op.drop_index(index_name, table_name="file", schema=schema)
                logger.info(f"PostgreSQL: Dropped unique index {index_name}")
        else:
            raise ValueError(f"Unsupported database dialect: {db_dialect}")
    except Exception as e:
        logger.error(f"Error during constraint removal: {e}")
        raise
def downgrade() -> None:
    """Add back the single unique constraint on name column.

    Aborts early if duplicate file.name values exist, since restoring
    UNIQUE(name) would fail on them.
    """
    conn = op.get_bind()
    inspector = sa.inspect(conn)
    # Check if file table exists
    table_names = inspector.get_table_names()
    if "file" not in table_names:
        logger.info("file table does not exist, skipping downgrade")
        return
    db_dialect = conn.dialect.name
    try:
        # Pre-check for duplicates that would violate UNIQUE(name)
        dup = conn.execute(sa.text("SELECT name FROM file GROUP BY name HAVING COUNT(*) > 1 LIMIT 1")).first()
        if dup:
            raise RuntimeError(
                "Downgrade aborted: duplicates in file.name would violate UNIQUE(name). "
                "Deduplicate before downgrading."
            )
        if db_dialect == "sqlite":
            # Add the same column validation as upgrade
            res = conn.execute(sa.text('PRAGMA table_info("file")'))
            cols = [row[1] for row in res]  # PRAGMA table_info row[1] is the column name
            expected = ['id', 'user_id', 'name', 'path', 'size', 'provider', 'created_at', 'updated_at']
            if set(cols) != set(expected):
                raise RuntimeError(f"SQLite: Unexpected columns on file table: {cols}. Aborting downgrade.")
            # SQLite: Recreate table with both constraints
            # (composite UNIQUE(name, user_id) plus the restored UNIQUE(name)).
            logger.info("SQLite: Recreating table with both constraints")
            op.execute("""
                CREATE TABLE file_new (
                    id CHAR(32) NOT NULL,
                    user_id CHAR(32) NOT NULL,
                    name VARCHAR NOT NULL,
                    path VARCHAR NOT NULL,
                    size INTEGER NOT NULL,
                    provider VARCHAR,
                    created_at DATETIME NOT NULL,
                    updated_at DATETIME NOT NULL,
                    PRIMARY KEY (id),
                    CONSTRAINT file_name_user_id_key UNIQUE (name, user_id),
                    FOREIGN KEY(user_id) REFERENCES user (id),
                    UNIQUE (name)
                )
            """)
            # Copy data
            op.execute("""
                INSERT INTO file_new (id, user_id, name, path, size, provider, created_at, updated_at)
                SELECT id, user_id, name, path, size, provider, created_at, updated_at
                FROM file
            """)
            # Replace table, with FK enforcement disabled around the swap.
            op.execute("PRAGMA foreign_keys=OFF")
            try:
                op.execute("DROP TABLE file")
                op.execute("ALTER TABLE file_new RENAME TO file")
            finally:
                op.execute("PRAGMA foreign_keys=ON")
            logger.info("SQLite: Restored single unique constraint on name column")
        elif db_dialect == "postgresql":
            # PostgreSQL: Add constraint back.
            # NOTE(review): the restored name "file_name_unique" may differ from
            # whatever constraint name the upgrade dropped — confirm callers do
            # not depend on the original name.
            schema = sa.inspect(conn).default_schema_name or "public"
            op.create_unique_constraint("file_name_unique", "file", ["name"], schema=schema)
            logger.info("PostgreSQL: Added back single unique constraint on 'name' column")
        else:
            logger.info(f"Downgrade not supported for dialect: {db_dialect}")
    except Exception as e:
        logger.error(f"Error during downgrade: {e}")
        # Deliberately tolerate constraint-related errors (e.g. the constraint
        # already exists); everything else is re-raised.
        if "constraint" not in str(e).lower():
            raise
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/base/langflow/alembic/versions/d37bc4322900_drop_single_constraint_on_files_name_.py",
"license": "MIT License",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langflow-ai/langflow:src/backend/base/langflow/api/v1/auth_helpers.py | from typing import Any
from pydantic import SecretStr
from langflow.services.auth.mcp_encryption import decrypt_auth_settings, encrypt_auth_settings
from langflow.services.database.models.folder.model import Folder
def handle_auth_settings_update(
    existing_project: Folder,
    new_auth_settings: dict | Any | None,
) -> dict[str, bool]:
    """Handle auth settings update including encryption/decryption and MCP Composer logic.

    Args:
        existing_project: The project being updated (modified in-place).
        new_auth_settings: New auth settings (could be dict, Pydantic model, or None).

    Returns:
        Dict containing:
            - should_start_composer: True when the new settings use OAuth.
            - should_stop_composer: True when OAuth was in use and no longer is.
            - should_handle_composer: True when OAuth is involved before or after the update.
    """
    # Capture the auth type in effect before the update so we can decide
    # what to do with the MCP Composer afterwards.
    current_auth_type = None
    decrypted_current = None
    if existing_project.auth_settings:
        current_auth_type = existing_project.auth_settings.get("auth_type")
        # Only decrypt if we need access to sensitive fields (for preserving masked values)
        if current_auth_type in ["oauth", "apikey"]:
            decrypted_current = decrypt_auth_settings(existing_project.auth_settings)

    if new_auth_settings is None:
        # Explicitly set to None - clear auth settings
        existing_project.auth_settings = None
        was_oauth = current_auth_type == "oauth"
        # Fix: previously this early return omitted "should_handle_composer",
        # making the function's return contract inconsistent between paths.
        # If we were using OAuth, stop (and handle) the composer.
        return {
            "should_start_composer": False,
            "should_stop_composer": was_oauth,
            "should_handle_composer": was_oauth,
        }

    # Handle different input types (dict vs Pydantic model)
    if isinstance(new_auth_settings, dict):
        auth_dict = new_auth_settings.copy()
    else:
        # Pydantic model - use python mode to get raw values without SecretStr masking
        auth_dict = new_auth_settings.model_dump(mode="python", exclude_none=True)
        # Handle SecretStr fields: model_dump still wraps secrets, so unwrap explicitly.
        secret_fields = ["api_key", "oauth_client_secret"]
        for field in secret_fields:
            field_val = getattr(new_auth_settings, field, None)
            if isinstance(field_val, SecretStr):
                auth_dict[field] = field_val.get_secret_value()

    new_auth_type = auth_dict.get("auth_type")

    # Handle masked secret fields from frontend:
    # if the frontend sends back "*******" for a secret field, preserve the existing value.
    if decrypted_current:
        secret_fields = ["oauth_client_secret", "api_key"]
        for field in secret_fields:
            if field in auth_dict and auth_dict[field] == "*******" and field in decrypted_current:
                auth_dict[field] = decrypted_current[field]

    # Encrypt and store the auth settings
    existing_project.auth_settings = encrypt_auth_settings(auth_dict)

    # Determine MCP Composer actions
    should_start_composer = new_auth_type == "oauth"
    should_stop_composer = current_auth_type == "oauth" and new_auth_type != "oauth"
    should_handle_composer = current_auth_type == "oauth" or new_auth_type == "oauth"
    return {
        "should_start_composer": should_start_composer,
        "should_stop_composer": should_stop_composer,
        "should_handle_composer": should_handle_composer,
    }
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/base/langflow/api/v1/auth_helpers.py",
"license": "MIT License",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/backend/tests/integration/components/mcp/test_mcp_superuser_flow.py | import pytest
from langflow.services.auth.utils import create_user_longterm_token
from langflow.services.deps import get_settings_service
from langflow.services.utils import initialize_services
from lfx.services.deps import session_scope
@pytest.mark.skip(reason="MCP Projects can only create long-term tokens if AUTO_LOGIN is enabled")
async def test_mcp_longterm_token_headless_superuser_integration():
"""Integration-style check that without explicit credentials, AUTO_LOGIN=false path.
Creates a headless superuser via initialize_services and allows minting a long-term token.
"""
settings = get_settings_service()
settings.auth_settings.AUTO_LOGIN = False
settings.auth_settings.SUPERUSER = ""
settings.auth_settings.SUPERUSER_PASSWORD = ""
await initialize_services()
async with session_scope() as session:
user_id, tokens = await create_user_longterm_token(session)
assert user_id is not None
assert tokens.get("access_token")
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/integration/components/mcp/test_mcp_superuser_flow.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/integration/test_image_providers.py | """Integration tests for image content dict format with real LLM providers.
These tests verify that the standardized image content dict format works
correctly with actual API calls to OpenAI, Anthropic, and Google Gemini.
Tests are skipped if required API keys are not available.
"""
import base64
import os
import pytest
from langflow.utils.image import create_image_content_dict
from tests.api_keys import has_api_key
@pytest.fixture
def sample_image(tmp_path):
    """Write a tiny 1x1 PNG to a temporary file and return its path."""
    png_b64 = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAACklEQVR4nGMAAQAABQABDQottAAAAABJRU5ErkJggg=="
    target = tmp_path / "test_image.png"
    target.write_bytes(base64.b64decode(png_b64))
    return target
@pytest.fixture
def sample_jpeg_image(tmp_path):
    """Write PNG bytes to a .jpg path to exercise extension-based MIME detection.

    The payload is the same 1x1 PNG as sample_image; only the extension differs,
    which is exactly what the MIME-detection code under test keys on.
    """
    png_b64 = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAACklEQVR4nGMAAQAABQABDQottAAAAABJRU5ErkJggg=="
    target = tmp_path / "test_image.jpg"  # deliberately mismatched extension
    target.write_bytes(base64.b64decode(png_b64))
    return target
# use shared has_api_key from tests.api_keys
@pytest.mark.skipif(not has_api_key("OPENAI_API_KEY"), reason="OPENAI_API_KEY not available in CI")
def test_openai_vision_api_real_call(sample_image):
    """Test that image content dict works with real OpenAI Vision API calls."""
    try:
        import openai
    except ImportError:
        pytest.skip("OpenAI package not installed")
    from tests.api_keys import get_openai_api_key

    client = openai.OpenAI(api_key=get_openai_api_key())
    content_dict = create_image_content_dict(sample_image)
    # Test the message structure with OpenAI
    messages = [
        {
            "role": "user",
            "content": [{"type": "text", "text": "What color is this image? Just answer with one word."}, content_dict],
        }
    ]
    try:
        # max_tokens is tiny on purpose: we only verify the format is accepted.
        response = client.chat.completions.create(model="gpt-4o-mini", messages=messages, max_tokens=10)
        # If we get here without an exception, the format is accepted
        assert response.choices[0].message.content is not None
    except Exception as e:
        pytest.fail(f"OpenAI API call failed with image content dict format: {e}")
@pytest.mark.skipif(not has_api_key("OPENAI_API_KEY"), reason="OPENAI_API_KEY not available in CI")
def test_openai_vision_api_with_jpeg(sample_jpeg_image):
    """Test OpenAI Vision API with JPEG image format."""
    try:
        import openai
    except ImportError:
        pytest.skip("OpenAI package not installed")
    from tests.api_keys import get_openai_api_key

    client = openai.OpenAI(api_key=get_openai_api_key())
    content_dict = create_image_content_dict(sample_jpeg_image)
    # Verify JPEG format is correctly detected from file extension
    assert "data:image/jpeg;base64," in content_dict["image_url"]["url"]
    messages = [
        {"role": "user", "content": [{"type": "text", "text": "Describe this image in one word."}, content_dict]}
    ]
    try:
        response = client.chat.completions.create(model="gpt-4o-mini", messages=messages, max_tokens=10)
        assert response.choices[0].message.content is not None
        # API call successful
    except Exception as e:
        pytest.fail(f"OpenAI API call failed with JPEG image: {e}")
@pytest.mark.skipif(not has_api_key("ANTHROPIC_API_KEY"), reason="ANTHROPIC_API_KEY not available in CI")
def test_anthropic_vision_api_real_call(sample_image):
    """Test that image content dict works with real Anthropic Claude API calls."""
    try:
        import anthropic
    except ImportError:
        pytest.skip("Anthropic package not installed")

    client = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
    content_dict = create_image_content_dict(sample_image)
    # Convert our standardized format to Anthropic's format:
    # split "data:<mime>;base64,<payload>" into mime type and raw base64.
    data_url = content_dict["image_url"]["url"]
    mime_type, base64_data = data_url.split(";base64,")
    mime_type = mime_type.replace("data:", "")
    # Anthropic format
    anthropic_image = {"type": "image", "source": {"type": "base64", "media_type": mime_type, "data": base64_data}}
    # Test the message structure with Anthropic Claude
    messages = [
        {
            "role": "user",
            "content": [{"type": "text", "text": "What is in this image? Answer in one word."}, anthropic_image],
        }
    ]
    try:
        response = client.messages.create(model="claude-3-haiku-20240307", max_tokens=10, messages=messages)
        # If we get here without an exception, the format conversion worked
        assert response.content[0].text is not None
        # API call successful
    except Exception as e:
        pytest.fail(f"Anthropic API call failed when converting from image content dict format: {e}")
@pytest.mark.skipif(not has_api_key("ANTHROPIC_API_KEY"), reason="ANTHROPIC_API_KEY not available in CI")
def test_anthropic_vision_api_with_jpeg(sample_jpeg_image):
    """Test Anthropic Claude API with JPEG image format."""
    try:
        import anthropic
    except ImportError:
        pytest.skip("Anthropic package not installed")

    client = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
    content_dict = create_image_content_dict(sample_jpeg_image)
    # Verify JPEG format is correctly detected from file extension
    assert "data:image/jpeg;base64," in content_dict["image_url"]["url"]
    # Convert our standardized format to Anthropic's format:
    # split "data:<mime>;base64,<payload>" into mime type and raw base64.
    data_url = content_dict["image_url"]["url"]
    mime_type, base64_data = data_url.split(";base64,")
    mime_type = mime_type.replace("data:", "")
    # Anthropic format
    anthropic_image = {"type": "image", "source": {"type": "base64", "media_type": mime_type, "data": base64_data}}
    messages = [
        {"role": "user", "content": [{"type": "text", "text": "What do you see? One word answer."}, anthropic_image]}
    ]
    try:
        response = client.messages.create(model="claude-3-haiku-20240307", max_tokens=10, messages=messages)
        assert response.content[0].text is not None
        # API call successful
    except Exception as e:
        pytest.fail(f"Anthropic API call failed with JPEG image: {e}")
@pytest.mark.skipif(not has_api_key("GEMINI_API_KEY"), reason="GEMINI_API_KEY not available in CI")
def test_google_gemini_vision_api_real_call(sample_image):
    """Test that image content dict works with real Google Gemini API calls."""
    try:
        import google.generativeai as genai
    except ImportError:
        pytest.skip("Google Generative AI package not installed")

    genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
    model = genai.GenerativeModel("gemini-2.5-flash")
    content_dict = create_image_content_dict(sample_image)
    # Convert our format to what Gemini expects.
    # Gemini uses a different format, but we need to verify our dict doesn't break when converted.
    try:
        # Extract the data URL from our format
        data_url = content_dict["image_url"]["url"]
        # For Gemini, we need to extract just the base64 part
        mime_type, base64_data = data_url.split(";base64,")
        mime_type = mime_type.replace("data:", "")
        # Gemini format: raw decoded bytes, not a base64 string
        gemini_image = {"mime_type": mime_type, "data": base64.b64decode(base64_data)}
        response = model.generate_content(["What is in this image? Answer in one word.", gemini_image])
        assert response.text is not None
        # API call successful
    except Exception as e:
        pytest.fail(f"Google Gemini API call failed when processing image content dict: {e}")
@pytest.mark.skipif(not has_api_key("GEMINI_API_KEY"), reason="GEMINI_API_KEY not available in CI")
def test_google_gemini_vision_api_with_jpeg(sample_jpeg_image):
    """Test Google Gemini API with JPEG image format."""
    try:
        import google.generativeai as genai
    except ImportError:
        pytest.skip("Google Generative AI package not installed")

    genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
    model = genai.GenerativeModel("gemini-2.5-flash")
    content_dict = create_image_content_dict(sample_jpeg_image)
    # Verify JPEG format is correctly detected from file extension
    assert "data:image/jpeg;base64," in content_dict["image_url"]["url"]
    try:
        # Convert our format for Gemini (raw decoded bytes, not base64 text)
        data_url = content_dict["image_url"]["url"]
        mime_type, base64_data = data_url.split(";base64,")
        mime_type = mime_type.replace("data:", "")
        gemini_image = {"mime_type": mime_type, "data": base64.b64decode(base64_data)}
        response = model.generate_content(["Describe this image briefly.", gemini_image])
        assert response.text is not None
        # API call successful
    except Exception as e:
        pytest.fail(f"Google Gemini API call failed with JPEG image: {e}")
def test_langchain_integration_format_compatibility(sample_image):
    """Verify the image content dict slots into a LangChain-style message."""
    image_part = create_image_content_dict(sample_image)
    # Build a LangChain-style multimodal user message with our dict embedded.
    langchain_message = {
        "role": "user",
        "content": [
            {"type": "text", "text": "Analyze this image"},
            image_part,  # the standardized format should fit here unchanged
        ],
    }
    # The message must contain exactly the text part plus the image part.
    assert len(langchain_message["content"]) == 2
    text_part, img_part = langchain_message["content"]
    assert text_part["type"] == "text"
    assert img_part["type"] == "image_url"
    assert "image_url" in img_part
    assert "url" in img_part["image_url"]
    # This structure matches what LangChain's OpenAI and Anthropic
    # integrations expect for image content.
@pytest.mark.skipif(
    not (has_api_key("OPENAI_API_KEY") and has_api_key("ANTHROPIC_API_KEY")),
    reason="Both OPENAI_API_KEY and ANTHROPIC_API_KEY needed for cross-provider test",
)
def test_cross_provider_consistency(sample_image):
    """Test that the same image content dict works across multiple providers."""
    content_dict = create_image_content_dict(sample_image)
    # Test with OpenAI
    try:
        import openai
        from tests.api_keys import get_openai_api_key

        openai_client = openai.OpenAI(api_key=get_openai_api_key())
        openai_response = openai_client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "user", "content": [{"type": "text", "text": "What color is this? One word."}, content_dict]}
            ],
            max_tokens=5,
        )
        openai_result = openai_response.choices[0].message.content
        # API call successful
    except ImportError:
        pytest.skip("OpenAI package not available for cross-provider test")
    # Test with Anthropic using the same content_dict (but converted to Anthropic format)
    try:
        import anthropic

        anthropic_client = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
        # Convert our standardized format to Anthropic's format
        data_url = content_dict["image_url"]["url"]
        mime_type, base64_data = data_url.split(";base64,")
        mime_type = mime_type.replace("data:", "")
        anthropic_image = {"type": "image", "source": {"type": "base64", "media_type": mime_type, "data": base64_data}}
        anthropic_response = anthropic_client.messages.create(
            model="claude-3-haiku-20240307",
            max_tokens=5,
            messages=[
                {
                    "role": "user",
                    "content": [{"type": "text", "text": "What color is this? One word."}, anthropic_image],
                }
            ],
        )
        anthropic_result = anthropic_response.content[0].text
        # API call successful
    except ImportError:
        pytest.skip("Anthropic package not available for cross-provider test")
    # Both should process the same format successfully
    # (We don't assert they give the same answer since models may interpret differently)
    assert openai_result is not None
    assert anthropic_result is not None
def test_error_handling_without_api_keys(sample_image):
    """Validate the image content dict structure without touching any API."""
    payload = create_image_content_dict(sample_image)
    # Standardized OpenAI-style shape, independent of API availability.
    assert payload["type"] == "image_url"
    assert "image_url" in payload
    assert "url" in payload["image_url"]
    # Legacy fields that previously caused provider-specific failures must be gone.
    for legacy_key in ("source_type", "source", "media_type"):
        assert legacy_key not in payload
    # The URL must be a base64 data URL for an image.
    data_url = payload["image_url"]["url"]
    assert data_url.startswith("data:image/")
    assert ";base64," in data_url
    # The part after the marker must decode as valid, non-empty base64.
    encoded = data_url.split(";base64,")[1]
    assert base64.b64decode(encoded)
if __name__ == "__main__":
    # Report which provider API keys are available for manual test runs.
    _PROVIDER_ENV_VARS = {
        "OpenAI": "OPENAI_API_KEY",
        "Anthropic": "ANTHROPIC_API_KEY",
        "Gemini": "GEMINI_API_KEY",
    }
    keys_available = [name for name, env_var in _PROVIDER_ENV_VARS.items() if has_api_key(env_var)]
    # Bug fix: the list was previously built but never reported, despite the
    # comment promising to "print which API keys are available".
    print(f"Available API keys: {', '.join(keys_available) or 'none'}")
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/integration/test_image_providers.py",
"license": "MIT License",
"lines": 278,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/test_auth_settings.py | from pathlib import Path
import pytest
from lfx.services.settings.auth import AuthSettings
from lfx.services.settings.constants import DEFAULT_SUPERUSER
from pydantic import SecretStr, ValidationError
@pytest.mark.parametrize("auto_login", [True, False])
def test_superuser_password_is_secretstr(auto_login, tmp_path: Path):
    """The superuser password must always be wrapped in a SecretStr."""
    auth = AuthSettings(CONFIG_DIR=tmp_path.as_posix(), AUTO_LOGIN=auto_login)
    assert isinstance(auth.SUPERUSER_PASSWORD, SecretStr)
def test_auto_login_true_forces_default_and_scrubs_password(tmp_path: Path):
    """With AUTO_LOGIN on, custom credentials are replaced by the defaults."""
    auth = AuthSettings(
        CONFIG_DIR=tmp_path.as_posix(),
        AUTO_LOGIN=True,
        SUPERUSER="custom",
        SUPERUSER_PASSWORD=SecretStr("_changed"),
    )
    # Validation rewrites both fields to the AUTO_LOGIN defaults.
    assert auth.SUPERUSER == DEFAULT_SUPERUSER
    assert isinstance(auth.SUPERUSER_PASSWORD, SecretStr)
    assert auth.SUPERUSER_PASSWORD.get_secret_value() == "langflow"
    # reset_credentials keeps the defaults while AUTO_LOGIN is enabled.
    auth.reset_credentials()
    assert auth.SUPERUSER == DEFAULT_SUPERUSER
    assert auth.SUPERUSER_PASSWORD.get_secret_value() == "langflow"
def test_auto_login_false_preserves_username_and_scrubs_password_on_reset(tmp_path: Path):
    """With AUTO_LOGIN off, credentials survive init; reset only scrubs the password."""
    auth = AuthSettings(
        CONFIG_DIR=tmp_path.as_posix(),
        AUTO_LOGIN=False,
        SUPERUSER="admin",
        SUPERUSER_PASSWORD=SecretStr("strongpass"),
    )
    # Both values pass through validation untouched.
    assert auth.SUPERUSER == "admin"
    assert auth.SUPERUSER_PASSWORD.get_secret_value() == "strongpass"
    # After reset: username kept, password emptied.
    auth.reset_credentials()
    assert auth.SUPERUSER == "admin"
    assert auth.SUPERUSER_PASSWORD.get_secret_value() == ""
# ============================================================================
# API_KEY_SOURCE Settings Tests
# ============================================================================
class TestApiKeySourceSettings:
    """Tests for the API_KEY_SOURCE configuration setting."""

    def test_api_key_source_default_is_db(self, tmp_path: Path):
        """Default API_KEY_SOURCE should be 'db' for backward compatibility."""
        auth = AuthSettings(CONFIG_DIR=tmp_path.as_posix())
        assert auth.API_KEY_SOURCE == "db"

    def test_api_key_source_accepts_db(self, tmp_path: Path):
        """API_KEY_SOURCE should accept 'db' value."""
        auth = AuthSettings(CONFIG_DIR=tmp_path.as_posix(), API_KEY_SOURCE="db")
        assert auth.API_KEY_SOURCE == "db"

    def test_api_key_source_accepts_env(self, tmp_path: Path):
        """API_KEY_SOURCE should accept 'env' value."""
        auth = AuthSettings(CONFIG_DIR=tmp_path.as_posix(), API_KEY_SOURCE="env")
        assert auth.API_KEY_SOURCE == "env"

    def test_api_key_source_rejects_invalid_value(self, tmp_path: Path):
        """API_KEY_SOURCE should reject invalid values."""
        with pytest.raises(ValidationError) as exc_info:
            AuthSettings(CONFIG_DIR=tmp_path.as_posix(), API_KEY_SOURCE="invalid")
        # The validation error must name the offending field.
        assert "API_KEY_SOURCE" in str(exc_info.value)

    def test_api_key_source_rejects_empty_string(self, tmp_path: Path):
        """API_KEY_SOURCE should reject empty string."""
        with pytest.raises(ValidationError):
            AuthSettings(CONFIG_DIR=tmp_path.as_posix(), API_KEY_SOURCE="")
class TestApiKeySourceEnvironmentVariables:
    """Tests for API_KEY_SOURCE loaded from environment variables."""

    def test_api_key_source_from_env_var(self, tmp_path: Path, monkeypatch):
        """API_KEY_SOURCE should be loaded from LANGFLOW_API_KEY_SOURCE env var."""
        monkeypatch.setenv("LANGFLOW_API_KEY_SOURCE", "env")
        auth = AuthSettings(CONFIG_DIR=tmp_path.as_posix())
        assert auth.API_KEY_SOURCE == "env"

    def test_explicit_value_overrides_env_var(self, tmp_path: Path, monkeypatch):
        """Explicit parameter should override environment variable."""
        monkeypatch.setenv("LANGFLOW_API_KEY_SOURCE", "env")
        # Constructor argument wins over the environment.
        auth = AuthSettings(CONFIG_DIR=tmp_path.as_posix(), API_KEY_SOURCE="db")
        assert auth.API_KEY_SOURCE == "db"

    def test_invalid_api_key_source_from_env_var(self, tmp_path: Path, monkeypatch):
        """Invalid API_KEY_SOURCE from env var should raise ValidationError."""
        monkeypatch.setenv("LANGFLOW_API_KEY_SOURCE", "invalid")
        with pytest.raises(ValidationError):
            AuthSettings(CONFIG_DIR=tmp_path.as_posix())
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/test_auth_settings.py",
"license": "MIT License",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/test_security_cors.py | """Unit tests for CORS security configuration."""
import os
import tempfile
import warnings
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from fastapi import HTTPException
from fastapi.middleware.cors import CORSMiddleware
from lfx.services.settings.base import Settings
class TestCORSConfiguration:
    """Test CORS configuration and security validations."""

    # NOTE(review): several tests below emit warnings.warn(...) purely to surface
    # insecure current defaults in test output; the warnings are not assertions.

    def test_default_cors_settings_current_behavior(self):
        """Test current CORS settings behavior (warns about security implications)."""
        with tempfile.TemporaryDirectory() as temp_dir, patch.dict(os.environ, {"LANGFLOW_CONFIG_DIR": temp_dir}):
            settings = Settings()
            # Current behavior: wildcard origins with credentials ENABLED (insecure)
            assert settings.cors_origins == "*"
            assert settings.cors_allow_credentials is True  # Currently defaults to True (insecure)
            assert settings.cors_allow_methods == "*"
            assert settings.cors_allow_headers == "*"
            # Warn about CRITICAL security implications
            warnings.warn(
                "CRITICAL SECURITY WARNING: Current CORS configuration uses wildcard origins (*) "
                "WITH CREDENTIALS ENABLED! This allows any website to make authenticated requests "
                "to your Langflow instance and potentially steal user credentials. "
                "This will be changed to more secure defaults in v1.7. "
                "Please configure LANGFLOW_CORS_ORIGINS with specific domains for production use.",
                UserWarning,
                stacklevel=2,
            )

    @pytest.mark.skip(reason="Uncomment in v1.7 - represents future secure behavior")
    def test_default_cors_settings_secure_future(self):
        """Test future default CORS settings that will be secure (skip until v1.7)."""
        # This test represents the behavior we want in v1.7
        # with tempfile.TemporaryDirectory() as temp_dir, patch.dict(os.environ, {"LANGFLOW_CONFIG_DIR": temp_dir}):
        #     settings = Settings()
        #     # Future secure defaults:
        #     assert settings.cors_origins == ["http://localhost:3000", "http://127.0.0.1:3000"]
        #     assert settings.cors_allow_credentials is True
        #     assert settings.cors_allow_methods == ["GET", "POST", "PUT", "DELETE"]
        #     assert settings.cors_allow_headers == ["Content-Type", "Authorization"]

    def test_cors_origins_string_to_list_conversion(self):
        """Test comma-separated origins are converted to list."""
        with (
            tempfile.TemporaryDirectory() as temp_dir,
            patch.dict(
                os.environ,
                {
                    "LANGFLOW_CONFIG_DIR": temp_dir,
                    "LANGFLOW_CORS_ORIGINS": "https://app1.example.com,https://app2.example.com",
                },
            ),
        ):
            settings = Settings()
            assert settings.cors_origins == ["https://app1.example.com", "https://app2.example.com"]

    def test_single_origin_converted_to_list(self):
        """Test single origin is converted to list for consistency."""
        with (
            tempfile.TemporaryDirectory() as temp_dir,
            patch.dict(
                os.environ,
                {
                    "LANGFLOW_CONFIG_DIR": temp_dir,
                    "LANGFLOW_CORS_ORIGINS": "https://app.example.com",
                },
            ),
        ):
            settings = Settings()
            assert settings.cors_origins == ["https://app.example.com"]

    def test_wildcard_with_credentials_allowed_current_behavior(self):
        """Test that credentials are NOT disabled when using wildcard origins (current insecure behavior)."""
        with (
            tempfile.TemporaryDirectory() as temp_dir,
            patch.dict(
                os.environ,
                {
                    "LANGFLOW_CONFIG_DIR": temp_dir,
                    "LANGFLOW_CORS_ORIGINS": "*",
                    "LANGFLOW_CORS_ALLOW_CREDENTIALS": "true",
                },
            ),
        ):
            settings = Settings()
            assert settings.cors_origins == "*"
            # Current behavior: credentials are NOT prevented (INSECURE!)
            assert settings.cors_allow_credentials is True
            # Warn about the CRITICAL security implications
            warnings.warn(
                "CRITICAL SECURITY WARNING: Wildcard CORS origins (*) WITH CREDENTIALS ENABLED! "
                "This is a severe security vulnerability that allows any website to make "
                "authenticated requests and potentially steal user credentials. "
                "This MUST be fixed in production! Configure specific origins immediately.",
                UserWarning,
                stacklevel=2,
            )

    def test_specific_origins_allow_credentials(self):
        """Test that credentials work with specific origins."""
        with (
            tempfile.TemporaryDirectory() as temp_dir,
            patch.dict(
                os.environ,
                {
                    "LANGFLOW_CONFIG_DIR": temp_dir,
                    "LANGFLOW_CORS_ORIGINS": "https://app.example.com",
                    "LANGFLOW_CORS_ALLOW_CREDENTIALS": "true",
                },
            ),
        ):
            settings = Settings()
            assert settings.cors_origins == ["https://app.example.com"]
            assert settings.cors_allow_credentials is True

    @patch("langflow.main.setup_sentry")  # Mock Sentry setup
    @patch("langflow.main.get_settings_service")
    def test_cors_middleware_configuration(self, mock_get_settings, mock_setup_sentry):
        """Test that CORS middleware is configured correctly in the app."""
        from langflow.main import create_app

        # Mock settings
        mock_settings = MagicMock()
        mock_settings.settings.cors_origins = ["https://app.example.com"]
        mock_settings.settings.cors_allow_credentials = True
        mock_settings.settings.cors_allow_methods = ["GET", "POST"]
        mock_settings.settings.cors_allow_headers = ["Content-Type"]
        mock_settings.settings.prometheus_enabled = False
        mock_settings.settings.mcp_server_enabled = False
        mock_settings.settings.sentry_dsn = None  # Disable Sentry
        mock_get_settings.return_value = mock_settings
        # Create app
        mock_setup_sentry.return_value = None  # Use the mock
        app = create_app()
        # Find CORS middleware in the app's middleware stack
        cors_middleware = None
        for middleware in app.user_middleware:
            if middleware.cls == CORSMiddleware:
                cors_middleware = middleware
                break
        # The middleware kwargs must mirror the mocked settings exactly.
        assert cors_middleware is not None
        assert cors_middleware.kwargs["allow_origins"] == ["https://app.example.com"]
        assert cors_middleware.kwargs["allow_credentials"] is True
        assert cors_middleware.kwargs["allow_methods"] == ["GET", "POST"]
        assert cors_middleware.kwargs["allow_headers"] == ["Content-Type"]

    @patch("langflow.main.setup_sentry")  # Mock Sentry setup
    @patch("langflow.main.get_settings_service")
    @patch("langflow.main.logger")
    def test_cors_wildcard_credentials_runtime_check_current_behavior(
        self, mock_logger, mock_get_settings, mock_setup_sentry
    ):
        """Test runtime validation prevents wildcard with credentials (current behavior)."""
        from langflow.main import create_app

        # Mock settings with configuration that triggers current security measure
        mock_settings = MagicMock()
        mock_settings.settings.cors_origins = "*"
        mock_settings.settings.cors_allow_credentials = True  # Gets disabled for security
        mock_settings.settings.cors_allow_methods = "*"
        mock_settings.settings.cors_allow_headers = "*"
        mock_settings.settings.prometheus_enabled = False
        mock_settings.settings.mcp_server_enabled = False
        mock_settings.settings.sentry_dsn = None  # Disable Sentry
        mock_get_settings.return_value = mock_settings
        # Create app
        mock_setup_sentry.return_value = None  # Use the mock
        app = create_app()
        # Check that warning was logged about deprecation/security
        # The actual warning message is different from what we expected
        warning_calls = [str(call) for call in mock_logger.warning.call_args_list]
        # We expect warnings about the insecure configuration - check for the actual message
        assert any("CORS" in str(call) and "permissive" in str(call) for call in warning_calls), (
            f"Expected CORS security warning but got: {warning_calls}"
        )
        # Find CORS middleware and verify credentials are still allowed (current insecure behavior)
        cors_middleware = None
        for middleware in app.user_middleware:
            if middleware.cls == CORSMiddleware:
                cors_middleware = middleware
                break
        assert cors_middleware is not None
        assert cors_middleware.kwargs["allow_origins"] == "*"
        assert cors_middleware.kwargs["allow_credentials"] is True  # Current behavior: NOT disabled (insecure!)
        # Warn about the security implications
        warnings.warn(
            "CRITICAL SECURITY WARNING: Current behavior allows wildcard origins WITH CREDENTIALS ENABLED! "
            "This is a severe security vulnerability. Any website can make authenticated requests. "
            "In v1.7, this will be changed to secure defaults with specific origins only.",
            UserWarning,
            stacklevel=2,
        )
class TestRefreshTokenSecurity:
    """Test refresh token security improvements."""

    @pytest.mark.asyncio
    @pytest.mark.skip(reason="Token type validation not implemented - security enhancement for future")
    async def test_refresh_token_type_validation(self):
        """Test that refresh token validates token type.

        NOTE: Currently the code doesn't validate that the token type is 'refresh'.
        It only checks if the token_type is empty. This should be enhanced.
        """
        from langflow.services.deps import get_auth_service

        mock_db = MagicMock()
        with patch("langflow.services.auth.utils.jwt.decode") as mock_decode:
            # Test with wrong token type - use a valid UUID string
            mock_decode.return_value = {"sub": "123e4567-e89b-12d3-a456-426614174000", "type": "access"}  # Wrong type
            with patch("langflow.services.auth.utils.get_settings_service") as mock_settings:
                mock_settings.return_value.auth_settings.SECRET_KEY.get_secret_value.return_value = "secret"
                mock_settings.return_value.auth_settings.ALGORITHM = "HS256"
                mock_settings.return_value.auth_settings.ACCESS_TOKEN_EXPIRE_SECONDS = 3600
                mock_settings.return_value.auth_settings.REFRESH_TOKEN_EXPIRE_SECONDS = 86400
                # This SHOULD raise an exception for wrong token type, but currently doesn't
                with pytest.raises(HTTPException) as exc_info:
                    await get_auth_service().create_refresh_token("fake-token", mock_db)
                assert exc_info.value.status_code == 401
                assert "Invalid refresh token" in str(exc_info.value.detail)

    @pytest.mark.asyncio
    @pytest.mark.skip(reason="User activity check not implemented yet - security enhancement for future")
    async def test_refresh_token_user_active_check(self):
        """Test that inactive users cannot refresh tokens.

        NOTE: This is a security enhancement that should be implemented.
        Currently, the system does not check if a user is active when refreshing tokens.
        """
        from langflow.services.deps import get_auth_service

        mock_db = MagicMock()
        mock_user = MagicMock()
        mock_user.is_active = False  # Inactive user
        with patch("langflow.services.auth.utils.jwt.decode") as mock_decode:
            mock_decode.return_value = {"sub": "user-123", "type": "refresh"}  # Correct type
            with patch("langflow.services.auth.utils.get_settings_service") as mock_settings:
                mock_settings.return_value.auth_settings.SECRET_KEY.get_secret_value.return_value = "secret"
                mock_settings.return_value.auth_settings.ALGORITHM = "HS256"
                mock_settings.return_value.auth_settings.ACCESS_TOKEN_EXPIRE_SECONDS = 3600  # 1 hour
                mock_settings.return_value.auth_settings.REFRESH_TOKEN_EXPIRE_SECONDS = 86400  # 1 day
                with patch("langflow.services.auth.utils.get_user_by_id") as mock_get_user:
                    mock_get_user.return_value = mock_user
                    # This SHOULD raise an exception for inactive users, but currently doesn't
                    with pytest.raises(HTTPException) as exc_info:
                        await get_auth_service().create_refresh_token("fake-token", mock_db)
                    assert exc_info.value.status_code == 401
                    assert "inactive" in str(exc_info.value.detail).lower()

    @pytest.mark.asyncio
    async def test_refresh_token_valid_flow(self):
        """Test that valid refresh tokens work correctly."""
        from uuid import uuid4

        from langflow.services.auth.utils import create_refresh_token

        mock_db = AsyncMock()
        mock_user = MagicMock()
        mock_user.is_active = True  # Active user
        user_id = uuid4()
        mock_user.id = user_id
        with patch("langflow.services.auth.service.jwt.decode") as mock_decode:
            mock_decode.return_value = {"sub": str(user_id), "type": "refresh"}  # Correct type
            with patch("langflow.services.auth.utils.get_jwt_verification_key") as mock_verification_key:
                mock_verification_key.return_value = "secret"
                with patch("langflow.services.auth.service.get_user_by_id", new_callable=AsyncMock) as mock_get_user:
                    mock_get_user.return_value = mock_user
                    with patch(
                        "langflow.services.auth.service.AuthService.create_user_tokens", new_callable=AsyncMock
                    ) as mock_create_tokens:
                        expected_access = "new-access-token"
                        expected_refresh = "new-refresh-token"
                        mock_create_tokens.return_value = {
                            "access_token": expected_access,
                            "refresh_token": expected_refresh,
                        }
                        result = await create_refresh_token("fake-token", mock_db)
                        assert result["access_token"] == expected_access
                        assert result["refresh_token"] == expected_refresh
                        # user_id is converted to string in JWT payload, then back to UUID in service
                        mock_create_tokens.assert_called_once_with(str(user_id), mock_db)

    def test_refresh_token_samesite_setting_current_behavior(self):
        """Test current refresh token SameSite settings (warns about security)."""
        from lfx.services.settings.auth import AuthSettings

        with tempfile.TemporaryDirectory() as temp_dir, patch.dict(os.environ, {"LANGFLOW_CONFIG_DIR": temp_dir}):
            auth_settings = AuthSettings(CONFIG_DIR=temp_dir)
            # Current behavior: refresh token uses 'none' (allows cross-site)
            assert auth_settings.REFRESH_SAME_SITE == "none"  # Current: allows cross-site (less secure)
            assert auth_settings.ACCESS_SAME_SITE == "lax"  # Access token is already lax (good)
            # Warn about security implications
            warnings.warn(
                "SECURITY WARNING: Refresh tokens currently use SameSite=none which allows "
                "cross-site requests. This should be changed to 'lax' or 'strict' in production. "
                "In v1.7, this will default to 'lax' for better security.",
                UserWarning,
                stacklevel=2,
            )

    @pytest.mark.skip(reason="Uncomment in v1.7 - represents future secure SameSite behavior")
    def test_refresh_token_samesite_setting_future_secure(self):
        """Test future secure refresh token SameSite settings (skip until v1.7)."""
        # Future secure behavior (uncomment in v1.7):
        # from langflow.services.settings.auth import AuthSettings
        # with tempfile.TemporaryDirectory() as temp_dir, patch.dict(os.environ, {"LANGFLOW_CONFIG_DIR": temp_dir}):
        #     auth_settings = AuthSettings(CONFIG_DIR=temp_dir)
        #     assert auth_settings.REFRESH_SAME_SITE == "lax"  # Secure default
        #     assert auth_settings.ACCESS_SAME_SITE == "lax"
class TestCORSIntegration:
    """Integration tests for CORS with actual HTTP requests."""

    @pytest.mark.asyncio
    @patch("langflow.main.setup_sentry")  # Mock Sentry setup
    async def test_cors_headers_in_response_current_behavior(self, mock_setup_sentry):
        """Test that CORS headers are properly set in responses (current behavior)."""
        from fastapi.testclient import TestClient

        with patch("langflow.main.get_settings_service") as mock_get_settings:
            mock_settings = MagicMock()
            mock_settings.settings.cors_origins = ["https://app.example.com"]
            mock_settings.settings.cors_allow_credentials = True
            mock_settings.settings.cors_allow_methods = "*"
            mock_settings.settings.cors_allow_headers = "*"
            mock_settings.settings.prometheus_enabled = False
            mock_settings.settings.mcp_server_enabled = False
            mock_settings.settings.sentry_dsn = None  # Disable Sentry
            mock_get_settings.return_value = mock_settings
            from langflow.main import create_app

            mock_setup_sentry.return_value = None  # Use the mock
            app = create_app()
            client = TestClient(app)
            # Make OPTIONS request (CORS preflight)
            response = client.options(
                "/api/v1/version",
                headers={
                    "Origin": "https://app.example.com",
                    "Access-Control-Request-Method": "GET",
                },
            )
            # Allowed origin must be echoed back and credentials permitted.
            assert response.status_code == 200
            assert response.headers.get("access-control-allow-origin") == "https://app.example.com"
            assert response.headers.get("access-control-allow-credentials") == "true"
            # Warn that this is testing current behavior
            warnings.warn(
                "This test validates current CORS behavior. In v1.7, default origins will be more restrictive.",
                UserWarning,
                stacklevel=2,
            )

    @pytest.mark.skip(reason="Uncomment in v1.7 - represents future secure CORS blocking behavior")
    async def test_cors_blocks_unauthorized_origin_future_secure(self):
        """Test that future secure CORS configuration blocks unauthorized origins (skip until v1.7)."""
        # This test represents the behavior we want in v1.7 with secure defaults
        # NOTE(review): async def without @pytest.mark.asyncio — harmless while
        # skipped, but the marker should be added if the test is ever enabled.

    @pytest.mark.asyncio
    @patch("langflow.main.setup_sentry")  # Mock Sentry setup
    async def test_cors_blocks_unauthorized_origin_current_behavior(self, mock_setup_sentry):
        """Test that CORS blocks requests from unauthorized origins."""
        from fastapi.testclient import TestClient

        with patch("langflow.main.get_settings_service") as mock_get_settings:
            mock_settings = MagicMock()
            mock_settings.settings.cors_origins = ["https://app.example.com"]
            mock_settings.settings.cors_allow_credentials = True
            mock_settings.settings.cors_allow_methods = "*"
            mock_settings.settings.cors_allow_headers = "*"
            mock_settings.settings.prometheus_enabled = False
            mock_settings.settings.mcp_server_enabled = False
            mock_settings.settings.sentry_dsn = None  # Disable Sentry
            mock_get_settings.return_value = mock_settings
            from langflow.main import create_app

            mock_setup_sentry.return_value = None  # Use the mock
            app = create_app()
            client = TestClient(app)
            # Make OPTIONS request from unauthorized origin
            response = client.options(
                "/api/v1/version",
                headers={
                    "Origin": "https://evil.com",
                    "Access-Control-Request-Method": "GET",
                },
            )
            assert response.status_code == 400  # CORS will block this
            # Warn about current behavior implications
            warnings.warn(
                "This test shows current CORS behavior with specific origins. "
                "Note that current default behavior uses wildcard origins (*) which would NOT block this. "
                "In v1.7, secure defaults will be implemented to prevent unauthorized origins.",
                UserWarning,
                stacklevel=2,
            )
class TestFutureSecureCORSBehavior:
    """Tests for future secure CORS behavior in v1.7 - currently skipped."""

    @pytest.mark.skip(reason="Uncomment in v1.7 - represents future secure default CORS configuration")
    def test_future_secure_defaults(self):
        """Test that v1.7 will have secure CORS defaults."""
        # Future secure behavior (uncomment in v1.7):
        # with tempfile.TemporaryDirectory() as temp_dir, patch.dict(os.environ, {"LANGFLOW_CONFIG_DIR": temp_dir}):
        #     settings = Settings()
        #     # v1.7 secure defaults:
        #     assert settings.cors_origins == ["http://localhost:3000", "http://127.0.0.1:3000", "http://localhost:7860"]
        #     assert settings.cors_allow_credentials is True  # Safe with specific origins
        #     assert settings.cors_allow_methods == ["GET", "POST", "PUT", "DELETE", "OPTIONS"]
        #     assert settings.cors_allow_headers == ["Content-Type", "Authorization", "X-Requested-With"]

    @pytest.mark.skip(reason="Uncomment in v1.7 - represents future secure wildcard rejection")
    def test_future_wildcard_rejection(self):
        """Test that v1.7 will warn about or reject wildcard origins in production."""
        # Future behavior (uncomment in v1.7):
        # with (
        #     tempfile.TemporaryDirectory() as temp_dir,
        #     patch.dict(
        #         os.environ,
        #         {
        #             "LANGFLOW_CONFIG_DIR": temp_dir,
        #             "LANGFLOW_CORS_ORIGINS": "*",
        #         },
        #     ),
        # ):
        #     # Should either warn strongly or reject wildcard in production mode
        #     with pytest.warns(UserWarning, match="SECURITY WARNING.*wildcard.*production"):
        #         settings = Settings()
        #     # Or potentially: pytest.raises(ValueError, match="Wildcard origins not allowed in production")

    @pytest.mark.skip(reason="Uncomment in v1.7 - represents future secure middleware configuration")
    async def test_future_secure_middleware_config(self):
        """Test that v1.7 middleware will use secure defaults."""
        # Future secure middleware behavior (uncomment in v1.7):
        # Test that the app creates middleware with secure defaults
        # and properly validates origins in production mode
        # NOTE(review): async def without @pytest.mark.asyncio — harmless while
        # skipped, but the marker should be added if the test is ever enabled.
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/test_security_cors.py",
"license": "MIT License",
"lines": 404,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/test_setup_superuser_flow.py | import time
import pytest
from langflow.services.auth.utils import verify_password
from langflow.services.database.models.user.model import User
from langflow.services.deps import get_settings_service
from langflow.services.utils import initialize_services, setup_superuser, teardown_superuser
from lfx.services.settings.constants import DEFAULT_SUPERUSER, DEFAULT_SUPERUSER_PASSWORD
from sqlmodel import select
@pytest.mark.asyncio
async def test_initialize_services_creates_default_superuser_when_auto_login_true(client):  # noqa: ARG001
    """initialize_services with AUTO_LOGIN enabled must create the default superuser."""
    from langflow.services.deps import session_scope

    settings_service = get_settings_service()
    settings_service.auth_settings.AUTO_LOGIN = True
    await initialize_services()
    async with session_scope() as db:
        lookup = select(User).where(User.username == DEFAULT_SUPERUSER)
        created = (await db.exec(lookup)).first()
        assert created is not None
        assert created.is_superuser is True
@pytest.mark.asyncio
async def test_teardown_superuser_removes_default_if_never_logged(client):  # noqa: ARG001
    """teardown_superuser must delete the default superuser if it never logged in."""
    from langflow.services.deps import session_scope

    settings_service = get_settings_service()
    settings_service.auth_settings.AUTO_LOGIN = False
    # Make sure the default account exists before exercising teardown.
    await initialize_services()
    async with session_scope() as db:
        lookup = select(User).where(User.username == DEFAULT_SUPERUSER)
        default_user = (await db.exec(lookup)).first()
        if not default_user:
            # Services did not create it (e.g. prior test state); add it by hand.
            default_user = User(
                username=DEFAULT_SUPERUSER,
                password=DEFAULT_SUPERUSER_PASSWORD.get_secret_value(),
                is_superuser=True,
                is_active=True,
            )
            db.add(default_user)
            await db.commit()
            await db.refresh(default_user)
        # Force the "never logged in" state that makes the account eligible for removal.
        default_user.last_login_at = None
        default_user.is_superuser = True
        await db.commit()
    # Run teardown in a fresh session, then verify the account is gone.
    async with session_scope() as db:
        await teardown_superuser(settings_service, db)
    async with session_scope() as db:
        lookup = select(User).where(User.username == DEFAULT_SUPERUSER)
        assert (await db.exec(lookup)).first() is None
@pytest.mark.asyncio
async def test_teardown_superuser_preserves_logged_in_default(client):  # noqa: ARG001
    """Test that teardown preserves default superuser if they have logged in."""
    from datetime import datetime, timezone

    from langflow.services.deps import session_scope

    settings_service = get_settings_service()
    settings_service.auth_settings.AUTO_LOGIN = False
    # Make sure the default account exists.
    await initialize_services()
    async with session_scope() as db:
        lookup = select(User).where(User.username == DEFAULT_SUPERUSER)
        default_user = (await db.exec(lookup)).first()
        if not default_user:
            # Create it by hand if services did not.
            default_user = User(
                username=DEFAULT_SUPERUSER,
                password=DEFAULT_SUPERUSER_PASSWORD.get_secret_value(),
                is_superuser=True,
                is_active=True,
            )
            db.add(default_user)
            await db.commit()
            await db.refresh(default_user)
        # A non-null last_login_at marks the account as "in use"; teardown must keep it.
        default_user.last_login_at = datetime.now(timezone.utc)
        default_user.is_superuser = True
        await db.commit()
    # Run teardown, then confirm the account survived with its privileges intact.
    async with session_scope() as db:
        await teardown_superuser(settings_service, db)
    async with session_scope() as db:
        lookup = select(User).where(User.username == DEFAULT_SUPERUSER)
        survivor = (await db.exec(lookup)).first()
        assert survivor is not None
        assert survivor.is_superuser is True
@pytest.mark.asyncio
async def test_setup_superuser_with_no_configured_credentials(client):  # noqa: ARG001
    """Test setup_superuser behavior when no superuser credentials are configured."""
    from langflow.services.deps import session_scope

    settings_service = get_settings_service()
    settings_service.auth_settings.AUTO_LOGIN = False
    # Blank out both credential fields.
    settings_service.auth_settings.SUPERUSER = ""
    settings_service.auth_settings.SUPERUSER_PASSWORD = ""
    async with session_scope() as db:
        # With no credentials configured, setup falls back to the default superuser.
        await setup_superuser(settings_service, db)
        lookup = select(User).where(User.username == DEFAULT_SUPERUSER)
        fallback_user = (await db.exec(lookup)).first()
        assert fallback_user is not None
        assert fallback_user.is_superuser is True
@pytest.mark.asyncio
async def test_setup_superuser_with_custom_credentials(client):  # noqa: ARG001
    """Test setup_superuser behavior with custom superuser credentials.

    Configures custom credentials, cleans pre-existing default/custom users,
    runs setup, and verifies: the custom user is created with a hashed
    password, the default superuser is NOT created, and the configured
    password is scrubbed from settings afterwards.
    """
    from langflow.services.deps import session_scope
    from pydantic import SecretStr

    settings = get_settings_service()
    settings.auth_settings.AUTO_LOGIN = False
    settings.auth_settings.SUPERUSER = "custom_admin"
    settings.auth_settings.SUPERUSER_PASSWORD = SecretStr("custom_password")
    # Clean DB state to avoid interference from previous tests
    async with session_scope() as session:
        # Ensure default can be removed by teardown (last_login_at must be None)
        stmt = select(User).where(User.username == DEFAULT_SUPERUSER)
        default_user = (await session.exec(stmt)).first()
        if default_user:
            default_user.last_login_at = None
            await session.commit()
        await teardown_superuser(settings, session)
        # Remove any pre-existing custom_admin user
        stmt = select(User).where(User.username == "custom_admin")
        existing_custom = (await session.exec(stmt)).first()
        if existing_custom:
            await session.delete(existing_custom)
            await session.commit()
    async with session_scope() as session:
        await setup_superuser(settings, session)
        # Verify custom superuser was created
        stmt = select(User).where(User.username == "custom_admin")
        user = (await session.exec(stmt)).first()
        assert user is not None
        assert user.is_superuser is True
        # Password should be hashed (not equal to the raw) and verify correctly
        assert user.password != "custom_password"  # noqa: S105
        assert verify_password("custom_password", user.password) is True
        # Verify default superuser was not created
        stmt = select(User).where(User.username == DEFAULT_SUPERUSER)
        default_user = (await session.exec(stmt)).first()
        assert default_user is None
        # Settings credentials should be scrubbed after setup
        assert settings.auth_settings.SUPERUSER_PASSWORD.get_secret_value() == ""
    # Cleanup: remove custom_admin to not leak state across tests
    async with session_scope() as session:
        stmt = select(User).where(User.username == "custom_admin")
        created_custom = (await session.exec(stmt)).first()
        if created_custom:
            await session.delete(created_custom)
            await session.commit()
@pytest.mark.asyncio
@pytest.mark.timeout(20)
async def test_should_complete_client_fixture_shutdown_within_bounded_time(client):  # noqa: ARG001
    """Test that the client fixture lifespan shutdown completes in bounded time.

    Bug: LifespanManager(shutdown_timeout=None) in the client fixture allows
    indefinite hanging during shutdown when MCP operations (stop_project_task_group,
    stop_streamable_http_manager) don't complete. On CI, this causes the entire
    test job to hit its 720s timeout and get killed.

    This test verifies that the fixture teardown (lifespan shutdown) completes
    within a bounded time, even under normal conditions.
    """
    # The body is intentionally empty: the real assertion is @pytest.mark.timeout(20).
    # If lifespan shutdown hangs (shutdown_timeout=None), the fixture teardown blocks
    # and this test fails on the timeout limit.
    # Fix: removed the unused `start = time.monotonic()` / `_ = start` bookkeeping —
    # nothing ever read it, and its comment falsely claimed the value was "consumed
    # in teardown measurement".
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/test_setup_superuser_flow.py",
"license": "MIT License",
"lines": 168,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/lfx/src/lfx/services/mcp_composer/factory.py | """Factory for creating MCP Composer service instances."""
from lfx.services.factory import ServiceFactory
from lfx.services.mcp_composer.service import MCPComposerService
class MCPComposerServiceFactory(ServiceFactory):
    """Factory that builds MCPComposerService instances for the service registry."""

    def __init__(self):
        super().__init__()
        # The concrete service class this factory is responsible for.
        self.service_class = MCPComposerService

    def create(self, **kwargs):  # noqa: ARG002
        """Return a fresh MCPComposerService; extra kwargs are accepted but ignored."""
        return MCPComposerService()
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/services/mcp_composer/factory.py",
"license": "MIT License",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/services/mcp_composer/service.py | """MCP Composer service for proxying and orchestrating MCP servers."""
import asyncio
import json
import os
import platform
import re
import select
import socket
import subprocess
import tempfile
import typing
from collections.abc import Callable
from functools import wraps
from pathlib import Path
from typing import Any
from lfx.log.logger import logger
from lfx.services.base import Service
from lfx.services.deps import get_settings_service
# Fallback error text used whenever a composer startup failure produces no
# more specific message (see MCPComposerError.__init__ and
# _extract_error_message below).
GENERIC_STARTUP_ERROR_MSG = (
    "MCP Composer startup failed. Check OAuth configuration and check logs for more information."
)
class MCPComposerError(Exception):
"""Base exception for MCP Composer errors."""
def __init__(self, message: str | None, project_id: str | None = None):
if not message:
message = GENERIC_STARTUP_ERROR_MSG
self.message = message
self.project_id = project_id
super().__init__(message)
class MCPComposerPortError(MCPComposerError):
    """Raised when the requested port is unavailable or already in use."""


class MCPComposerConfigError(MCPComposerError):
    """Raised when the provided composer configuration is invalid."""


class MCPComposerDisabledError(MCPComposerError):
    """Raised when MCP Composer is turned off in the application settings."""


class MCPComposerStartupError(MCPComposerError):
    """Raised when the MCP Composer process fails to start."""
def require_composer_enabled(func: Callable) -> Callable:
    """Decorator that rejects calls while MCP Composer is disabled in settings.

    Raises MCPComposerDisabledError (tagged with the call's ``project_id``
    keyword argument, when present) instead of invoking the wrapped method.
    """

    @wraps(func)
    def wrapper(self, *args, **kwargs):
        enabled = get_settings_service().settings.mcp_composer_enabled
        if enabled:
            return func(self, *args, **kwargs)
        raise MCPComposerDisabledError("MCP Composer is disabled in settings", kwargs.get("project_id"))

    return wrapper
class MCPComposerService(Service):
"""Service for managing per-project MCP Composer instances."""
name = "mcp_composer_service"
def __init__(self):
super().__init__()
self.project_composers: dict[
str, dict
] = {} # project_id -> {process, host, port, streamable_http_url, auth_config}
self._start_locks: dict[
str, asyncio.Lock
] = {} # Lock to prevent concurrent start operations for the same project
self._active_start_tasks: dict[
str, asyncio.Task
] = {} # Track active start tasks to cancel them when new request arrives
self._port_to_project: dict[int, str] = {} # Track which project is using which port
self._pid_to_project: dict[int, str] = {} # Track which PID belongs to which project
self._last_errors: dict[str, str] = {} # Track last error message per project for UI display
def get_last_error(self, project_id: str) -> str | None:
"""Get the last error message for a project, if any."""
return self._last_errors.get(project_id)
def set_last_error(self, project_id: str, error_message: str) -> None:
"""Set the last error message for a project."""
self._last_errors[project_id] = error_message
def clear_last_error(self, project_id: str) -> None:
"""Clear the last error message for a project."""
self._last_errors.pop(project_id, None)
def _is_port_available(self, port: int, host: str = "localhost") -> bool:
"""Check if a port is available by trying to bind to it.
Args:
port: Port number to check
host: Host to check (default: localhost)
Returns:
True if port is available (not in use), False if in use
Raises:
ValueError: If port is not in valid range (0-65535)
"""
import errno
# Validate port range before attempting bind
max_port = 65535
if not isinstance(port, int) or port < 0 or port > max_port:
msg = f"Invalid port number: {port}. Port must be between 0 and {max_port}."
raise ValueError(msg)
# Check both IPv4 and IPv6 to ensure port is truly available
# MCP Composer tries to bind on both, so we need to check both
# Check IPv4
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
# Don't use SO_REUSEADDR here as it can give false positives
sock.bind((host, port))
except OSError:
return False # Port is in use on IPv4
# Check IPv6 (if supported on this system)
try:
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as sock:
# Don't use SO_REUSEADDR here as it can give false positives
# Use ::1 for localhost on IPv6
ipv6_host = "::1" if host in ("localhost", "127.0.0.1") else host
sock.bind((ipv6_host, port))
except OSError as e:
# Check if it's "address already in use" error
# errno.EADDRINUSE is 48 on macOS, 98 on Linux, 10048 on Windows (WSAEADDRINUSE)
# We check both the standard errno and Windows-specific error code
if e.errno in (errno.EADDRINUSE, 10048):
return False # Port is in use on IPv6
# For other errors (e.g., IPv6 not supported, EADDRNOTAVAIL), continue
# IPv6 might not be supported on this system, which is okay
return True # Port is available on both IPv4 and IPv6 (or IPv6 not supported)
    async def _kill_process_on_port(self, port: int) -> bool:
        """Kill the process using the specified port.

        Cross-platform implementation supporting Windows, macOS, and Linux.
        Best-effort: all errors are logged and swallowed, and the method
        returns True after the FIRST successful kill even if several PIDs
        were listening on the port.

        Args:
            port: The port number to check

        Returns:
            True if a process was found and killed, False otherwise
        """
        try:
            await logger.adebug(f"Checking for processes using port {port}...")
            os_type = platform.system()
            # Platform-specific command to find PID
            if os_type == "Windows":
                # Use netstat on Windows - use full path to avoid PATH issues
                netstat_cmd = os.path.join(os.environ.get("SYSTEMROOT", "C:\\Windows"), "System32", "netstat.exe")  # noqa: PTH118
                # Run blocking subprocess calls in a worker thread to keep the
                # event loop responsive.
                result = await asyncio.to_thread(
                    subprocess.run,
                    [netstat_cmd, "-ano"],
                    capture_output=True,
                    text=True,
                    check=False,
                )
                if result.returncode == 0:
                    # Parse netstat output to find PID
                    # Format: TCP 0.0.0.0:PORT 0.0.0.0:0 LISTENING PID
                    # NOTE(review): the substring match on f":{port}" can also
                    # match longer ports (e.g. ":80" within ":8000") — presumably
                    # acceptable for best-effort cleanup; confirm if stricter
                    # matching is needed.
                    windows_pids: list[int] = []
                    for line in result.stdout.split("\n"):
                        if f":{port}" in line and "LISTENING" in line:
                            parts = line.split()
                            if parts:
                                try:
                                    pid = int(parts[-1])
                                    windows_pids.append(pid)
                                except (ValueError, IndexError):
                                    continue
                    await logger.adebug(f"Found {len(windows_pids)} process(es) using port {port}: {windows_pids}")
                    for pid in windows_pids:
                        try:
                            await logger.adebug(f"Attempting to kill process {pid} on port {port}...")
                            # Use taskkill on Windows - use full path to avoid PATH issues
                            taskkill_cmd = os.path.join(  # noqa: PTH118
                                os.environ.get("SYSTEMROOT", "C:\\Windows"), "System32", "taskkill.exe"
                            )
                            kill_result = await asyncio.to_thread(
                                subprocess.run,
                                [taskkill_cmd, "/F", "/PID", str(pid)],
                                capture_output=True,
                                check=False,
                            )
                            if kill_result.returncode == 0:
                                await logger.adebug(f"Successfully killed process {pid} on port {port}")
                                # First successful kill short-circuits the loop.
                                return True
                            await logger.awarning(
                                f"taskkill returned {kill_result.returncode} for process {pid} on port {port}"
                            )
                        except Exception as e:  # noqa: BLE001
                            await logger.aerror(f"Error killing PID {pid}: {e}")
                    # Found candidate PIDs but none could be killed.
                    return False
            else:
                # Use lsof on Unix-like systems (macOS, Linux)
                result = await asyncio.to_thread(
                    subprocess.run,
                    ["lsof", "-ti", f":{port}"],
                    capture_output=True,
                    text=True,
                    check=False,
                )
                await logger.adebug(f"lsof returned code {result.returncode} for port {port}")
                # Extract PIDs from lsof output (-t prints one PID per line).
                lsof_output = result.stdout.strip()
                lsof_errors = result.stderr.strip()
                if lsof_output:
                    await logger.adebug(f"lsof stdout: {lsof_output}")
                if lsof_errors:
                    await logger.adebug(f"lsof stderr: {lsof_errors}")
                if result.returncode == 0 and lsof_output:
                    unix_pids = lsof_output.split("\n")
                    await logger.adebug(f"Found {len(unix_pids)} process(es) using port {port}: {unix_pids}")
                    for pid_str in unix_pids:
                        try:
                            pid = int(pid_str.strip())
                            await logger.adebug(f"Attempting to kill process {pid} on port {port}...")
                            # Try to kill the process (SIGKILL, no grace period).
                            kill_result = await asyncio.to_thread(
                                subprocess.run,
                                ["kill", "-9", str(pid)],
                                capture_output=True,
                                check=False,
                            )
                            if kill_result.returncode == 0:
                                await logger.adebug(f"Successfully sent kill signal to process {pid} on port {port}")
                                # First successful kill short-circuits the loop.
                                return True
                            await logger.awarning(
                                f"kill command returned {kill_result.returncode} for process {pid} on port {port}"
                            )
                        except (ValueError, ProcessLookupError) as e:
                            await logger.aerror(f"Error processing PID {pid_str}: {e}")
                    # If we get here, we found processes but couldn't kill any
                    return False
                await logger.adebug(f"No process found using port {port}")
                return False
        except Exception as e:  # noqa: BLE001
            await logger.aerror(f"Error finding/killing process on port {port}: {e}")
            return False
        # Reached when netstat itself failed on Windows (non-zero returncode).
        return False
    async def _kill_zombie_mcp_processes(self, port: int) -> bool:
        """Kill zombie MCP Composer processes that may be stuck.

        On Windows, sometimes MCP Composer processes start but fail to bind to port.
        These processes become "zombies" that need to be killed before retry.
        No-op on non-Windows platforms. Processes tracked in
        ``self._pid_to_project`` are never killed — only untracked ones.

        Args:
            port: The port that should be used

        Returns:
            True if zombie processes were found and killed
        """
        try:
            os_type = platform.system()
            if os_type != "Windows":
                # Zombie cleanup is only needed (and implemented) for Windows.
                return False
            await logger.adebug(f"Looking for zombie MCP Composer processes on Windows for port {port}...")
            # First, try to find and kill any process using the port directly
            # Use full path to netstat on Windows to avoid PATH issues
            netstat_cmd = os.path.join(os.environ.get("SYSTEMROOT", "C:\\Windows"), "System32", "netstat.exe")  # noqa: PTH118
            netstat_result = await asyncio.to_thread(
                subprocess.run,
                [netstat_cmd, "-ano"],
                capture_output=True,
                text=True,
                check=False,
            )
            killed_any = False
            if netstat_result.returncode == 0:
                # Parse netstat output to find PIDs using our port
                pids_on_port: list[int] = []
                for line in netstat_result.stdout.split("\n"):
                    if f":{port}" in line and "LISTENING" in line:
                        parts = line.split()
                        if parts:
                            try:
                                pid = int(parts[-1])
                                # Only kill if not tracked by us
                                if pid not in self._pid_to_project:
                                    pids_on_port.append(pid)
                                else:
                                    project = self._pid_to_project[pid]
                                    await logger.adebug(
                                        f"Process {pid} on port {port} is tracked, skipping (project: {project})"
                                    )
                            except (ValueError, IndexError):
                                continue
                if pids_on_port:
                    await logger.adebug(
                        f"Found {len(pids_on_port)} untracked process(es) on port {port}: {pids_on_port}"
                    )
                    for pid in pids_on_port:
                        try:
                            await logger.adebug(f"Killing process {pid} on port {port}...")
                            # Use full path to taskkill on Windows to avoid PATH issues
                            taskkill_cmd = os.path.join(  # noqa: PTH118
                                os.environ.get("SYSTEMROOT", "C:\\Windows"), "System32", "taskkill.exe"
                            )
                            kill_result = await asyncio.to_thread(
                                subprocess.run,
                                [taskkill_cmd, "/F", "/PID", str(pid)],
                                capture_output=True,
                                check=False,
                            )
                            if kill_result.returncode == 0:
                                await logger.adebug(f"Successfully killed process {pid} on port {port}")
                                killed_any = True
                            else:
                                stderr_output = (
                                    kill_result.stderr.decode()
                                    if isinstance(kill_result.stderr, bytes)
                                    else kill_result.stderr
                                )
                                await logger.awarning(f"Failed to kill process {pid} on port {port}: {stderr_output}")
                        except Exception as e:  # noqa: BLE001
                            await logger.adebug(f"Error killing process {pid}: {e}")
            # Also look for any orphaned mcp-composer processes (without checking port)
            # This catches processes that failed to bind but are still running
            # Use PowerShell instead of deprecated wmic.exe for Windows 10/11 compatibility
            try:
                # Use PowerShell to get Python processes with command line info
                # Build PowerShell command to find MCP Composer processes whose
                # command line mentions this port in either "--port N" or
                # "--port=N" form.
                ps_filter = (
                    f"$_.Name -eq 'python.exe' -and $_.CommandLine -like '*mcp-composer*' "
                    f"-and ($_.CommandLine -like '*--port {port}*' -or $_.CommandLine -like '*--port={port}*')"
                )
                ps_cmd = (
                    f"Get-WmiObject Win32_Process | Where-Object {{ {ps_filter} }} | "
                    "Select-Object ProcessId,CommandLine | ConvertTo-Json"
                )
                powershell_cmd = ["powershell.exe", "-NoProfile", "-Command", ps_cmd]
                ps_result = await asyncio.to_thread(
                    subprocess.run,
                    powershell_cmd,
                    capture_output=True,
                    text=True,
                    check=False,
                    timeout=5,
                )
                if ps_result.returncode == 0 and ps_result.stdout.strip():
                    try:
                        # PowerShell may return single object or array
                        processes = json.loads(ps_result.stdout)
                        if isinstance(processes, dict):
                            processes = [processes]
                        elif not isinstance(processes, list):
                            processes = []
                        for proc in processes:
                            try:
                                pid = int(proc.get("ProcessId", 0))
                                if pid <= 0 or pid in self._pid_to_project:
                                    # Skip invalid PIDs and processes we track.
                                    continue
                                await logger.adebug(
                                    f"Found orphaned MCP Composer process {pid} for port {port}, killing it"
                                )
                                # Use full path to taskkill on Windows to avoid PATH issues
                                taskkill_cmd = os.path.join(  # noqa: PTH118
                                    os.environ.get("SYSTEMROOT", "C:\\Windows"), "System32", "taskkill.exe"
                                )
                                kill_result = await asyncio.to_thread(
                                    subprocess.run,
                                    [taskkill_cmd, "/F", "/PID", str(pid)],
                                    capture_output=True,
                                    check=False,
                                )
                                if kill_result.returncode == 0:
                                    await logger.adebug(f"Successfully killed orphaned process {pid}")
                                    killed_any = True
                            except (ValueError, KeyError) as e:
                                await logger.adebug(f"Error processing PowerShell result: {e}")
                                continue
                    except json.JSONDecodeError as e:
                        await logger.adebug(f"Failed to parse PowerShell output: {e}")
            except asyncio.TimeoutError:
                await logger.adebug("PowerShell command timed out while checking for orphaned processes")
            except Exception as e:  # noqa: BLE001
                await logger.adebug(f"Error using PowerShell to find orphaned processes: {e}")
            if killed_any:
                # Give Windows time to clean up
                await logger.adebug("Waiting 3 seconds for Windows to release port...")
                await asyncio.sleep(3)
            return killed_any  # noqa: TRY300
        except Exception as e:  # noqa: BLE001
            await logger.adebug(f"Error killing zombie processes: {e}")
            return False
def _is_port_used_by_another_project(self, port: int, current_project_id: str) -> tuple[bool, str | None]:
"""Check if a port is being used by another project.
Args:
port: The port to check
current_project_id: The current project ID
Returns:
Tuple of (is_used_by_other, other_project_id)
"""
other_project_id = self._port_to_project.get(port)
if other_project_id and other_project_id != current_project_id:
return True, other_project_id
return False, None
async def start(self):
"""Check if the MCP Composer service is enabled."""
settings = get_settings_service().settings
if not settings.mcp_composer_enabled:
await logger.adebug(
"MCP Composer is disabled in settings. OAuth authentication will not be enabled for MCP Servers."
)
else:
await logger.adebug(
"MCP Composer is enabled in settings. OAuth authentication will be enabled for MCP Servers."
)
async def stop(self):
"""Stop all MCP Composer instances."""
for project_id in list(self.project_composers.keys()):
await self.stop_project_composer(project_id)
await logger.adebug("All MCP Composer instances stopped")
@require_composer_enabled
async def stop_project_composer(self, project_id: str):
"""Stop the MCP Composer instance for a specific project."""
if project_id not in self.project_composers:
return
# Use the same lock to ensure consistency
if project_id in self._start_locks:
async with self._start_locks[project_id]:
await self._do_stop_project_composer(project_id)
# Clean up the lock as well
del self._start_locks[project_id]
else:
# Fallback if no lock exists
await self._do_stop_project_composer(project_id)
    async def _do_stop_project_composer(self, project_id: str):
        """Internal method to stop a project composer.

        Terminates the tracked subprocess (graceful terminate, then a forced
        kill after 2s), and — in the ``finally`` — always releases the port,
        PID, and composer tracking entries even when termination failed.
        """
        if project_id not in self.project_composers:
            return
        composer_info = self.project_composers[project_id]
        process = composer_info.get("process")
        try:
            if process:
                try:
                    # Check if process is still running before trying to terminate
                    if process.poll() is None:
                        await logger.adebug(f"Terminating MCP Composer process {process.pid} for project {project_id}")
                        process.terminate()
                        # Wait longer for graceful shutdown
                        try:
                            # process.wait() blocks, so run it in a worker thread.
                            await asyncio.wait_for(asyncio.to_thread(process.wait), timeout=2.0)
                            await logger.adebug(f"MCP Composer for project {project_id} terminated gracefully")
                        except asyncio.TimeoutError:
                            await logger.aerror(
                                f"MCP Composer for project {project_id} did not terminate gracefully, force killing"
                            )
                            await asyncio.to_thread(process.kill)
                            await asyncio.to_thread(process.wait)
                    else:
                        await logger.adebug(f"MCP Composer process for project {project_id} was already terminated")
                    await logger.adebug(f"MCP Composer stopped for project {project_id}")
                except ProcessLookupError:
                    # Process already terminated
                    await logger.adebug(f"MCP Composer process for project {project_id} was already terminated")
        except Exception as e:  # noqa: BLE001
            await logger.aerror(f"Error stopping MCP Composer for project {project_id}: {e}")
        finally:
            # Always clean up tracking, even if stopping failed
            port = composer_info.get("port")
            # Only release the port if this project actually owns it.
            if port and self._port_to_project.get(port) == project_id:
                self._port_to_project.pop(port, None)
                await logger.adebug(f"Released port {port} from project {project_id}")
            # Clean up PID tracking
            if process and process.pid:
                self._pid_to_project.pop(process.pid, None)
                await logger.adebug(f"Released PID {process.pid} tracking for project {project_id}")
            # Remove from tracking
            self.project_composers.pop(project_id, None)
            await logger.adebug(f"Removed tracking for project {project_id}")
async def _wait_for_process_exit(self, process):
"""Wait for a process to exit."""
await asyncio.to_thread(process.wait)
    async def _read_process_output_and_extract_error(
        self,
        process: subprocess.Popen,
        oauth_server_url: str | None,
        timeout: float = 2.0,
        stdout_file=None,
        stderr_file=None,
    ) -> tuple[str, str, str]:
        """Read process output and extract user-friendly error message.

        Two capture modes: when both temp-file handles are given (the Windows
        path), the files are closed, read, and deleted; otherwise output is
        drained from the pipes via ``process.communicate``. Every read step
        is best-effort — failures are logged and leave the content empty.

        Args:
            process: The subprocess to read from
            oauth_server_url: OAuth server URL for error messages
            timeout: Timeout for reading output
            stdout_file: Optional file handle for stdout (Windows)
            stderr_file: Optional file handle for stderr (Windows)

        Returns:
            Tuple of (stdout, stderr, error_message)
        """
        stdout_content = ""
        stderr_content = ""
        try:
            # On Windows with temp files, read from files instead of pipes
            if stdout_file and stderr_file:
                # Close file handles to flush and allow reading
                try:
                    stdout_file.close()
                    stderr_file.close()
                except Exception as e:  # noqa: BLE001
                    await logger.adebug(f"Error closing temp files: {e}")
                # Read from temp files using asyncio.to_thread
                try:

                    def read_file(filepath):
                        return Path(filepath).read_bytes()

                    stdout_bytes = await asyncio.to_thread(read_file, stdout_file.name)
                    stdout_content = stdout_bytes.decode("utf-8", errors="replace") if stdout_bytes else ""
                except Exception as e:  # noqa: BLE001
                    await logger.adebug(f"Error reading stdout file: {e}")
                try:

                    def read_file(filepath):
                        return Path(filepath).read_bytes()

                    stderr_bytes = await asyncio.to_thread(read_file, stderr_file.name)
                    stderr_content = stderr_bytes.decode("utf-8", errors="replace") if stderr_bytes else ""
                except Exception as e:  # noqa: BLE001
                    await logger.adebug(f"Error reading stderr file: {e}")
                # Clean up temp files
                try:
                    Path(stdout_file.name).unlink()
                    Path(stderr_file.name).unlink()
                except Exception as e:  # noqa: BLE001
                    await logger.adebug(f"Error removing temp files: {e}")
            else:
                # Use asyncio.to_thread to avoid blocking the event loop
                # Process returns bytes, decode with error handling
                stdout_bytes, stderr_bytes = await asyncio.to_thread(process.communicate, timeout=timeout)
                stdout_content = stdout_bytes.decode("utf-8", errors="replace") if stdout_bytes else ""
                stderr_content = stderr_bytes.decode("utf-8", errors="replace") if stderr_bytes else ""
        except subprocess.TimeoutExpired:
            # communicate() timed out: kill the process and fall back to the
            # generic error message (no output available to analyze).
            process.kill()
            error_msg = self._extract_error_message("", "", oauth_server_url)
            return "", "", error_msg
        error_msg = self._extract_error_message(stdout_content, stderr_content, oauth_server_url)
        return stdout_content, stderr_content, error_msg
async def _read_stream_non_blocking(self, stream, stream_name: str) -> str:
"""Read from a stream without blocking and log the content.
Args:
stream: The stream to read from (stdout or stderr)
stream_name: Name of the stream for logging ("stdout" or "stderr")
Returns:
The content read from the stream (empty string if nothing available)
"""
if not stream:
return ""
try:
# On Windows, select.select() doesn't work with pipes (only sockets)
# Use platform-specific approach
os_type = platform.system()
if os_type == "Windows":
# On Windows, select.select() doesn't work with pipes
# Skip stream reading during monitoring - output will be captured when process terminates
# This prevents blocking on peek() which can cause the monitoring loop to hang
return ""
# On Unix-like systems, use select
if select.select([stream], [], [], 0)[0]:
line_bytes = stream.readline()
if line_bytes:
# Decode bytes with error handling
line = line_bytes.decode("utf-8", errors="replace") if isinstance(line_bytes, bytes) else line_bytes
stripped = line.strip()
if stripped:
# Log errors at error level, everything else at debug
if stream_name == "stderr" and ("ERROR" in stripped or "error" in stripped):
await logger.aerror(f"MCP Composer {stream_name}: {stripped}")
else:
await logger.adebug(f"MCP Composer {stream_name}: {stripped}")
return stripped
except Exception as e: # noqa: BLE001
await logger.adebug(f"Error reading {stream_name}: {e}")
return ""
    async def _ensure_port_available(self, port: int, current_project_id: str) -> None:
        """Ensure a port is available, only killing untracked processes.

        Policy: never kill an active composer owned by ANOTHER project, and
        never kill an unknown external process; only processes this project
        itself owns (e.g. stuck in a startup loop) may be killed.

        Args:
            port: The port number to ensure is available
            current_project_id: The project ID requesting the port

        Raises:
            MCPComposerPortError: If port cannot be made available
            MCPComposerConfigError: If port is invalid
        """
        try:
            is_port_available = self._is_port_available(port)
            await logger.adebug(f"Port {port} availability check: {is_port_available}")
        except (ValueError, OverflowError, TypeError) as e:
            # Port validation failed - invalid port number or type
            # ValueError: from our validation
            # OverflowError: from socket.bind() when port > 65535
            # TypeError: when port is not an integer
            error_msg = f"Invalid port number: {port}. Port must be an integer between 0 and 65535."
            await logger.aerror(f"Invalid port for project {current_project_id}: {e}")
            raise MCPComposerConfigError(error_msg, current_project_id) from e
        if not is_port_available:
            # Check if the port is being used by a tracked project
            is_used_by_other, other_project_id = self._is_port_used_by_another_project(port, current_project_id)
            if is_used_by_other and other_project_id:
                # Port is being used by another tracked project
                # Check if we can take ownership (e.g., the other project is failing)
                other_composer = self.project_composers.get(other_project_id)
                if other_composer and other_composer.get("process"):
                    other_process = other_composer["process"]
                    # If the other process is still running and healthy, don't kill it
                    if other_process.poll() is None:
                        await logger.aerror(
                            f"Port {port} requested by project {current_project_id} is already in use by "
                            f"project {other_project_id}. Will not kill active MCP Composer process."
                        )
                        port_error_msg = (
                            f"Port {port} is already in use by another project. "
                            f"Please choose a different port (e.g., {port + 1}) "
                            f"or disable OAuth on the other project first."
                        )
                        raise MCPComposerPortError(port_error_msg, current_project_id)
                # Process died but port tracking wasn't cleaned up - allow takeover
                await logger.adebug(
                    f"Port {port} was tracked to project {other_project_id} but process died. "
                    f"Allowing project {current_project_id} to take ownership."
                )
                # Clean up the old tracking
                await self._do_stop_project_composer(other_project_id)
            # Check if port is used by a process owned by the current project (e.g., stuck in startup loop)
            # NOTE(review): after the takeover above, _do_stop_project_composer
            # pops the port mapping, so this lookup returns None and the code
            # falls into the "unknown process" branch — confirm that is the
            # intended behavior for the takeover path.
            port_owner_project = self._port_to_project.get(port)
            if port_owner_project == current_project_id:
                # Port is owned by current project - safe to kill
                await logger.adebug(
                    f"Port {port} is in use by current project {current_project_id} (likely stuck in startup). "
                    f"Killing process to retry."
                )
                killed = await self._kill_process_on_port(port)
                if killed:
                    await logger.adebug(
                        f"Successfully killed own process on port {port}. Waiting for port to be released..."
                    )
                    await asyncio.sleep(2)
                    is_port_available = self._is_port_available(port)
                    if not is_port_available:
                        await logger.aerror(f"Port {port} is still in use after killing own process.")
                        port_error_msg = f"Port {port} is still in use after killing process"
                        raise MCPComposerPortError(port_error_msg)
            else:
                # Port is in use by unknown process - don't kill it (security concern)
                await logger.aerror(
                    f"Port {port} is in use by an unknown process (not owned by Langflow). "
                    f"Will not kill external application for security reasons."
                )
                port_error_msg = (
                    f"Port {port} is already in use by another application. "
                    f"Please choose a different port (e.g., {port + 1}) or free up the port manually."
                )
                raise MCPComposerPortError(port_error_msg, current_project_id)
        await logger.adebug(f"Port {port} is available, proceeding with MCP Composer startup")
async def _log_startup_error_details(
self,
project_id: str,
cmd: list[str],
host: str,
port: int,
stdout: str = "",
stderr: str = "",
error_msg: str = "",
exit_code: int | None = None,
pid: int | None = None,
) -> None:
"""Log detailed startup error information.
Args:
project_id: The project ID
cmd: The command that was executed
host: Target host
port: Target port
stdout: Standard output from the process
stderr: Standard error from the process
error_msg: User-friendly error message
exit_code: Process exit code (if terminated)
pid: Process ID (if still running)
"""
await logger.aerror(f"MCP Composer startup failed for project {project_id}:")
if exit_code is not None:
await logger.aerror(f" - Process died with exit code: {exit_code}")
if pid is not None:
await logger.aerror(f" - Process is running (PID: {pid}) but failed to bind to port {port}")
await logger.aerror(f" - Target: {host}:{port}")
# Obfuscate secrets in command before logging
safe_cmd = self._obfuscate_command_secrets(cmd)
await logger.aerror(f" - Command: {' '.join(safe_cmd)}")
if stderr.strip():
await logger.aerror(f" - Error output: {stderr.strip()}")
if stdout.strip():
await logger.aerror(f" - Standard output: {stdout.strip()}")
if error_msg:
await logger.aerror(f" - Error message: {error_msg}")
def _validate_oauth_settings(self, auth_config: dict[str, Any]) -> None:
"""Validate that all required OAuth settings are present and non-empty.
Raises:
MCPComposerConfigError: If any required OAuth field is missing or empty
"""
if auth_config.get("auth_type") != "oauth":
return
required_fields = [
"oauth_host",
"oauth_port",
"oauth_server_url",
"oauth_auth_url",
"oauth_token_url",
"oauth_client_id",
"oauth_client_secret",
]
missing_fields = []
empty_fields = []
for field in required_fields:
value = auth_config.get(field)
if value is None:
missing_fields.append(field)
elif not str(value).strip():
empty_fields.append(field)
error_parts = []
if missing_fields:
error_parts.append(f"Missing required fields: {', '.join(missing_fields)}")
if empty_fields:
error_parts.append(f"Empty required fields: {', '.join(empty_fields)}")
if error_parts:
config_error_msg = f"Invalid OAuth configuration: {'; '.join(error_parts)}"
raise MCPComposerConfigError(config_error_msg)
@staticmethod
def _normalize_config_value(value: Any) -> Any:
"""Normalize a config value (None or empty string becomes None).
Args:
value: The value to normalize
Returns:
None if value is None or empty string, otherwise the value
"""
return None if (value is None or value == "") else value
def _has_auth_config_changed(self, existing_auth: dict[str, Any] | None, new_auth: dict[str, Any] | None) -> bool:
"""Check if auth configuration has changed in a way that requires restart."""
if not existing_auth and not new_auth:
return False
if not existing_auth or not new_auth:
return True
auth_type = new_auth.get("auth_type", "")
# Auth type changed?
if existing_auth.get("auth_type") != auth_type:
return True
# Define which fields to check for each auth type
fields_to_check = []
if auth_type == "oauth":
# Get all oauth_* fields plus host/port from both configs
all_keys = set(existing_auth.keys()) | set(new_auth.keys())
fields_to_check = [k for k in all_keys if k.startswith("oauth_") or k in ["host", "port"]]
elif auth_type == "apikey":
fields_to_check = ["api_key"]
# Compare relevant fields
for field in fields_to_check:
old_normalized = self._normalize_config_value(existing_auth.get(field))
new_normalized = self._normalize_config_value(new_auth.get(field))
if old_normalized != new_normalized:
return True
return False
def _obfuscate_command_secrets(self, cmd: list[str]) -> list[str]:
"""Obfuscate secrets in command arguments for safe logging.
Args:
cmd: List of command arguments
Returns:
List of command arguments with secrets replaced with ***REDACTED***
"""
safe_cmd = []
i = 0
while i < len(cmd):
arg = cmd[i]
# Check if this is --env followed by a secret key
if arg == "--env" and i + 2 < len(cmd):
env_key = cmd[i + 1]
env_value = cmd[i + 2]
if any(secret in env_key.lower() for secret in ["secret", "key", "token"]):
# Redact the value
safe_cmd.extend([arg, env_key, "***REDACTED***"])
i += 3 # Skip all three: --env, key, and value
continue
# Not a secret, keep as-is
safe_cmd.extend([arg, env_key, env_value])
i += 3
continue
# Regular argument
safe_cmd.append(arg)
i += 1
return safe_cmd
def _extract_error_message(
self, stdout_content: str, stderr_content: str, oauth_server_url: str | None = None
) -> str:
"""Attempts to extract a user-friendly error message from subprocess output.
Args:
stdout_content: Standard output from the subprocess
stderr_content: Standard error from the subprocess
oauth_server_url: OAuth server URL
Returns:
User-friendly error message or a generic message if no specific pattern is found
"""
# Combine both outputs and clean them up
combined_output = (stderr_content + "\n" + stdout_content).strip()
if not oauth_server_url:
oauth_server_url = "OAuth server URL"
# Common error patterns with user-friendly messages
error_patterns = [
(r"address already in use", f"Address {oauth_server_url} is already in use."),
(r"permission denied", f"Permission denied starting MCP Composer on address {oauth_server_url}."),
(
r"connection refused",
f"Connection refused on address {oauth_server_url}. The address may be blocked or unavailable.",
),
(
r"bind.*failed",
f"Failed to bind to address {oauth_server_url}. The address may be in use or unavailable.",
),
(r"timeout", "MCP Composer startup timed out. Please try again."),
(r"invalid.*configuration", "Invalid MCP Composer configuration. Please check your settings."),
(r"oauth.*error", "OAuth configuration error. Please check your OAuth settings."),
(r"authentication.*failed", "Authentication failed. Please check your credentials."),
]
# Check for specific error patterns first
for pattern, friendly_msg in error_patterns:
if re.search(pattern, combined_output, re.IGNORECASE):
return friendly_msg
return GENERIC_STARTUP_ERROR_MSG
@require_composer_enabled
async def start_project_composer(
    self,
    project_id: str,
    streamable_http_url: str,
    auth_config: dict[str, Any] | None,
    max_retries: int = 3,
    max_startup_checks: int = 40,
    startup_delay: float = 2.0,
    *,
    legacy_sse_url: str | None = None,
) -> None:
    """Start an MCP Composer instance for a specific project.

    Public entry point. Cancels any in-flight start operation for the same
    project, registers the current asyncio task so a later call can cancel
    it in turn, then delegates the actual startup work to
    ``_do_start_project_composer``.

    Args:
        project_id: The project ID
        streamable_http_url: Streamable HTTP endpoint for the remote Langflow MCP server
        auth_config: Authentication configuration
        max_retries: Maximum number of retry attempts (default: 3)
        max_startup_checks: Number of checks per retry attempt (default: 40)
        startup_delay: Delay between checks in seconds (default: 2.0)
        legacy_sse_url: Optional legacy SSE URL used for backward compatibility

    Raises:
        MCPComposerError: Various specific errors if startup fails
    """
    # Cancel any active start operation for this project
    if project_id in self._active_start_tasks:
        active_task = self._active_start_tasks[project_id]
        if not active_task.done():
            await logger.adebug(f"Cancelling previous MCP Composer start operation for project {project_id}")
            active_task.cancel()
            try:
                # Await the cancelled task so its cleanup (finally blocks) runs
                # before we start a new attempt.
                await active_task
            except asyncio.CancelledError:
                await logger.adebug(f"Previous start operation for project {project_id} cancelled successfully")
            finally:
                # Clean up the cancelled task from tracking
                del self._active_start_tasks[project_id]
    # Create and track the current task
    current_task = asyncio.current_task()
    if not current_task:
        # current_task() returns None outside a running task; startup still
        # proceeds, but this invocation cannot be cancelled by a later call.
        await logger.awarning(
            f"Could not get current task for project {project_id}. "
            f"Concurrent start operations may not be properly cancelled."
        )
    else:
        self._active_start_tasks[project_id] = current_task
    try:
        await self._do_start_project_composer(
            project_id,
            streamable_http_url,
            auth_config,
            max_retries,
            max_startup_checks,
            startup_delay,
            legacy_sse_url=legacy_sse_url,
        )
    finally:
        # Clean up the task reference when done — but only if it is still ours
        # (a newer start may already have replaced it).
        if project_id in self._active_start_tasks and self._active_start_tasks[project_id] == current_task:
            del self._active_start_tasks[project_id]
async def _do_start_project_composer(
    self,
    project_id: str,
    streamable_http_url: str,
    auth_config: dict[str, Any] | None,
    max_retries: int = 3,
    max_startup_checks: int = 40,
    startup_delay: float = 2.0,
    *,
    legacy_sse_url: str | None = None,
) -> None:
    """Internal method to start an MCP Composer instance.

    Validates the OAuth config, serializes startup per project with an
    asyncio lock, reuses an already-running composer whose config is
    unchanged, and otherwise launches the subprocess with up to
    ``max_retries`` attempts (with best-effort zombie-process cleanup
    between attempts).

    Args:
        project_id: The project ID
        streamable_http_url: Streamable HTTP endpoint for the remote Langflow MCP server
        auth_config: Authentication configuration
        max_retries: Maximum number of retry attempts (default: 3)
        max_startup_checks: Number of checks per retry attempt (default: 40)
        startup_delay: Delay between checks in seconds (default: 2.0)
        legacy_sse_url: Optional legacy SSE URL used for backward compatibility

    Raises:
        MCPComposerError: Various specific errors if startup fails
    """
    # Default the legacy SSE endpoint to "<http endpoint>/sse" when not supplied.
    legacy_sse_url = legacy_sse_url or f"{streamable_http_url.rstrip('/')}/sse"
    if not auth_config:
        no_auth_error_msg = "No auth settings provided"
        raise MCPComposerConfigError(no_auth_error_msg, project_id)
    # Validate OAuth settings early to provide clear error messages
    self._validate_oauth_settings(auth_config)
    # Preliminary host/port used only for the log line below; both are
    # re-read and strictly validated after the lock is acquired.
    project_host = auth_config.get("oauth_host") if auth_config else "unknown"
    project_port = auth_config.get("oauth_port") if auth_config else "unknown"
    await logger.adebug(f"Starting MCP Composer for project {project_id} on {project_host}:{project_port}")
    # Use a per-project lock to prevent race conditions
    if project_id not in self._start_locks:
        self._start_locks[project_id] = asyncio.Lock()
    async with self._start_locks[project_id]:
        # Check if already running (double-check after acquiring lock)
        project_port_str = auth_config.get("oauth_port")
        if not project_port_str:
            no_port_error_msg = "No OAuth port provided"
            raise MCPComposerConfigError(no_port_error_msg, project_id)
        try:
            project_port = int(project_port_str)
        except (ValueError, TypeError) as e:
            port_error_msg = f"Invalid OAuth port: {project_port_str}"
            raise MCPComposerConfigError(port_error_msg, project_id) from e
        project_host = auth_config.get("oauth_host")
        if not project_host:
            no_host_error_msg = "No OAuth host provided"
            raise MCPComposerConfigError(no_host_error_msg, project_id)
        if project_id in self.project_composers:
            composer_info = self.project_composers[project_id]
            process = composer_info.get("process")
            existing_auth = composer_info.get("auth_config", {})
            existing_port = composer_info.get("port")
            # Check if process is still running
            if process and process.poll() is None:
                # Process is running - only restart if config changed
                auth_changed = self._has_auth_config_changed(existing_auth, auth_config)
                if auth_changed:
                    await logger.adebug(f"Config changed for project {project_id}, restarting MCP Composer")
                    await self._do_stop_project_composer(project_id)
                else:
                    await logger.adebug(
                        f"MCP Composer already running for project {project_id} with current config"
                    )
                    return  # Already running with correct config
            else:
                # Process died or never started properly, restart it
                await logger.adebug(f"MCP Composer process died for project {project_id}, restarting")
                await self._do_stop_project_composer(project_id)
                # Also kill any process that might be using the old port
                if existing_port:
                    try:
                        await asyncio.wait_for(self._kill_process_on_port(existing_port), timeout=5.0)
                    except asyncio.TimeoutError:
                        await logger.aerror(f"Timeout while killing process on port {existing_port}")
        # Retry loop: try starting the process multiple times
        last_error = None
        try:
            # Before first attempt, try to kill any zombie MCP Composer processes
            # This is a best-effort operation - don't fail startup if it errors
            try:
                await logger.adebug(
                    f"Checking for zombie MCP Composer processes on port {project_port} before startup..."
                )
                zombies_killed = await self._kill_zombie_mcp_processes(project_port)
                if zombies_killed:
                    await logger.adebug(f"Killed zombie processes, port {project_port} should now be free")
            except Exception as zombie_error:  # noqa: BLE001
                # Log but continue - zombie cleanup is optional
                await logger.awarning(
                    f"Failed to check/kill zombie processes (non-fatal): {zombie_error}. Continuing with startup..."
                )
            # Ensure port is available (only kill untracked processes)
            try:
                await self._ensure_port_available(project_port, project_id)
            except (MCPComposerPortError, MCPComposerConfigError) as e:
                # Port/config error before starting - store and raise immediately (no retries)
                self._last_errors[project_id] = e.message
                raise
            for retry_attempt in range(1, max_retries + 1):
                try:
                    await logger.adebug(
                        f"Starting MCP Composer for project {project_id} (attempt {retry_attempt}/{max_retries})"
                    )
                    # Re-check port availability before each attempt to prevent race conditions
                    if retry_attempt > 1:
                        await logger.adebug(f"Re-checking port {project_port} availability before retry...")
                        await self._ensure_port_available(project_port, project_id)
                    process = await self._start_project_composer_process(
                        project_id,
                        project_host,
                        project_port,
                        streamable_http_url,
                        auth_config,
                        max_startup_checks,
                        startup_delay,
                        legacy_sse_url=legacy_sse_url,
                    )
                except MCPComposerError as e:
                    last_error = e
                    await logger.aerror(
                        f"MCP Composer startup attempt {retry_attempt}/{max_retries} failed "
                        f"for project {project_id}: {e.message}"
                    )
                    # For config/port errors, don't retry - fail immediately
                    if isinstance(e, (MCPComposerConfigError, MCPComposerPortError)):
                        await logger.aerror(
                            f"Configuration or port error for project {project_id}, not retrying: {e.message}"
                        )
                        raise  # Re-raise to exit retry loop immediately
                    # Clean up any partially started process before retrying
                    if project_id in self.project_composers:
                        await self._do_stop_project_composer(project_id)
                    # If not the last attempt, wait and try to clean up zombie processes
                    if retry_attempt < max_retries:
                        await logger.adebug(f"Waiting 2 seconds before retry attempt {retry_attempt + 1}...")
                        await asyncio.sleep(2)
                        # On Windows, try to kill any zombie MCP Composer processes for this port
                        # This is a best-effort operation - don't fail retry if it errors
                        try:
                            msg = f"Checking for zombie MCP Composer processes on port {project_port}"
                            await logger.adebug(msg)
                            zombies_killed = await self._kill_zombie_mcp_processes(project_port)
                            if zombies_killed:
                                await logger.adebug(f"Killed zombie processes, port {project_port} should be free")
                        except Exception as retry_zombie_error:  # noqa: BLE001
                            # Log but continue - zombie cleanup is optional
                            msg = f"Failed to check/kill zombie processes during retry: {retry_zombie_error}"
                            await logger.awarning(msg)
                else:
                    # Success! Store the composer info and register the port and PID
                    self.project_composers[project_id] = {
                        "process": process,
                        "host": project_host,
                        "port": project_port,
                        "streamable_http_url": streamable_http_url,
                        "legacy_sse_url": legacy_sse_url,
                        "sse_url": legacy_sse_url,
                        "auth_config": auth_config,
                    }
                    self._port_to_project[project_port] = project_id
                    self._pid_to_project[process.pid] = project_id
                    # Clear any previous error on success
                    self.clear_last_error(project_id)
                    await logger.adebug(
                        f"MCP Composer started for project {project_id} on port {project_port} "
                        f"(PID: {process.pid}) after {retry_attempt} attempt(s)"
                    )
                    return  # Success!
            # All retries failed, raise the last error
            if last_error:
                await logger.aerror(
                    f"MCP Composer failed to start for project {project_id} after {max_retries} attempts"
                )
                # Store the error message for later retrieval
                self._last_errors[project_id] = last_error.message
                raise last_error
        except asyncio.CancelledError:
            # Operation was cancelled, clean up any started process
            await logger.adebug(f"MCP Composer start operation for project {project_id} was cancelled")
            if project_id in self.project_composers:
                await self._do_stop_project_composer(project_id)
            raise  # Re-raise to propagate cancellation
async def _start_project_composer_process(
    self,
    project_id: str,
    host: str,
    port: int,
    streamable_http_url: str,
    auth_config: dict[str, Any] | None = None,
    max_startup_checks: int = 40,
    startup_delay: float = 2.0,
    *,
    legacy_sse_url: str | None = None,
) -> subprocess.Popen:
    """Start the MCP Composer subprocess for a specific project.

    Builds the ``uvx mcp-composer`` command line (including OAuth settings
    forwarded as ``--env`` arguments), launches it, then polls until the
    target port is bound or the startup budget
    (``max_startup_checks * startup_delay`` seconds) is exhausted.

    Args:
        project_id: The project ID
        host: Host to bind to
        port: Port to bind to
        streamable_http_url: Streamable HTTP endpoint to connect to
        auth_config: Authentication configuration
        max_startup_checks: Number of port binding checks (default: 40)
        startup_delay: Delay between checks in seconds (default: 2.0)
        legacy_sse_url: Optional legacy SSE URL used for backward compatibility when required by tooling

    Returns:
        The started subprocess

    Raises:
        MCPComposerStartupError: If startup fails
    """
    settings = get_settings_service().settings
    # Some composer tooling still uses the --sse-url flag for backwards compatibility even in HTTP mode.
    effective_legacy_sse_url = legacy_sse_url or f"{streamable_http_url.rstrip('/')}/sse"
    cmd = [
        "uvx",
        f"mcp-composer{settings.mcp_composer_version}",
        "--port",
        str(port),
        "--host",
        host,
        "--mode",
        "http",
        "--endpoint",
        streamable_http_url,
        "--sse-url",
        effective_legacy_sse_url,
        "--disable-composer-tools",
    ]
    # Set environment variables
    env = os.environ.copy()
    oauth_server_url = auth_config.get("oauth_server_url") if auth_config else None
    if auth_config:
        auth_type = auth_config.get("auth_type")
        if auth_type == "oauth":
            cmd.extend(["--auth_type", "oauth"])
            # Add OAuth environment variables as command line arguments
            cmd.extend(["--env", "ENABLE_OAUTH", "True"])
            # Map auth config to environment variables for OAuth
            # Note: oauth_host and oauth_port are passed both via --host/--port CLI args
            # (for server binding) and as environment variables (for OAuth flow)
            oauth_env_mapping = {
                "oauth_host": "OAUTH_HOST",
                "oauth_port": "OAUTH_PORT",
                "oauth_server_url": "OAUTH_SERVER_URL",
                "oauth_callback_url": "OAUTH_CALLBACK_URL",
                "oauth_client_id": "OAUTH_CLIENT_ID",
                "oauth_client_secret": "OAUTH_CLIENT_SECRET",  # pragma: allowlist secret
                "oauth_auth_url": "OAUTH_AUTH_URL",
                "oauth_token_url": "OAUTH_TOKEN_URL",
                "oauth_mcp_scope": "OAUTH_MCP_SCOPE",
                "oauth_provider_scope": "OAUTH_PROVIDER_SCOPE",
            }
            # Backwards compatibility: if oauth_callback_url not set, try oauth_callback_path
            if ("oauth_callback_url" not in auth_config or not auth_config.get("oauth_callback_url")) and (
                "oauth_callback_path" in auth_config and auth_config.get("oauth_callback_path")
            ):
                # NOTE(review): this mutates the caller's auth_config dict in place.
                auth_config["oauth_callback_url"] = auth_config["oauth_callback_path"]
            # Add environment variables as command line arguments
            # Only set non-empty values to avoid Pydantic validation errors
            for config_key, env_key in oauth_env_mapping.items():
                value = auth_config.get(config_key)
                if value is not None and str(value).strip():
                    cmd.extend(["--env", env_key, str(value)])
    # Log the command being executed (with secrets obfuscated)
    safe_cmd = self._obfuscate_command_secrets(cmd)
    await logger.adebug(f"Starting MCP Composer with command: {' '.join(safe_cmd)}")
    # Start the subprocess with both stdout and stderr captured
    # On Windows, use temp files to avoid pipe buffering issues that can cause process to hang
    stdout_handle: int | typing.IO[bytes] = subprocess.PIPE
    stderr_handle: int | typing.IO[bytes] = subprocess.PIPE
    stdout_file = None
    stderr_file = None
    if platform.system() == "Windows":
        # Create temp files for stdout/stderr on Windows to avoid pipe deadlocks
        # Note: We intentionally don't use context manager as we need files to persist
        # for the subprocess and be cleaned up manually later
        stdout_file = tempfile.NamedTemporaryFile(  # noqa: SIM115
            mode="w+b", delete=False, prefix=f"mcp_composer_{project_id}_stdout_", suffix=".log"
        )
        stderr_file = tempfile.NamedTemporaryFile(  # noqa: SIM115
            mode="w+b", delete=False, prefix=f"mcp_composer_{project_id}_stderr_", suffix=".log"
        )
        stdout_handle = stdout_file
        stderr_handle = stderr_file
        stdout_name = stdout_file.name
        stderr_name = stderr_file.name
        await logger.adebug(f"Using temp files for MCP Composer logs: stdout={stdout_name}, stderr={stderr_name}")
    process = subprocess.Popen(cmd, env=env, stdout=stdout_handle, stderr=stderr_handle)  # noqa: ASYNC220, S603
    # Monitor the process startup with multiple checks
    process_running = False
    port_bound = False
    await logger.adebug(
        f"MCP Composer process started with PID {process.pid}, monitoring startup for project {project_id}..."
    )
    try:
        for check in range(max_startup_checks):
            await asyncio.sleep(startup_delay)
            # Check if process is still running
            poll_result = process.poll()
            startup_error_msg = None
            if poll_result is not None:
                # Process terminated, get the error output
                (
                    stdout_content,
                    stderr_content,
                    startup_error_msg,
                ) = await self._read_process_output_and_extract_error(
                    process, oauth_server_url, stdout_file=stdout_file, stderr_file=stderr_file
                )
                await self._log_startup_error_details(
                    project_id, cmd, host, port, stdout_content, stderr_content, startup_error_msg, poll_result
                )
                raise MCPComposerStartupError(startup_error_msg, project_id)
            # Process is still running, check if port is bound
            port_bound = not self._is_port_available(port)
            if port_bound:
                await logger.adebug(
                    f"MCP Composer for project {project_id} bound to port {port} "
                    f"(check {check + 1}/{max_startup_checks})"
                )
                process_running = True
                break
            await logger.adebug(
                f"MCP Composer for project {project_id} not yet bound to port {port} "
                f"(check {check + 1}/{max_startup_checks})"
            )
            # Try to read any available stderr/stdout without blocking to see what's happening
            await self._read_stream_non_blocking(process.stderr, "stderr")
            await self._read_stream_non_blocking(process.stdout, "stdout")
    except asyncio.CancelledError:
        # Operation was cancelled, kill the process and cleanup
        await logger.adebug(
            f"MCP Composer process startup cancelled for project {project_id}, terminating process {process.pid}"
        )
        try:
            process.terminate()
            # Wait for graceful termination with timeout
            try:
                await asyncio.wait_for(asyncio.to_thread(process.wait), timeout=2.0)
            except asyncio.TimeoutError:
                # Force kill if graceful termination times out
                await logger.adebug(f"Process {process.pid} did not terminate gracefully, force killing")
                await asyncio.to_thread(process.kill)
                await asyncio.to_thread(process.wait)
        except Exception as e:  # noqa: BLE001
            await logger.adebug(f"Error terminating process during cancellation: {e}")
        raise  # Re-raise to propagate cancellation
    # After all checks
    if not process_running or not port_bound:
        # Get comprehensive error information
        poll_result = process.poll()
        if poll_result is not None:
            # Process died
            stdout_content, stderr_content, startup_error_msg = await self._read_process_output_and_extract_error(
                process, oauth_server_url, stdout_file=stdout_file, stderr_file=stderr_file
            )
            await self._log_startup_error_details(
                project_id, cmd, host, port, stdout_content, stderr_content, startup_error_msg, poll_result
            )
            raise MCPComposerStartupError(startup_error_msg, project_id)
        # Process running but port not bound
        await logger.aerror(
            f" - Checked {max_startup_checks} times over {max_startup_checks * startup_delay} seconds"
        )
        # Get any available output before terminating
        process.terminate()
        stdout_content, stderr_content, startup_error_msg = await self._read_process_output_and_extract_error(
            process, oauth_server_url, stdout_file=stdout_file, stderr_file=stderr_file
        )
        await self._log_startup_error_details(
            project_id, cmd, host, port, stdout_content, stderr_content, startup_error_msg, pid=process.pid
        )
        raise MCPComposerStartupError(startup_error_msg, project_id)
    # Close the pipes/files if everything is successful
    if stdout_file and stderr_file:
        # Clean up temp files on success
        try:
            stdout_file.close()
            stderr_file.close()
            Path(stdout_file.name).unlink()
            Path(stderr_file.name).unlink()
        except Exception as e:  # noqa: BLE001
            await logger.adebug(f"Error cleaning up temp files on success: {e}")
    else:
        if process.stdout:
            process.stdout.close()
        if process.stderr:
            process.stderr.close()
    return process
@require_composer_enabled
def get_project_composer_port(self, project_id: str) -> int | None:
    """Return the port of the project's running composer, or None if absent."""
    composer_info = self.project_composers.get(project_id)
    if composer_info is None:
        return None
    return composer_info["port"]
@require_composer_enabled
async def teardown(self) -> None:
    """Clean up resources when the service is torn down.

    Delegates all shutdown work to ``self.stop()`` (defined elsewhere in
    this service) and logs before/after for traceability.
    """
    await logger.adebug("Tearing down MCP Composer service...")
    await self.stop()
    await logger.adebug("MCP Composer service teardown complete")
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/services/mcp_composer/service.py",
"license": "MIT License",
"lines": 1260,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langflow-ai/langflow:src/backend/tests/unit/test_langflow_logging_compatibility.py | """Test langflow.logging backwards compatibility and integration.
This test ensures that langflow.logging works correctly and that there are no
conflicts with the new lfx.logging backwards compatibility module.
"""
import pytest
def test_langflow_logging_imports():
    """Ensure langflow.logging is importable and exposes usable symbols."""
    try:
        from langflow.logging import configure, logger
    except ImportError as e:
        pytest.fail(f"langflow.logging should be importable: {e}")
    else:
        for exported in (configure, logger):
            assert exported is not None
        assert callable(configure)
def test_langflow_logging_functionality():
    """configure() and logger calls from langflow.logging must not raise."""
    from langflow.logging import configure, logger

    checks = (
        (lambda: configure(log_level="INFO"), "configure should work: {}"),
        (lambda: logger.info("Test message from langflow.logging"), "logger should work: {}"),
    )
    for action, failure_template in checks:
        try:
            action()
        except Exception as e:
            pytest.fail(failure_template.format(e))
def test_langflow_logging_has_expected_exports():
    """The module surface of langflow.logging matches its declared __all__."""
    import langflow.logging

    expected_exports = {"configure", "logger", "disable_logging", "enable_logging"}
    # Every advertised symbol must actually exist on the module.
    for symbol in expected_exports:
        assert hasattr(langflow.logging, symbol)
    # And __all__ must advertise exactly that set.
    assert hasattr(langflow.logging, "__all__")
    assert set(langflow.logging.__all__) == expected_exports
def test_langflow_logging_specific_functions():
    """disable_logging/enable_logging must at least be importable callables."""
    from langflow.logging import disable_logging, enable_logging

    # Note: These functions have implementation issues (trying to call methods
    # that don't exist on structlog), but they should at least be importable
    # and callable. The actual functionality is a separate issue from the
    # backwards compatibility we're testing.
    for fn in (disable_logging, enable_logging):
        assert callable(fn)
def test_no_conflict_with_lfx_logging():
    """langflow.logging and lfx.logging must coexist without conflicts."""
    from langflow.logging import configure as lf_configure
    from langflow.logging import logger as lf_logger
    from lfx.logging import configure as lfx_configure
    from lfx.logging import logger as lfx_logger

    # Object identity may vary with import order/initialization, so only the
    # API shape is asserted: both stacks expose a callable configure and a
    # logger with the expected methods.
    for configure_fn in (lf_configure, lfx_configure):
        assert callable(configure_fn)
    for logger_obj in (lf_logger, lfx_logger):
        assert hasattr(logger_obj, "info")

    # Using both back-to-back must not interfere.
    lf_configure(log_level="INFO")
    lfx_configure(log_level="INFO")
    lf_logger.info("Test from langflow.logging")
    lfx_logger.info("Test from lfx.logging")
def test_langflow_logging_imports_from_lfx():
    """langflow.logging should expose working equivalents of lfx.log.logger."""
    from langflow.logging import configure, logger
    from lfx.log.logger import configure as lfx_configure
    from lfx.log.logger import logger as lfx_logger

    # Identity may differ due to module initialization order; assert the
    # API shape on both sides instead.
    for configure_fn, logger_obj in ((configure, logger), (lfx_configure, lfx_logger)):
        assert callable(configure_fn)
        assert hasattr(logger_obj, "info")

    # Both paths must work end to end.
    configure(log_level="DEBUG")
    logger.debug("Test from langflow.logging")
    lfx_configure(log_level="DEBUG")
    lfx_logger.debug("Test from lfx.log.logger")
def test_backwards_compatibility_scenario():
    """All three import paths coexist: langflow.logging, lfx.logging, lfx.log.logger.

    Scenario under test:
    1. langflow.logging exists and imports from lfx.log.logger
    2. lfx.logging (new) re-exports from lfx.log.logger
    3. Both must work without conflicts
    """
    from langflow.logging import configure as lf_configure
    from langflow.logging import logger as lf_logger
    from lfx.log.logger import configure as orig_configure
    from lfx.log.logger import logger as orig_logger
    from lfx.logging import configure as lfx_configure
    from lfx.logging import logger as lfx_logger

    # All should be callable / have the expected logger methods.
    assert callable(lf_configure)
    assert callable(lfx_configure)
    assert callable(orig_configure)
    assert hasattr(lf_logger, "error")
    assert hasattr(lfx_logger, "info")
    assert hasattr(orig_logger, "debug")

    # Drive each path with a distinct level/message; none may interfere.
    scenarios = (
        (lf_configure, "ERROR", lf_logger.error, "Message from langflow.logging"),
        (lfx_configure, "INFO", lfx_logger.info, "Message from lfx.logging"),
        (orig_configure, "DEBUG", orig_logger.debug, "Message from lfx.log.logger"),
    )
    for configure_fn, level, log_fn, message in scenarios:
        configure_fn(log_level=level)
        log_fn(message)
def test_importing_langflow_logging_in_langflow():
    """Test that langflow.logging can be imported and used in langflow context without errors.

    This is similar to test_importing_langflow_logging_in_lfx but tests the langflow side
    using create_class to validate component creation with langflow.logging imports.
    """
    from textwrap import dedent

    from lfx.custom.validate import create_class

    # Test that langflow.logging can be used in component code created via create_class
    code = dedent("""
        from langflow.logging import logger, configure
        from langflow.logging.logger import logger
        from langflow.custom import Component

        class TestLangflowLoggingComponent(Component):
            def some_method(self):
                # Test that both logger and configure work in langflow context
                configure(log_level="INFO")
                logger.info("Test message from langflow component")

                # Test different log levels
                logger.debug("Debug message")
                logger.warning("Warning message")
                logger.error("Error message")

                return "langflow_logging_success"
    """)

    # NOTE(review): presumably create_class compiles the source and returns the
    # named class object — the __name__ check confirms the definition built.
    result = create_class(code, "TestLangflowLoggingComponent")
    assert result.__name__ == "TestLangflowLoggingComponent"
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/test_langflow_logging_compatibility.py",
"license": "MIT License",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/lfx/src/lfx/components/amazon/amazon_bedrock_converse.py | from langflow.field_typing import LanguageModel
from langflow.inputs.inputs import BoolInput, FloatInput, IntInput, MessageTextInput, SecretStrInput
from langflow.io import DictInput, DropdownInput
from lfx.base.models.aws_constants import AWS_REGIONS, AWS_MODEL_IDs
from lfx.base.models.model import LCModelComponent
class AmazonBedrockConverseComponent(LCModelComponent):
    """Langflow model component wrapping LangChain's ``ChatBedrockConverse``.

    Exposes AWS credential/region settings and common sampling parameters as
    component inputs, and builds the chat model in :meth:`build_model`.
    """

    display_name: str = "Amazon Bedrock Converse"
    description: str = (
        "Generate text using Amazon Bedrock LLMs with the modern Converse API for improved conversation handling."
    )
    icon = "Amazon"
    name = "AmazonBedrockConverseModel"
    beta = True

    inputs = [
        # Standard model inputs inherited from the LCModelComponent base class.
        *LCModelComponent.get_base_inputs(),
        DropdownInput(
            name="model_id",
            display_name="Model ID",
            options=AWS_MODEL_IDs,
            value="anthropic.claude-3-5-sonnet-20241022-v2:0",
            info="List of available model IDs to choose from.",
        ),
        SecretStrInput(
            name="aws_access_key_id",
            display_name="AWS Access Key ID",
            info="The access key for your AWS account. "
            "Usually set in Python code as the environment variable 'AWS_ACCESS_KEY_ID'.",
            value="AWS_ACCESS_KEY_ID",
            required=True,
        ),
        SecretStrInput(
            name="aws_secret_access_key",
            display_name="AWS Secret Access Key",
            info="The secret key for your AWS account. "
            "Usually set in Python code as the environment variable 'AWS_SECRET_ACCESS_KEY'.",
            value="AWS_SECRET_ACCESS_KEY",
            required=True,
        ),
        SecretStrInput(
            name="aws_session_token",
            display_name="AWS Session Token",
            advanced=True,
            info="The session key for your AWS account. "
            "Only needed for temporary credentials. "
            "Usually set in Python code as the environment variable 'AWS_SESSION_TOKEN'.",
            load_from_db=False,
        ),
        SecretStrInput(
            name="credentials_profile_name",
            display_name="Credentials Profile Name",
            advanced=True,
            info="The name of the profile to use from your "
            "~/.aws/credentials file. "
            "If not provided, the default profile will be used.",
            load_from_db=False,
        ),
        DropdownInput(
            name="region_name",
            display_name="Region Name",
            value="us-east-1",
            options=AWS_REGIONS,
            info="The AWS region where your Bedrock resources are located.",
        ),
        MessageTextInput(
            name="endpoint_url",
            display_name="Endpoint URL",
            advanced=True,
            info="The URL of the Bedrock endpoint to use.",
        ),
        # Model-specific parameters for fine control
        FloatInput(
            name="temperature",
            display_name="Temperature",
            value=0.7,
            info="Controls randomness in output. Higher values make output more random.",
            advanced=True,
        ),
        IntInput(
            name="max_tokens",
            display_name="Max Tokens",
            value=4096,
            info="Maximum number of tokens to generate.",
            advanced=True,
        ),
        FloatInput(
            name="top_p",
            display_name="Top P",
            value=0.9,
            info="Nucleus sampling parameter. Controls diversity of output.",
            advanced=True,
        ),
        # NOTE: top_k is collected here but deliberately NOT forwarded in
        # build_model (see comments there) — its info text tells users to use
        # "Additional Model Fields" for models that support it.
        IntInput(
            name="top_k",
            display_name="Top K",
            value=250,
            info="Limits the number of highest probability vocabulary tokens to consider. "
            "Note: Not all models support top_k. Use 'Additional Model Fields' for manual configuration if needed.",
            advanced=True,
        ),
        BoolInput(
            name="disable_streaming",
            display_name="Disable Streaming",
            value=False,
            info="If True, disables streaming responses. Useful for batch processing.",
            advanced=True,
        ),
        DictInput(
            name="additional_model_fields",
            display_name="Additional Model Fields",
            advanced=True,
            is_list=True,
            info="Additional model-specific parameters for fine-tuning behavior.",
        ),
    ]

    def build_model(self) -> LanguageModel:  # type: ignore[type-var]
        """Instantiate ``ChatBedrockConverse`` from the component's inputs.

        Returns:
            The configured LangChain chat model.

        Raises:
            ImportError: If ``langchain_aws`` is not installed.
            ValueError: If ``ChatBedrockConverse`` rejects the parameters.
        """
        try:
            from langchain_aws.chat_models.bedrock_converse import ChatBedrockConverse
        except ImportError as e:
            msg = "langchain_aws is not installed. Please install it with `pip install langchain_aws`."
            raise ImportError(msg) from e

        # Prepare initialization parameters
        init_params = {
            "model": self.model_id,
            "region_name": self.region_name,
        }

        # Add AWS credentials if provided (falsy values are skipped so boto3's
        # default credential chain can take over).
        if self.aws_access_key_id:
            init_params["aws_access_key_id"] = self.aws_access_key_id
        if self.aws_secret_access_key:
            init_params["aws_secret_access_key"] = self.aws_secret_access_key
        if self.aws_session_token:
            init_params["aws_session_token"] = self.aws_session_token
        if self.credentials_profile_name:
            init_params["credentials_profile_name"] = self.credentials_profile_name
        if self.endpoint_url:
            init_params["endpoint_url"] = self.endpoint_url

        # Add model parameters directly as supported by ChatBedrockConverse
        if hasattr(self, "temperature") and self.temperature is not None:
            init_params["temperature"] = self.temperature
        if hasattr(self, "max_tokens") and self.max_tokens is not None:
            init_params["max_tokens"] = self.max_tokens
        if hasattr(self, "top_p") and self.top_p is not None:
            init_params["top_p"] = self.top_p

        # Handle streaming - only disable if explicitly requested
        if hasattr(self, "disable_streaming") and self.disable_streaming:
            init_params["disable_streaming"] = True

        # Handle additional model request fields carefully
        # Based on the error, inferenceConfig should not be passed as additional fields for some models
        additional_model_request_fields = {}

        # Only add top_k if user explicitly provided additional fields or if needed for specific models
        if hasattr(self, "additional_model_fields") and self.additional_model_fields:
            for field in self.additional_model_fields:
                if isinstance(field, dict):
                    additional_model_request_fields.update(field)

        # For now, don't automatically add inferenceConfig for top_k to avoid validation errors
        # Users can manually add it via additional_model_fields if their model supports it

        # Only add if we have actual additional fields
        if additional_model_request_fields:
            init_params["additional_model_request_fields"] = additional_model_request_fields

        try:
            output = ChatBedrockConverse(**init_params)
        except Exception as e:
            # Provide helpful error message with fallback suggestions
            error_details = str(e)
            if "validation error" in error_details.lower():
                msg = (
                    f"ChatBedrockConverse validation error: {error_details}. "
                    f"This may be due to incompatible parameters for model '{self.model_id}'. "
                    f"Consider adjusting the model parameters or trying the legacy Amazon Bedrock component."
                )
            elif "converse api" in error_details.lower():
                msg = (
                    f"Converse API error: {error_details}. "
                    f"The model '{self.model_id}' may not support the Converse API. "
                    f"Try using the legacy Amazon Bedrock component instead."
                )
            else:
                msg = f"Could not initialize ChatBedrockConverse: {error_details}"
            raise ValueError(msg) from e

        return output
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/amazon/amazon_bedrock_converse.py",
"license": "MIT License",
"lines": 180,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/airtable_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioAirtableAPIComponent(ComposioBaseComponent):
    """Composio-backed Airtable component.

    Only pins the Composio app identity; all other behavior is inherited
    from ComposioBaseComponent.
    """

    display_name: str = "Airtable"
    icon = "Airtable"
    documentation: str = "https://docs.composio.dev"
    # Composio app slug used to resolve Airtable tools.
    app_name = "airtable"

    def set_default_tools(self):
        """Set the default tools for Airtable component."""
        # Intentionally a no-op: no default tools are pre-selected, matching
        # the other Composio components in this package.
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/airtable_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/asana_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioAsanaAPIComponent(ComposioBaseComponent):
display_name: str = "Asana"
icon = "Asana"
documentation: str = "https://docs.composio.dev"
app_name = "asana"
def set_default_tools(self):
"""Set the default tools for Asana component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/asana_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/attio_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioAttioAPIComponent(ComposioBaseComponent):
display_name: str = "Attio"
icon = "Attio"
documentation: str = "https://docs.composio.dev"
app_name = "attio"
def set_default_tools(self):
"""Set the default tools for Attio component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/attio_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/calendly_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioCalendlyAPIComponent(ComposioBaseComponent):
display_name: str = "Calendly"
icon = "Calendly"
documentation: str = "https://docs.composio.dev"
app_name = "calendly"
def set_default_tools(self):
"""Set the default tools for Calendly component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/calendly_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/contentful_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioContentfulAPIComponent(ComposioBaseComponent):
display_name: str = "Contentful"
icon = "Contentful"
documentation: str = "https://docs.composio.dev"
app_name = "contentful"
def set_default_tools(self):
"""Set the default tools for Contentful component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/contentful_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/discord_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioDiscordAPIComponent(ComposioBaseComponent):
display_name: str = "Discord"
icon = "discord"
documentation: str = "https://docs.composio.dev"
app_name = "discord"
def set_default_tools(self):
"""Set the default tools for Discord component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/discord_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/figma_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioFigmaAPIComponent(ComposioBaseComponent):
display_name: str = "Figma"
icon = "Figma"
documentation: str = "https://docs.composio.dev"
app_name = "figma"
def set_default_tools(self):
"""Set the default tools for Figma component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/figma_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/googledocs_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioGoogleDocsAPIComponent(ComposioBaseComponent):
display_name: str = "GoogleDocs"
icon = "Googledocs"
documentation: str = "https://docs.composio.dev"
app_name = "googledocs"
def set_default_tools(self):
"""Set the default tools for Google Docs component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/googledocs_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/googlesheets_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioGoogleSheetsAPIComponent(ComposioBaseComponent):
display_name: str = "GoogleSheets"
icon = "Googlesheets"
documentation: str = "https://docs.composio.dev"
app_name = "googlesheets"
def set_default_tools(self):
"""Set the default tools for Google Sheets component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/googlesheets_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/klaviyo_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioKlaviyoAPIComponent(ComposioBaseComponent):
display_name: str = "Klaviyo"
icon = "Klaviyo"
documentation: str = "https://docs.composio.dev"
app_name = "klaviyo"
def set_default_tools(self):
"""Set the default tools for Klaviyo component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/klaviyo_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/miro_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioMiroAPIComponent(ComposioBaseComponent):
display_name: str = "Miro"
icon = "Miro"
documentation: str = "https://docs.composio.dev"
app_name = "miro"
def set_default_tools(self):
"""Set the default tools for Miro component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/miro_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/notion_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioNotionAPIComponent(ComposioBaseComponent):
display_name: str = "Notion"
icon = "Notion"
documentation: str = "https://docs.composio.dev"
app_name = "notion"
def set_default_tools(self):
"""Set the default tools for Notion component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/notion_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/onedrive_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioOneDriveAPIComponent(ComposioBaseComponent):
display_name: str = "OneDrive"
icon = "One_Drive"
documentation: str = "https://docs.composio.dev"
app_name = "one_drive"
def set_default_tools(self):
"""Set the default tools for OneDrive component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/onedrive_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/src/lfx/components/composio/wrike_composio.py | from lfx.base.composio.composio_base import ComposioBaseComponent
class ComposioWrikeAPIComponent(ComposioBaseComponent):
display_name: str = "Wrike"
icon = "Wrike"
documentation: str = "https://docs.composio.dev"
app_name = "wrike"
def set_default_tools(self):
"""Set the default tools for Wrike component."""
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/src/lfx/components/composio/wrike_composio.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/lfx/tests/unit/cli/test_run_starter_projects.py | """Test run command with all starter project templates.
Tests that all starter project JSON files can be loaded by lfx run command
without import errors for langflow modules. We expect execution errors
(missing API keys, etc.) but no import/module errors.
"""
import json
from pathlib import Path
import pytest
from lfx.__main__ import app
from typer.testing import CliRunner
runner = CliRunner()
def get_starter_projects_path() -> Path:
"""Get path to starter projects directory."""
# Use absolute path to find the starter projects
test_file_path = Path(__file__).resolve()
# Navigate up to find the langflow project root
current = test_file_path.parent
while current != current.parent:
if (current / "src" / "backend" / "base" / "langflow" / "initial_setup" / "starter_projects").exists():
return current / "src" / "backend" / "base" / "langflow" / "initial_setup" / "starter_projects"
current = current.parent
# Fallback to a relative path from the test file
# test_file is in: src/lfx/tests/unit/cli
# starter projects are in: src/backend/base/langflow/initial_setup/starter_projects
project_root = test_file_path.parent.parent.parent.parent.parent
return project_root / "backend" / "base" / "langflow" / "initial_setup" / "starter_projects"
def get_starter_project_files():
"""Get all starter project JSON files for parameterization."""
starter_path = get_starter_projects_path()
if not starter_path.exists():
return []
return sorted(starter_path.glob("*.json"))
class TestRunStarterProjects:
"""Test run command with all starter project templates."""
def test_starter_projects_exist(self):
"""Test that starter projects directory exists and has templates."""
path = get_starter_projects_path()
assert path.exists(), f"Starter projects directory not found: {path}"
templates = get_starter_project_files()
assert len(templates) > 0, "No starter project files found"
@pytest.mark.parametrize("template_file", get_starter_project_files(), ids=lambda x: x.name)
def test_run_starter_project_no_import_errors(self, template_file):
"""Test that starter project can be loaded without langflow or lfx import errors.
We expect execution errors (missing API keys, missing inputs, etc.)
but there should be NO errors about importing langflow or lfx modules.
"""
# Run the command with --no-check-variables to skip variable validation
# Use verbose mode to get detailed error messages in stderr
result = runner.invoke(
app,
["run", "--verbose", "--no-check-variables", str(template_file), "test input"],
)
# The command will likely fail due to missing API keys, etc.
# But we're checking that there are no import errors
# Use the combined output provided by Click/Typer
all_output = result.output
# Check for import errors related to langflow or lfx
if "ModuleNotFoundError" in all_output or "ImportError" in all_output or "Module" in all_output:
# Check for langflow import errors
if "No module named 'langflow'" in all_output or "Module langflow" in all_output:
# Extract the specific error for better debugging
error_line = ""
for line in all_output.split("\n"):
if "langflow" in line and ("No module named" in line or "Module" in line):
error_line = line.strip()
break
pytest.fail(f"Langflow import error found in {template_file.name}.\nError: {error_line}")
# Check for lfx import errors (these indicate structural issues)
if "No module named 'lfx." in all_output or "Module lfx." in all_output:
# Extract the specific error for better debugging
import re
# Remove ANSI color codes for cleaner output
clean_output = re.sub(r"\x1b\[[0-9;]*m", "", all_output)
error_lines = []
for line in clean_output.split("\n"):
if "lfx" in line and ("No module named" in line or "Module lfx." in line):
# Extract just the module name from various error formats
if "No module named" in line:
match = re.search(r"No module named ['\"]([^'\"]+)['\"]", line)
if match:
error_lines.append(f" - Missing module: {match.group(1)}")
elif "Module lfx." in line and "not found" in line:
match = re.search(r"Module (lfx\.[^\s]+)", line)
if match:
error_lines.append(f" - Missing module: {match.group(1)}")
# Deduplicate while preserving order
seen = set()
unique_errors = []
for error in error_lines:
if error not in seen:
seen.add(error)
unique_errors.append(error)
error_detail = "\n".join(unique_errors[:5]) # Show first 5 unique lfx errors
pytest.fail(
f"LFX import error found in {template_file.name}.\n"
f"This indicates lfx internal structure issues.\n"
f"Missing modules:\n{error_detail}"
)
# Check for other critical import errors
if "cannot import name" in all_output and ("langflow" in all_output or "lfx" in all_output):
# Extract the specific import error
error_line = ""
for line in all_output.split("\n"):
if "cannot import name" in line:
error_line = line.strip()
break
pytest.fail(f"Import error found in {template_file.name}.\nError: {error_line}")
@pytest.mark.parametrize("template_file", get_starter_project_files(), ids=lambda x: x.name)
def test_run_starter_project_valid_json(self, template_file):
"""Test that starter project file is valid JSON."""
with template_file.open(encoding="utf-8") as f:
try:
data = json.load(f)
# Basic structure validation
assert "data" in data or "nodes" in data, f"Missing 'data' or 'nodes' in {template_file.name}"
except json.JSONDecodeError as e:
pytest.fail(f"Invalid JSON in {template_file.name}: {e}")
@pytest.mark.parametrize("template_file", get_starter_project_files(), ids=lambda x: x.name)
def test_run_starter_project_format_options(self, template_file):
"""Test that starter projects can be run with different output formats.
This tests that the basic command parsing works, even if execution fails.
"""
formats = ["json", "text", "message", "result"]
for fmt in formats:
result = runner.invoke(
app,
["run", "--format", fmt, "--no-check-variables", str(template_file), "test"],
)
# We don't check exit code as it may fail due to missing dependencies
# We just want to ensure the command is parsed and attempted
# Check that we got some output (even if it's an error)
assert len(result.output) > 0, f"No output for {template_file.name} with format {fmt}"
def test_run_basic_starter_projects_detailed(self):
"""Test basic starter projects that should have minimal dependencies."""
basic_templates = [
"Basic Prompting.json",
"Basic Prompt Chaining.json",
]
starter_path = get_starter_projects_path()
for template_name in basic_templates:
template_file = starter_path / template_name
if not template_file.exists():
continue
result = runner.invoke(
app,
["run", "--verbose", "--no-check-variables", str(template_file), "Hello test"],
)
# These basic templates might still fail due to missing LLM API keys
# but should not have import errors
all_output = result.output
# More specific checks for these basic templates
assert "No module named 'langflow'" not in all_output, f"Langflow import error in {template_name}"
# Check for module not found errors specifically related to langflow
# (Settings service errors are runtime errors, not import errors)
if "ModuleNotFoundError" in all_output and "langflow" in all_output and "lfx.services" not in all_output:
# This is an actual langflow import error, not an internal lfx error
pytest.fail(f"Module not found error for langflow in {template_name}")
@pytest.mark.parametrize("template_file", get_starter_project_files()[:5], ids=lambda x: x.name)
def test_run_starter_project_with_stdin(self, template_file):
"""Test loading starter projects via stdin (testing first 5 for speed)."""
with template_file.open(encoding="utf-8") as f:
json_content = f.read()
result = runner.invoke(
app,
["run", "--stdin", "--no-check-variables", "--input-value", "test"],
input=json_content,
)
# Check that the command attempted to process the input
assert len(result.output) > 0
# Verify no import errors
all_output = result.output
assert "No module named 'langflow'" not in all_output
@pytest.mark.parametrize("template_file", get_starter_project_files()[:5], ids=lambda x: x.name)
def test_run_starter_project_inline_json(self, template_file):
"""Test loading starter projects via --flow-json option (testing first 5 for speed)."""
with template_file.open(encoding="utf-8") as f:
json_content = f.read()
result = runner.invoke(
app,
["run", "--flow-json", json_content, "--no-check-variables", "--input-value", "test"],
)
# Check that the command attempted to process the input
assert len(result.output) > 0
# Verify no import errors
all_output = result.output
assert "No module named 'langflow'" not in all_output
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/tests/unit/cli/test_run_starter_projects.py",
"license": "MIT License",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/backend/tests/unit/test_lfx_reexport_modules.py | """Test to ensure all langflow modules that re-export lfx modules work correctly.
This test validates that every langflow module that re-exports from lfx
can successfully import and access all expected symbols, maintaining
backward compatibility and proper API exposure.
Based on analysis, there are 24 langflow modules that re-export from lfx:
Base Modules (11):
- langflow.base (wildcard from lfx.base)
- langflow.base.agents (from lfx.base.agents)
- langflow.base.data (from lfx.base.data)
- langflow.base.embeddings (from lfx.base.embeddings)
- langflow.base.io (from lfx.base.io)
- langflow.base.memory (from lfx.base.memory)
- langflow.base.models (from lfx.base.models)
- langflow.base.prompts (from lfx.base.prompts)
- langflow.base.textsplitters (from lfx.base.textsplitters)
- langflow.base.tools (from lfx.base.tools)
- langflow.base.vectorstores (from lfx.base.vectorstores)
Core System Modules (13):
- langflow.custom (from lfx.custom)
- langflow.custom.custom_component (from lfx.custom.custom_component)
- langflow.field_typing (from lfx.field_typing with __getattr__)
- langflow.graph (from lfx.graph)
- langflow.inputs (from lfx.inputs.inputs)
- langflow.interface (from lfx.interface)
- langflow.io (from lfx.io + lfx.template)
- langflow.load (from lfx.load)
- langflow.logging (from lfx.log.logger)
- langflow.schema (from lfx.schema)
- langflow.template (wildcard from lfx.template)
- langflow.template.field (from lfx.template.field)
"""
import importlib
import inspect
import pkgutil
import re
import time
from pathlib import Path
import pytest
def get_all_reexport_modules():
"""Get all known re-export modules for parametrized testing."""
# Define the modules here so they can be accessed by parametrize
direct_reexport_modules = {
"langflow.base.agents": "lfx.base.agents",
"langflow.base.data": "lfx.base.data",
"langflow.base.embeddings": "lfx.base.embeddings",
"langflow.base.io": "lfx.base.io",
"langflow.base.memory": "lfx.base.memory",
"langflow.base.models": "lfx.base.models",
"langflow.base.prompts": "lfx.base.prompts",
"langflow.base.textsplitters": "lfx.base.textsplitters",
"langflow.base.tools": "lfx.base.tools",
"langflow.base.vectorstores": "lfx.base.vectorstores",
"langflow.custom.custom_component": "lfx.custom.custom_component",
"langflow.graph": "lfx.graph",
"langflow.inputs": "lfx.inputs.inputs",
"langflow.interface": "lfx.interface",
"langflow.load": "lfx.load",
"langflow.logging": "lfx.log",
"langflow.schema": "lfx.schema",
"langflow.template.field": "lfx.template.field",
}
wildcard_reexport_modules = {
"langflow.base": "lfx.base",
"langflow.template": "lfx.template",
}
complex_reexport_modules = {
"langflow.custom": ["lfx.custom", "lfx.custom.custom_component", "lfx.custom.utils"],
"langflow.io": ["lfx.io", "lfx.template"],
}
dynamic_reexport_modules = {
"langflow.field_typing": "lfx.field_typing",
}
return list(
{
**direct_reexport_modules,
**wildcard_reexport_modules,
**complex_reexport_modules,
**dynamic_reexport_modules,
}.keys()
)
class TestLfxReexportModules:
"""Test that all langflow modules that re-export from lfx work correctly."""
@classmethod
def _discover_langflow_modules(cls) -> list[str]:
"""Dynamically discover all langflow modules."""
langflow_modules: list[str] = []
try:
import langflow
for _importer, modname, _ispkg in pkgutil.walk_packages(langflow.__path__, langflow.__name__ + "."):
langflow_modules.append(modname)
except ImportError:
pass
return langflow_modules
@classmethod
def _detect_reexport_pattern(cls, module_name: str) -> dict[str, str | None]:
"""Detect what kind of re-export pattern a module uses."""
try:
module = importlib.import_module(module_name)
# Check if module has source code that mentions lfx
source_file = getattr(module, "__file__", None)
if source_file:
try:
with Path(source_file).open() as f:
content = f.read()
if "from lfx" in content:
# Try to extract the lfx module being imported
patterns = [
r"from (lfx\.[.\w]+) import",
r"from (lfx\.[.\w]+) import \*",
r"import (lfx\.[.\w]+)",
]
for pattern in patterns:
match = re.search(pattern, content)
if match:
return {"type": "direct", "source": match.group(1)}
if "__getattr__" in content and "lfx" in content:
return {"type": "dynamic", "source": None}
# If we get here, file exists but no patterns matched
return {"type": "none", "source": None}
except (OSError, UnicodeDecodeError):
return {"type": "none", "source": None}
else:
return {"type": "none", "source": None}
except ImportError:
return {"type": "import_error", "source": None}
@classmethod
def _get_expected_symbols(cls, lfx_source: str | None = None) -> list[str]:
"""Get expected symbols that should be available in a module."""
if not lfx_source:
return []
try:
lfx_module = importlib.import_module(lfx_source)
if hasattr(lfx_module, "__all__"):
return list(lfx_module.__all__)
# Return public attributes (not starting with _)
return [name for name in dir(lfx_module) if not name.startswith("_")]
except ImportError:
return []
# Define all the modules that re-export from lfx (kept for backward compatibility)
DIRECT_REEXPORT_MODULES = {
# Base modules with direct re-exports
"langflow.base.agents": "lfx.base.agents",
"langflow.base.data": "lfx.base.data",
"langflow.base.embeddings": "lfx.base.embeddings",
"langflow.base.io": "lfx.base.io",
"langflow.base.memory": "lfx.base.memory",
"langflow.base.models": "lfx.base.models",
"langflow.base.prompts": "lfx.base.prompts",
"langflow.base.textsplitters": "lfx.base.textsplitters",
"langflow.base.tools": "lfx.base.tools",
"langflow.base.vectorstores": "lfx.base.vectorstores",
# Core system modules with direct re-exports
"langflow.custom.custom_component": "lfx.custom.custom_component",
"langflow.graph": "lfx.graph",
"langflow.inputs": "lfx.inputs.inputs",
"langflow.interface": "lfx.interface",
"langflow.load": "lfx.load",
"langflow.logging": "lfx.log", # Note: imports from lfx.log.logger
"langflow.schema": "lfx.schema",
"langflow.template.field": "lfx.template.field",
}
# Modules that use wildcard imports from lfx
WILDCARD_REEXPORT_MODULES = {
"langflow.base": "lfx.base",
"langflow.template": "lfx.template",
}
# Modules with complex/mixed import patterns
COMPLEX_REEXPORT_MODULES = {
"langflow.custom": ["lfx.custom", "lfx.custom.custom_component", "lfx.custom.utils"],
"langflow.io": ["lfx.io", "lfx.template"], # Mixed imports
}
# Modules with dynamic __getattr__ patterns
DYNAMIC_REEXPORT_MODULES = {
"langflow.field_typing": "lfx.field_typing",
}
def test_direct_reexport_modules_importable(self):
"""Test that all direct re-export modules can be imported."""
successful_imports = 0
for langflow_module, lfx_module in self.DIRECT_REEXPORT_MODULES.items():
try:
# Import the langflow module
lf_module = importlib.import_module(langflow_module)
assert lf_module is not None, f"Langflow module {langflow_module} is None"
# Import the corresponding lfx module to compare
lfx_mod = importlib.import_module(lfx_module)
assert lfx_mod is not None, f"LFX module {lfx_module} is None"
successful_imports += 1
except Exception as e:
pytest.fail(f"Failed to import direct re-export module {langflow_module}: {e!s}")
def test_wildcard_reexport_modules_importable(self):
"""Test that modules using wildcard imports work correctly."""
successful_imports = 0
for langflow_module, lfx_module in self.WILDCARD_REEXPORT_MODULES.items():
try:
# Import the langflow module
lf_module = importlib.import_module(langflow_module)
assert lf_module is not None, f"Langflow module {langflow_module} is None"
# Wildcard imports should expose most/all attributes from lfx module
lfx_mod = importlib.import_module(lfx_module)
# Check that all attributes are available
if hasattr(lfx_mod, "__all__"):
all_attrs = list(lfx_mod.__all__) # Test all attributes
for attr in all_attrs:
if hasattr(lfx_mod, attr):
assert hasattr(lf_module, attr), f"Attribute {attr} missing from {langflow_module}"
successful_imports += 1
except Exception as e:
pytest.fail(f"Failed to import wildcard re-export module {langflow_module}: {e!s}")
def test_complex_reexport_modules_importable(self):
"""Test that modules with complex/mixed import patterns work correctly."""
successful_imports = 0
for langflow_module in self.COMPLEX_REEXPORT_MODULES:
try:
# Import the langflow module
lf_module = importlib.import_module(langflow_module)
assert lf_module is not None, f"Langflow module {langflow_module} is None"
# Verify it has __all__ attribute for complex modules
assert hasattr(lf_module, "__all__"), f"Complex module {langflow_module} missing __all__"
assert len(lf_module.__all__) > 0, f"Complex module {langflow_module} has empty __all__"
# Try to access all items from __all__
all_items = lf_module.__all__ # Test all items
for item in all_items:
try:
attr = getattr(lf_module, item)
assert attr is not None, f"Attribute {item} is None in {langflow_module}"
except AttributeError:
pytest.fail(f"Complex module {langflow_module} missing expected attribute {item} from __all__")
successful_imports += 1
except Exception as e:
pytest.fail(f"Failed to import complex re-export module {langflow_module}: {e!s}")
def test_dynamic_reexport_modules_importable(self):
"""Test that modules with __getattr__ dynamic loading work correctly."""
successful_imports = 0
for langflow_module in self.DYNAMIC_REEXPORT_MODULES:
try:
# Import the langflow module
lf_module = importlib.import_module(langflow_module)
assert lf_module is not None, f"Langflow module {langflow_module} is None"
# Dynamic modules should have __getattr__ method
assert hasattr(lf_module, "__getattr__"), f"Dynamic module {langflow_module} missing __getattr__"
# Test accessing some known attributes dynamically
if langflow_module == "langflow.field_typing":
# Test some known field typing constants
test_attrs = ["Data", "Text", "LanguageModel"]
for attr in test_attrs:
try:
value = getattr(lf_module, attr)
assert value is not None, f"Dynamic attribute {attr} is None"
except AttributeError:
pytest.fail(f"Dynamic module {langflow_module} missing expected attribute {attr}")
successful_imports += 1
except Exception as e:
pytest.fail(f"Failed to import dynamic re-export module {langflow_module}: {e!s}")
def test_all_reexport_modules_have_required_structure(self):
"""Test that re-export modules have the expected structure."""
all_modules = {}
all_modules.update(self.DIRECT_REEXPORT_MODULES)
all_modules.update(self.WILDCARD_REEXPORT_MODULES)
all_modules.update(self.DYNAMIC_REEXPORT_MODULES)
# Add complex modules
for lf_mod in self.COMPLEX_REEXPORT_MODULES:
all_modules[lf_mod] = self.COMPLEX_REEXPORT_MODULES[lf_mod]
for langflow_module in all_modules:
try:
lf_module = importlib.import_module(langflow_module)
# All modules should be importable
assert lf_module is not None
# Most should have __name__ attribute
assert hasattr(lf_module, "__name__")
# Check for basic module structure
assert hasattr(lf_module, "__file__") or hasattr(lf_module, "__path__")
except Exception as e:
pytest.fail(f"Module structure issue with {langflow_module}: {e!s}")
def test_reexport_modules_backward_compatibility(self):
"""Test that common import patterns still work for backward compatibility."""
# Test some key imports that should always work
backward_compatible_imports = [
("langflow.schema", "Data"),
("langflow.inputs", "StrInput"),
("langflow.inputs", "IntInput"),
("langflow.custom", "Component"), # Base component class
("langflow.custom", "CustomComponent"),
("langflow.field_typing", "Text"), # Dynamic
("langflow.field_typing", "Data"), # Dynamic
("langflow.load", "load_flow_from_json"),
("langflow.logging", "logger"),
]
for module_name, symbol_name in backward_compatible_imports:
try:
module = importlib.import_module(module_name)
symbol = getattr(module, symbol_name)
assert symbol is not None
# For callable objects, ensure they're callable
if inspect.isclass(symbol) or inspect.isfunction(symbol):
assert callable(symbol)
except Exception as e:
pytest.fail(f"Backward compatibility issue with {module_name}.{symbol_name}: {e!s}")
def test_no_circular_imports_in_reexports(self):
"""Test that there are no circular import issues in re-export modules."""
# Test importing modules in different orders to catch circular imports
import_orders = [
["langflow.schema", "langflow.inputs", "langflow.base"],
["langflow.base", "langflow.schema", "langflow.inputs"],
["langflow.inputs", "langflow.base", "langflow.schema"],
["langflow.custom", "langflow.field_typing", "langflow.template"],
["langflow.template", "langflow.custom", "langflow.field_typing"],
["langflow.field_typing", "langflow.template", "langflow.custom"],
]
for order in import_orders:
try:
for module_name in order:
importlib.import_module(module_name)
# Try to access something from each module to trigger full loading
module = importlib.import_module(module_name)
if hasattr(module, "__all__") and module.__all__:
# Try to access first item in __all__
first_item = module.__all__[0]
try:
getattr(module, first_item)
except AttributeError:
pytest.fail(f"Module {module_name} missing expected attribute {first_item} from __all__")
except Exception as e:
pytest.fail(f"Circular import issue with order {order}: {e!s}")
def test_reexport_modules_performance(self):
"""Test that re-export modules import efficiently."""
# Test that basic imports are fast
performance_critical_modules = [
"langflow.schema",
"langflow.inputs",
"langflow.field_typing",
"langflow.load",
"langflow.logging",
]
slow_imports = []
for module_name in performance_critical_modules:
start_time = time.time()
try:
importlib.import_module(module_name)
import_time = time.time() - start_time
# Re-export modules should import quickly (< 1 second)
if import_time > 1.0:
slow_imports.append(f"{module_name}: {import_time:.3f}s")
except ImportError:
# Import failures are tested elsewhere
pass
# Don't fail the test, just record slow imports for information
def test_coverage_completeness(self):
    """Test that we're testing all known re-export modules."""
    # Union of the module-name keys across all four pattern categories;
    # this guards against forgetting to register a re-export module.
    all_tested_modules = (
        set(self.DIRECT_REEXPORT_MODULES)
        | set(self.WILDCARD_REEXPORT_MODULES)
        | set(self.COMPLEX_REEXPORT_MODULES)
        | set(self.DYNAMIC_REEXPORT_MODULES)
    )
    # Should be testing all 24 identified modules based on our analysis.
    actual_count = len(all_tested_modules)
    # Ensure we have a reasonable number of modules
    assert actual_count >= 20, f"Too few modules being tested: {actual_count}"
    assert actual_count <= 30, f"Too many modules being tested: {actual_count}"
# Dynamic test methods using the discovery functions
def test_dynamic_module_discovery(self):
    """Test that we can dynamically discover langflow modules."""
    discovered = self._discover_langflow_modules()
    assert len(discovered) > 0, "Should discover at least some langflow modules"
    # A handful of well-known modules must appear in the discovery result.
    expected_modules = ["langflow.schema", "langflow.inputs", "langflow.custom"]
    found_modules = [name for name in expected_modules if name in discovered]
    assert len(found_modules) > 0, f"Expected to find some of {expected_modules}, but found: {found_modules}"
@pytest.mark.parametrize("module_name", get_all_reexport_modules())
def test_parametrized_module_import_and_pattern_detection(self, module_name: str):
    """Parametrized test that checks module import and pattern detection."""
    # The module must be importable at all.
    try:
        module = importlib.import_module(module_name)
    except ImportError:
        pytest.fail(f"Could not import {module_name}")
    assert module is not None, f"Module {module_name} should not be None"
    # The pattern detector must classify it into one of the known kinds.
    pattern_info = self._detect_reexport_pattern(module_name)
    assert isinstance(pattern_info, dict), "Pattern detection should return a dict"
    assert "type" in pattern_info, "Pattern info should have 'type' key"
    known_patterns = {"direct", "dynamic", "none", "import_error"}
    assert pattern_info["type"] in known_patterns, f"Unknown pattern type: {pattern_info['type']}"
def test_generate_backward_compatibility_imports(self):
    """Test generating backward compatibility imports dynamically."""
    # Pairs of (langflow re-export module, lfx module it sources from).
    test_cases = [("langflow.schema", "lfx.schema"), ("langflow.custom", "lfx.custom")]
    for lf_module, expected_lfx_source in test_cases:
        lfx_symbols = self._get_expected_symbols(expected_lfx_source)
        assert len(lfx_symbols) > 0, f"Should find some symbols in {expected_lfx_source}"
        lf_module_obj = importlib.import_module(lf_module)
        # Symbols the langflow module explicitly re-exports, if declared.
        exported = getattr(lf_module_obj, "__all__", None)
        if exported is None:
            # No __all__: just check that some lfx symbols are accessible.
            available_symbols = [sym for sym in lfx_symbols[:10] if hasattr(lf_module_obj, sym)]
            assert len(available_symbols) > 0, (
                f"Module {lf_module} should have some symbols from {expected_lfx_source}"
            )
            continue
        # Every symbol listed in __all__ should actually resolve.
        available_symbols = [sym for sym in exported if hasattr(lf_module_obj, sym)]
        assert len(available_symbols) > 0, f"Module {lf_module} should have symbols from its __all__"
        # At least some of the re-exported symbols must come from lfx.
        lfx_sourced = [sym for sym in available_symbols if sym in lfx_symbols]
        assert len(lfx_sourced) > 0, (
            f"Module {lf_module} should re-export some symbols from {expected_lfx_source}"
        )
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/unit/test_lfx_reexport_modules.py",
"license": "MIT License",
"lines": 410,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:src/lfx/tests/unit/custom/component/test_validate.py | from textwrap import dedent
from lfx.custom.validate import create_class
def test_importing_langflow_module_in_lfx():
    """Verify create_class can build a component whose source imports from langflow.custom."""
    source = dedent("""from langflow.custom import Component
class TestComponent(Component):
    def some_method(self):
        pass
""")
    built = create_class(source, "TestComponent")
    assert built.__name__ == "TestComponent"
def test_importing_langflow_logging_in_lfx():
    """Test that langflow.logging can be imported in lfx context without errors."""
    source = dedent("""
from langflow.logging import logger, configure
from langflow.custom import Component
class TestLoggingComponent(Component):
    def some_method(self):
        # Test that both logger and configure work
        configure(log_level="INFO")
        logger.info("Test message from component")
        return "success"
""")
    built = create_class(source, "TestLoggingComponent")
    assert built.__name__ == "TestLoggingComponent"
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/lfx/tests/unit/custom/component/test_validate.py",
"license": "MIT License",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langflow-ai/langflow:scripts/ci/lfx_nightly_tag.py | """Script to generate nightly tags for LFX package."""
import packaging.version
import requests
from packaging.version import Version
PYPI_LFX_URL = "https://pypi.org/pypi/lfx/json"
PYPI_LFX_NIGHTLY_URL = "https://pypi.org/pypi/lfx-nightly/json"
def get_latest_published_version(*, is_nightly: bool) -> Version:
    """Return the latest version of lfx (or lfx-nightly) published on PyPI.

    Args:
        is_nightly: When True, query the lfx-nightly package instead of lfx.

    Raises:
        requests.RequestException: If the package is missing from PyPI, the
            HTTP request fails, or the response payload is malformed.
    """
    url = PYPI_LFX_NIGHTLY_URL if is_nightly else PYPI_LFX_URL
    res = requests.get(url, timeout=10)
    if res.status_code == requests.codes.not_found:
        msg = "Package not found on PyPI"
        raise requests.RequestException(msg)
    # Surface other HTTP errors (e.g. 5xx) explicitly instead of failing later
    # with a confusing JSON parse error. HTTPError subclasses
    # RequestException, so existing callers' except clauses still match.
    res.raise_for_status()
    try:
        version_str = res.json()["info"]["version"]
    except (KeyError, ValueError) as e:
        msg = "Got unexpected response from PyPI"
        raise requests.RequestException(msg) from e
    return Version(version_str)
def create_lfx_tag():
    """Compute the next nightly tag for the LFX package.

    The base version is read from src/lfx/pyproject.toml; the dev build
    number is one more than the latest published nightly when the base
    versions match, otherwise 0.

    Returns:
        str: A PEP 440-compliant nightly tag, e.g. "v0.1.0.dev3".
    """
    # Since LFX has never been released, we'll use the version from pyproject.toml as base
    from pathlib import Path

    import tomllib

    # Read version from pyproject.toml
    lfx_pyproject_path = Path(__file__).parent.parent.parent / "src" / "lfx" / "pyproject.toml"
    pyproject_data = tomllib.loads(lfx_pyproject_path.read_text())
    current_version_str = pyproject_data["project"]["version"]
    current_version = Version(current_version_str)
    try:
        current_nightly_version = get_latest_published_version(is_nightly=True)
        nightly_base_version = current_nightly_version.base_version
    except (requests.RequestException, KeyError, ValueError):
        # If LFX nightly doesn't exist on PyPI yet, this is the first nightly
        current_nightly_version = None
        nightly_base_version = None
    build_number = "0"
    latest_base_version = current_version.base_version
    if current_nightly_version and latest_base_version == nightly_base_version:
        # Increment the published build number. Guard against a published
        # version that is not a dev release, where Version.dev is None and
        # `None + 1` would raise TypeError.
        build_number = str((current_nightly_version.dev or 0) + 1)
    new_nightly_version = latest_base_version + ".dev" + build_number
    # Prepend "v" to the version, if DNE.
    # This is an update to the nightly version format (PEP 440 permits a
    # leading "v" in version identifiers).
    if not new_nightly_version.startswith("v"):
        new_nightly_version = "v" + new_nightly_version
    # Verify if version is PEP440 compliant (raises InvalidVersion if not).
    packaging.version.Version(new_nightly_version)
    return new_nightly_version
if __name__ == "__main__":
    # Print the computed nightly tag so CI can capture it from stdout.
    print(create_lfx_tag())
| {
"repo_id": "langflow-ai/langflow",
"file_path": "scripts/ci/lfx_nightly_tag.py",
"license": "MIT License",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langflow-ai/langflow:scripts/ci/update_lfx_version.py | """Script to update LFX version for nightly builds."""
import re
import sys
from pathlib import Path
from update_pyproject_name import update_pyproject_name
from update_pyproject_version import update_pyproject_version
# Add the current directory to the path so we can import the other scripts
current_dir = Path(__file__).resolve().parent
sys.path.append(str(current_dir))
BASE_DIR = Path(__file__).parent.parent.parent
def update_lfx_workspace_dep(pyproject_path: str, new_project_name: str) -> None:
    """Update the LFX workspace dependency in pyproject.toml."""
    filepath = BASE_DIR / pyproject_path
    content = filepath.read_text(encoding="utf-8")
    # Only the nightly rename is supported; anything else is a caller error.
    if new_project_name != "lfx-nightly":
        msg = f"Invalid LFX project name: {new_project_name}"
        raise ValueError(msg)
    pattern = re.compile(r"lfx = \{ workspace = true \}")
    replacement = "lfx-nightly = { workspace = true }"
    # Updates the dependency name for uv
    if not pattern.search(content):
        msg = f"lfx workspace dependency not found in {filepath}"
        raise ValueError(msg)
    filepath.write_text(pattern.sub(replacement, content), encoding="utf-8")
def update_lfx_for_nightly(lfx_tag: str):
    """Update LFX package for nightly build.

    Renames the package to lfx-nightly, sets its version from the tag, and
    points the root workspace dependency at the renamed package.

    Args:
        lfx_tag: The nightly tag for LFX (e.g., "v0.1.0.dev0")
    """
    lfx_pyproject_path = "src/lfx/pyproject.toml"
    # Update name to lfx-nightly
    update_pyproject_name(lfx_pyproject_path, "lfx-nightly")
    # Strip a single leading "v" if present. The original lstrip("v") strips
    # every leading "v" character, which is wrong for arbitrary tags.
    version = lfx_tag.removeprefix("v")
    update_pyproject_version(lfx_pyproject_path, version)
    # Update workspace dependency in root pyproject.toml
    update_lfx_workspace_dep("pyproject.toml", "lfx-nightly")
    print(f"Updated LFX package to lfx-nightly version {version}")
def main():
    """Update LFX for nightly builds.

    Usage:
        update_lfx_version.py <lfx_tag>
    """
    expected_args = 2
    if len(sys.argv) == expected_args:
        update_lfx_for_nightly(sys.argv[1])
    else:
        # Wrong argument count: print usage and exit non-zero for CI.
        print("Usage: update_lfx_version.py <lfx_tag>")
        sys.exit(1)
# Entry point when invoked as a CI script.
if __name__ == "__main__":
    main()
| {
"repo_id": "langflow-ai/langflow",
"file_path": "scripts/ci/update_lfx_version.py",
"license": "MIT License",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langflow-ai/langflow:src/backend/tests/data/simple_agent.py | """A simple agent flow example for Langflow.
This script demonstrates how to set up a conversational agent using Langflow's
Agent component with web search capabilities.
Features:
- Uses the new flattened component access (cp.AgentComponent instead of deep imports)
- Configures logging to 'langflow.log' at INFO level
- Creates an agent with OpenAI GPT model
- Provides web search tools via URLComponent
- Connects ChatInput → Agent → ChatOutput
Usage:
uv run lfx run simple_agent.py "How are you?"
"""
import os
from pathlib import Path
from lfx.graph import Graph
from lfx.log.logger import LogConfig
# Using the new flattened component access
from lfx import components as cp
async def get_graph() -> Graph:
    """Create and return the graph with async component initialization.

    This function properly handles async component initialization without
    blocking the module loading process. The script loader will detect this
    async function and handle it appropriately using run_until_complete.

    Returns:
        Graph: The configured graph with ChatInput → Agent → ChatOutput flow
    """
    logging_config = LogConfig(
        log_level="INFO",
        log_file=Path("langflow.log"),
    )
    # Flattened component access: everything hangs off the `cp` namespace,
    # so no deep imports are required.
    chat_in = cp.ChatInput()
    agent_component = cp.AgentComponent()
    # URLComponent supplies the agent's web-search toolkit.
    search_tools = await cp.URLComponent().to_toolkit()
    agent_component.set(
        model_name="gpt-4o-mini",
        agent_llm="OpenAI",
        api_key=os.getenv("OPENAI_API_KEY"),
        input_value=chat_in.message_response,
        tools=search_tools,
    )
    chat_out = cp.ChatOutput().set(input_value=agent_component.message_response)
    return Graph(chat_in, chat_out, log_config=logging_config)
| {
"repo_id": "langflow-ai/langflow",
"file_path": "src/backend/tests/data/simple_agent.py",
"license": "MIT License",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.