sample_id
stringlengths
21
196
text
stringlengths
105
936k
metadata
dict
category
stringclasses
6 values
langflow-ai/langflow:src/backend/tests/unit/agentic/services/test_flow_types.py
"""Tests for flow execution types and constants. Tests the dataclasses and constants used in flow execution. """ from pathlib import Path from langflow.agentic.services.flow_types import ( FLOWS_BASE_PATH, LANGFLOW_ASSISTANT_FLOW, MAX_VALIDATION_RETRIES, STREAMING_EVENT_TIMEOUT_SECONDS, STREAMING_QUEUE_MAX_SIZE, TRANSLATION_FLOW, VALIDATION_RETRY_TEMPLATE, VALIDATION_UI_DELAY_SECONDS, FlowExecutionResult, IntentResult, ) class TestFlowExecutionResult: """Tests for FlowExecutionResult dataclass.""" def test_should_create_with_defaults(self): """Should create with empty result and no error by default.""" result = FlowExecutionResult() assert result.result == {} assert result.error is None def test_should_detect_error_when_set(self): """Should return has_error=True when error is set.""" result = FlowExecutionResult(error=ValueError("test error")) assert result.has_error is True assert result.has_result is False def test_should_detect_result_when_set(self): """Should return has_result=True when result is non-empty.""" result = FlowExecutionResult(result={"key": "value"}) assert result.has_result is True assert result.has_error is False def test_should_allow_both_result_and_error(self): """Should allow both result and error to be set simultaneously.""" result = FlowExecutionResult( result={"partial": "data"}, error=RuntimeError("partial failure"), ) assert result.has_result is True assert result.has_error is True def test_should_return_false_for_empty_dict_result(self): """Should return has_result=False for empty dict.""" result = FlowExecutionResult(result={}) assert result.has_result is False def test_should_store_exception_details(self): """Should preserve exception details.""" error = ValueError("detailed message") result = FlowExecutionResult(error=error) assert result.error is error assert str(result.error) == "detailed message" class TestIntentResult: """Tests for IntentResult dataclass.""" def test_should_create_with_translation_and_intent(self): """Should 
create with translation and intent fields.""" result = IntentResult(translation="hello world", intent="question") assert result.translation == "hello world" assert result.intent == "question" def test_should_support_generate_component_intent(self): """Should support generate_component as valid intent value.""" result = IntentResult(translation="create a component", intent="generate_component") assert result.intent == "generate_component" def test_should_be_equality_comparable(self): """Should support equality comparison.""" result1 = IntentResult(translation="test", intent="question") result2 = IntentResult(translation="test", intent="question") result3 = IntentResult(translation="test", intent="generate_component") assert result1 == result2 assert result1 != result3 def test_should_allow_empty_translation(self): """Should allow empty string as translation.""" result = IntentResult(translation="", intent="question") assert result.translation == "" class TestConstants: """Tests for module constants.""" def test_flows_base_path_should_exist(self): """FLOWS_BASE_PATH should be a valid path to flows directory.""" assert isinstance(FLOWS_BASE_PATH, Path) assert FLOWS_BASE_PATH.name == "flows" def test_flows_base_path_parent_should_be_agentic(self): """FLOWS_BASE_PATH parent should be agentic directory.""" assert FLOWS_BASE_PATH.parent.name == "agentic" def test_streaming_queue_max_size_should_be_positive(self): """STREAMING_QUEUE_MAX_SIZE should be a positive integer.""" assert isinstance(STREAMING_QUEUE_MAX_SIZE, int) assert STREAMING_QUEUE_MAX_SIZE > 0 def test_streaming_queue_max_size_should_be_reasonable(self): """STREAMING_QUEUE_MAX_SIZE should be within reasonable bounds.""" assert STREAMING_QUEUE_MAX_SIZE >= 100 assert STREAMING_QUEUE_MAX_SIZE <= 10000 def test_streaming_timeout_should_be_positive(self): """STREAMING_EVENT_TIMEOUT_SECONDS should be positive.""" assert isinstance(STREAMING_EVENT_TIMEOUT_SECONDS, float) assert STREAMING_EVENT_TIMEOUT_SECONDS > 0 
def test_streaming_timeout_should_be_reasonable(self): """STREAMING_EVENT_TIMEOUT_SECONDS should be within reasonable bounds.""" assert STREAMING_EVENT_TIMEOUT_SECONDS >= 30 assert STREAMING_EVENT_TIMEOUT_SECONDS <= 600 def test_max_validation_retries_should_be_positive(self): """MAX_VALIDATION_RETRIES should be a positive integer.""" assert isinstance(MAX_VALIDATION_RETRIES, int) assert MAX_VALIDATION_RETRIES > 0 def test_max_validation_retries_should_be_reasonable(self): """MAX_VALIDATION_RETRIES should be within reasonable bounds.""" assert MAX_VALIDATION_RETRIES >= 1 assert MAX_VALIDATION_RETRIES <= 10 def test_validation_ui_delay_should_be_small(self): """VALIDATION_UI_DELAY_SECONDS should be a small positive value.""" assert isinstance(VALIDATION_UI_DELAY_SECONDS, float) assert VALIDATION_UI_DELAY_SECONDS > 0 assert VALIDATION_UI_DELAY_SECONDS < 2 def test_langflow_assistant_flow_should_be_string(self): """LANGFLOW_ASSISTANT_FLOW should be a non-empty string.""" assert isinstance(LANGFLOW_ASSISTANT_FLOW, str) assert len(LANGFLOW_ASSISTANT_FLOW) > 0 def test_translation_flow_should_be_string(self): """TRANSLATION_FLOW should be a non-empty string.""" assert isinstance(TRANSLATION_FLOW, str) assert len(TRANSLATION_FLOW) > 0 class TestValidationRetryTemplate: """Tests for VALIDATION_RETRY_TEMPLATE constant.""" def test_should_be_formattable_string(self): """Should be a string template with format placeholders.""" assert isinstance(VALIDATION_RETRY_TEMPLATE, str) assert "{error}" in VALIDATION_RETRY_TEMPLATE assert "{code}" in VALIDATION_RETRY_TEMPLATE def test_should_format_with_error_and_code(self): """Should format correctly with error and code values.""" error = "SyntaxError: invalid syntax" code = "def broken():" result = VALIDATION_RETRY_TEMPLATE.format(error=error, code=code) assert error in result assert code in result def test_should_include_fix_instruction(self): """Should include instruction to fix the error.""" template_lower = 
VALIDATION_RETRY_TEMPLATE.lower() assert "fix" in template_lower or "correct" in template_lower def test_should_reference_error(self): """Should reference the error in the template.""" template_lower = VALIDATION_RETRY_TEMPLATE.lower() assert "error" in template_lower def test_should_reference_code(self): """Should reference the code in the template.""" template_lower = VALIDATION_RETRY_TEMPLATE.lower() assert "code" in template_lower def test_should_format_with_multiline_code(self): """Should format correctly with multiline code.""" error = "IndentationError: unexpected indent" code = """def example(): if True: print("wrong indent")""" result = VALIDATION_RETRY_TEMPLATE.format(error=error, code=code) assert error in result assert "def example():" in result assert 'print("wrong indent")' in result
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/agentic/services/test_flow_types.py", "license": "MIT License", "lines": 155, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/src/lfx/base/flow_controls/loop_utils.py
"""Utility functions for loop component execution.""" from collections import deque from typing import TYPE_CHECKING from lfx.schema.data import Data if TYPE_CHECKING: from lfx.graph.graph.base import Graph from lfx.graph.vertex.base import Vertex def get_loop_body_vertices( vertex: "Vertex", graph: "Graph", get_incoming_edge_by_target_param_fn, loop_output_name: str = "item", feedback_input_name: str | None = None, ) -> set[str]: """Get all vertex IDs that are part of the loop body. Uses BFS to traverse from the loop's output to the vertex that feeds back to the loop's input, then includes all predecessors of vertices in the loop body. Args: vertex: The loop component's vertex graph: The graph containing the loop get_incoming_edge_by_target_param_fn: Function to get incoming edge by target param loop_output_name: Name of the output that starts the loop body (default: "item" for Loop, use "loop" for WhileLoop) feedback_input_name: Name of the input that receives feedback (default: same as loop_output_name) Returns: Set of vertex IDs that form the loop body """ if feedback_input_name is None: feedback_input_name = loop_output_name # Find where the loop body starts (edges from loop output) start_edges = [e for e in vertex.outgoing_edges if e.source_handle.name == loop_output_name] if not start_edges: return set() # Find where it ends (vertex feeding back to loop input) end_vertex_id = get_incoming_edge_by_target_param_fn(feedback_input_name) if not end_vertex_id: return set() # BFS from start vertices, collecting all vertices until end_vertex loop_body = set() queue = deque([e.target_id for e in start_edges]) visited = set() while queue: current = queue.popleft() if current in visited: continue visited.add(current) loop_body.add(current) # Don't traverse beyond the end vertex if current == end_vertex_id: continue # Add successors for successor_id in graph.successor_map.get(current, []): if successor_id not in visited: queue.append(successor_id) # Now recursively 
include all predecessors of vertices in the loop body # This ensures we include all dependencies like LLM models # We need to find predecessors by looking at successor_map in reverse def add_all_predecessors(vertex_id: str, visited_predecessors: set[str]) -> None: """Recursively add all predecessors of a vertex.""" # Find predecessors by checking which vertices have this vertex as a successor for potential_pred_id, successors in graph.successor_map.items(): if ( vertex_id in successors and potential_pred_id != vertex.id and potential_pred_id not in visited_predecessors ): visited_predecessors.add(potential_pred_id) loop_body.add(potential_pred_id) # Recursively add predecessors of this predecessor add_all_predecessors(potential_pred_id, visited_predecessors) # Track visited predecessors to avoid infinite loops visited_predecessors: set[str] = set() # Add all predecessors for each vertex in the loop body for body_vertex_id in list(loop_body): # Use list() to avoid modifying set during iteration add_all_predecessors(body_vertex_id, visited_predecessors) return loop_body def get_loop_body_start_vertex(vertex: "Vertex", loop_output_name: str = "item") -> str | None: """Get the first vertex in the loop body (connected to loop's output). Args: vertex: The loop component's vertex loop_output_name: Name of the output that starts the loop body (default: "item" for Loop, use "loop" for WhileLoop) Returns: The vertex ID of the first vertex in the loop body, or None if not found """ start_edges = [e for e in vertex.outgoing_edges if e.source_handle.name == loop_output_name] if start_edges: return start_edges[0].target_id return None def get_loop_body_start_edge(vertex: "Vertex", loop_output_name: str = "item"): """Get the edge connecting loop's output to the first vertex in loop body. 
Args: vertex: The loop component's vertex loop_output_name: Name of the output that starts the loop body (default: "item" for Loop, use "loop" for WhileLoop) Returns: The edge object, or None if not found """ start_edges = [e for e in vertex.outgoing_edges if e.source_handle.name == loop_output_name] if start_edges: return start_edges[0] return None def extract_loop_output(results: list, end_vertex_id: str | None) -> Data: """Extract the output from subgraph execution results. Args: results: List of VertexBuildResult objects from subgraph execution end_vertex_id: The vertex ID that feeds back to the item input (end of loop body) Returns: Data object containing the loop iteration output """ if not results: return Data(text="") if not end_vertex_id: return Data(text="") # Find the result for the end vertex for result in results: if hasattr(result, "vertex") and result.vertex.id == end_vertex_id and hasattr(result, "result_dict"): result_dict = result.result_dict if result_dict.outputs: # Get first output value first_output = next(iter(result_dict.outputs.values())) # Handle both dict (from model_dump()) and object formats message = None if isinstance(first_output, dict) and "message" in first_output: message = first_output["message"] elif hasattr(first_output, "message"): message = first_output.message if message is not None: # If message is a dict, wrap it in a Data object if isinstance(message, dict): return Data(data=message) # If it's already a Data object, return it directly if isinstance(message, Data): return message # For other types, wrap in Data with text return Data(text=str(message)) return Data(text="") def validate_data_input(data) -> list[Data]: """Validate and normalize data input to a list of Data objects. 
Args: data: Input data (DataFrame, Data, or list of Data) Returns: List of Data objects Raises: TypeError: If data is not a valid type """ from lfx.schema.dataframe import DataFrame if isinstance(data, DataFrame): return data.to_data_list() if isinstance(data, Data): return [data] if isinstance(data, list) and all(isinstance(item, Data) for item in data): return data msg = f"Data input must be a DataFrame, Data object, or list of Data objects, got {type(data)}" raise TypeError(msg) async def execute_loop_body( graph: "Graph", data_list: list[Data], loop_body_vertex_ids: set[str], start_vertex_id: str | None, start_edge, end_vertex_id: str | None, event_manager=None, ) -> list[Data]: """Execute loop body for each data item. Creates an isolated subgraph for the loop body and executes it for each item in the data list, collecting results. Args: graph: The graph containing the loop data_list: List of Data objects to iterate over loop_body_vertex_ids: Set of vertex IDs that form the loop body start_vertex_id: The vertex ID of the first vertex in the loop body start_edge: The edge connecting loop's item output to start vertex (contains target param info) end_vertex_id: The vertex ID that feeds back to the loop's item input event_manager: Optional event manager to pass to subgraph execution for UI events Returns: List of Data objects containing results from each iteration """ if not loop_body_vertex_ids: return [] aggregated_results = [] for item in data_list: # Create fresh subgraph for each iteration. This gives clean vertex/edge state # while sharing context between iterations (intentional for loop state). # Using async context manager ensures proper cleanup of trace tasks on exit. async with graph.create_subgraph(loop_body_vertex_ids) as iteration_subgraph: # Inject current item into vertex data BEFORE preparing the subgraph. # This ensures components have data during build/validation. 
if start_vertex_id and start_edge: # Get the target parameter name from the edge if not hasattr(start_edge.target_handle, "field_name"): msg = f"Edge target_handle missing field_name attribute for loop item injection: {start_edge}" raise ValueError(msg) target_param = start_edge.target_handle.field_name # Find and update the start vertex's frontend data before components are built for vertex_data in iteration_subgraph._vertices: # noqa: SLF001 if vertex_data.get("id") == start_vertex_id: # Inject the loop item into the vertex's template data if "data" in vertex_data and "node" in vertex_data["data"]: template = vertex_data["data"]["node"].get("template", {}) if target_param in template: template[target_param]["value"] = item break # Prepare the subgraph - components will be built with the injected data iteration_subgraph.prepare() # CRITICAL: Also set the value in the vertex's raw_params # Fields with type="other" (like HandleInput) are skipped during field param processing # They normally get values from edges, but we filtered out the Loop->Parser edge # So we must inject the value directly into raw_params if start_vertex_id and start_edge: start_vertex = iteration_subgraph.get_vertex(start_vertex_id) start_vertex.update_raw_params({target_param: item}, overwrite=True) # Execute subgraph and collect results # Pass event_manager so UI receives events from subgraph execution results = [] async for result in iteration_subgraph.async_start(event_manager=event_manager): results.append(result) # Stop all on error (as per design decision) if hasattr(result, "valid") and not result.valid: msg = f"Error in loop iteration: {result}" raise RuntimeError(msg) # Extract output from final result output = extract_loop_output(results, end_vertex_id) aggregated_results.append(output) return aggregated_results
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/src/lfx/base/flow_controls/loop_utils.py", "license": "MIT License", "lines": 227, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
langflow-ai/langflow:src/lfx/tests/unit/components/flow_controls/test_loop_events.py
"""Tests for Loop component and loop utilities. These tests verify the loop body detection and component behavior. Event manager propagation is critical for UI updates during loop execution. Subgraph isolation tests are in tests/unit/graph/graph/test_subgraph_isolation.py. """ from contextlib import asynccontextmanager from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch import pytest from lfx.base.flow_controls.loop_utils import execute_loop_body, get_loop_body_vertices from lfx.components.flow_controls.loop import LoopComponent from lfx.schema.data import Data from lfx.schema.dataframe import DataFrame def create_subgraph_context_manager_mock(subgraph_factory): """Create a mock for create_subgraph that works as an async context manager. Args: subgraph_factory: A callable that takes vertex_ids and returns a mock subgraph """ @asynccontextmanager async def mock_create_subgraph(vertex_ids): subgraph = subgraph_factory(vertex_ids) try: yield subgraph finally: pass # Cleanup would happen here in real code return mock_create_subgraph class TestLoopComponentBasics: """Basic tests for LoopComponent.""" @pytest.mark.asyncio async def test_done_output_uses_event_manager(self): """Test that done_output uses self._event_manager.""" loop = LoopComponent(_id="test_loop") loop.set(data=DataFrame([])) loop._event_manager = None # Should not raise and should work with empty data result = await loop.done_output() assert isinstance(result, DataFrame) def test_loop_validates_data_input_types(self): """Test that loop validates data input types.""" from lfx.base.flow_controls.loop_utils import validate_data_input # DataFrame should work result = validate_data_input(DataFrame([Data(text="item")])) assert len(result) == 1 # Data should work result = validate_data_input(Data(text="single")) assert len(result) == 1 # List of Data should work result = validate_data_input([Data(text="a"), Data(text="b")]) assert len(result) == 2 # Invalid type should raise with 
pytest.raises(TypeError): validate_data_input("invalid") class TestEventManagerPropagation: """Tests for event manager propagation through loop execution. Event manager propagation is critical - it enables the UI to receive real-time updates as each vertex in the loop body executes. """ @pytest.mark.asyncio async def test_event_manager_passed_to_subgraph_async_start(self): """Test that event_manager is passed to subgraph's async_start method.""" mock_event_manager = MagicMock() received_event_manager = None # Create a mock subgraph that captures the event_manager async def mock_async_start(event_manager=None): nonlocal received_event_manager received_event_manager = event_manager yield MagicMock(valid=True, result_dict=MagicMock(outputs={})) def create_mock_subgraph(_vertex_ids): mock_subgraph = MagicMock() mock_subgraph._vertices = [] mock_subgraph.prepare = MagicMock() mock_subgraph.async_start = mock_async_start mock_subgraph.get_vertex = MagicMock(return_value=MagicMock(custom_component=MagicMock())) return mock_subgraph mock_graph = MagicMock() mock_graph.create_subgraph = create_subgraph_context_manager_mock(create_mock_subgraph) data_list = [Data(text="item1")] await execute_loop_body( graph=mock_graph, data_list=data_list, loop_body_vertex_ids={"vertex1"}, start_vertex_id="vertex1", start_edge=MagicMock(target_handle=MagicMock(field_name="data")), end_vertex_id="vertex1", event_manager=mock_event_manager, ) # Verify event_manager was passed to async_start assert received_event_manager is mock_event_manager @pytest.mark.asyncio async def test_event_manager_passed_for_each_iteration(self): """Test that event_manager is passed to async_start for each loop iteration.""" mock_event_manager = MagicMock() event_manager_calls = [] async def mock_async_start(event_manager=None): event_manager_calls.append(event_manager) yield MagicMock(valid=True, result_dict=MagicMock(outputs={})) def create_mock_subgraph(_vertex_ids): mock_subgraph = MagicMock() 
mock_subgraph._vertices = [] mock_subgraph.prepare = MagicMock() mock_subgraph.async_start = mock_async_start mock_subgraph.get_vertex = MagicMock(return_value=MagicMock(custom_component=MagicMock())) return mock_subgraph mock_graph = MagicMock() mock_graph.create_subgraph = create_subgraph_context_manager_mock(create_mock_subgraph) # 3 items = 3 iterations data_list = [Data(text="item1"), Data(text="item2"), Data(text="item3")] await execute_loop_body( graph=mock_graph, data_list=data_list, loop_body_vertex_ids={"vertex1"}, start_vertex_id="vertex1", start_edge=MagicMock(target_handle=MagicMock(field_name="data")), end_vertex_id="vertex1", event_manager=mock_event_manager, ) # Verify event_manager was passed for each iteration assert len(event_manager_calls) == 3 assert all(em is mock_event_manager for em in event_manager_calls) def test_subgraph_preserves_vertex_ids(self): """Test that subgraph vertices maintain original IDs. This is critical for the UI to show updates for the correct components. If vertex IDs were modified, the UI wouldn't know which component is executing. 
""" mock_graph = MagicMock() # Simulate _vertices and _edges with original IDs mock_graph._vertices = [ {"id": "original_vertex_1"}, {"id": "original_vertex_2"}, ] mock_graph._edges = [] mock_graph.flow_id = "test_flow" mock_graph.flow_name = "test" mock_graph.user_id = "test_user" mock_graph.context = {} # Track what vertex IDs are used in subgraph captured_vertex_ids = None def mock_create_subgraph(vertex_ids): nonlocal captured_vertex_ids captured_vertex_ids = vertex_ids subgraph = MagicMock() # Verify the subgraph would receive original IDs subgraph._vertices = [v for v in mock_graph._vertices if v["id"] in vertex_ids] return subgraph mock_graph.create_subgraph = mock_create_subgraph # Call create_subgraph with specific vertex IDs mock_graph.create_subgraph({"original_vertex_1", "original_vertex_2"}) # Verify original IDs were passed assert captured_vertex_ids == {"original_vertex_1", "original_vertex_2"} class TestLoopComponentEventManagerPropagation: """Tests for event manager propagation through LoopComponent methods. 
These tests verify the component-level propagation: LoopComponent.done_output → execute_loop_body """ @pytest.mark.asyncio async def test_done_output_passes_event_manager(self): """Test that done_output properly passes self._event_manager to execute_loop_body.""" mock_event_manager = MagicMock() # Create loop component loop = LoopComponent() data_list = [Data(text="item1")] loop.set(data=DataFrame(data_list)) loop._id = "test_loop" loop._event_manager = mock_event_manager # Set event manager on component # Mock execute_loop_body to return expected data mock_execute = AsyncMock(return_value=[Data(text="result")]) # Mock initialize_data def mock_initialize_data(): pass # Create a mock context that returns data_list mock_ctx = MagicMock() mock_ctx.get = MagicMock( side_effect=lambda key, default=None: data_list if key == f"{loop._id}_data" else default ) with ( patch.object(loop, "execute_loop_body", mock_execute), patch.object(loop, "initialize_data", mock_initialize_data), patch.object(type(loop), "ctx", new_callable=PropertyMock, return_value=mock_ctx), ): result = await loop.done_output() # Verify execute_loop_body was called with the event_manager mock_execute.assert_called_once() call_args = mock_execute.call_args # Check that event_manager was passed as keyword argument assert "event_manager" in call_args.kwargs assert call_args.kwargs["event_manager"] is mock_event_manager assert isinstance(result, DataFrame) @pytest.mark.asyncio async def test_execute_loop_body_called_with_event_manager(self): """Test that execute_loop_body is invoked with event_manager from done_output.""" # Create a mock event manager mock_event_manager = MagicMock() # Create loop component with data loop = LoopComponent() data_list = [Data(text="item1"), Data(text="item2")] loop.set(data=DataFrame(data_list)) loop._id = "test_loop" # Mock the graph and vertex to simulate proper context mock_graph = MagicMock() mock_vertex = MagicMock() mock_vertex.outgoing_edges = [] loop._vertex = 
mock_vertex loop._graph = mock_graph # Mock get_loop_body_vertices to return empty set (no loop body) with patch.object(loop, "get_loop_body_vertices", return_value=set()): result = await loop.execute_loop_body(data_list, event_manager=mock_event_manager) # Should return empty list when no loop body vertices assert result == [] class TestRawParamsInjection: """Tests for loop item injection into vertex raw_params. These tests verify the fix for the bug where HandleInput fields (type="other") were not receiving loop items because: 1. Fields with type="other" are skipped during field param processing 2. The updated_raw_params flag was being reset too early 3. Multiple build_params() calls would rebuild params, losing the injected values """ def test_updated_raw_params_flag_persists_across_multiple_build_params_calls(self): """Test that updated_raw_params flag persists across multiple build_params() calls. This is the core fix: the flag must stay True through ALL build_params() calls during initialization, not just the first one. 
""" from unittest.mock import MagicMock from lfx.schema.data import Data # Create a mock vertex with the necessary attributes mock_vertex = MagicMock() mock_vertex.graph = MagicMock() mock_vertex.updated_raw_params = False mock_vertex.raw_params = {} mock_vertex.params = {} # Import the actual build_params method from lfx.graph.vertex.base import Vertex # Bind build_params to our mock mock_vertex.build_params = Vertex.build_params.__get__(mock_vertex) # Simulate loop item injection test_data = Data(text="test item") mock_vertex.raw_params = {"input_data": test_data} mock_vertex.params = {"input_data": test_data} mock_vertex.updated_raw_params = True # First build_params() call - should skip and keep flag True mock_vertex.build_params() assert mock_vertex.updated_raw_params is True, "Flag should persist after first build_params()" # Second build_params() call - should also skip and keep flag True mock_vertex.build_params() assert mock_vertex.updated_raw_params is True, "Flag should persist after second build_params()" # Third build_params() call - should still skip and keep flag True mock_vertex.build_params() assert mock_vertex.updated_raw_params is True, "Flag should persist after third build_params()" def test_update_raw_params_sets_flag_and_updates_params(self): """Test that update_raw_params sets the flag and updates both raw_params and params. This verifies that when we inject loop items via update_raw_params: 1. Both raw_params and params are updated 2. The updated_raw_params flag is set to True 3. 
This protects against build_params() rebuilding """ from unittest.mock import MagicMock from lfx.graph.vertex.base import Vertex from lfx.schema.data import Data # Create a mock vertex with minimal setup mock_vertex = MagicMock() mock_vertex.raw_params = {"existing_param": "value"} mock_vertex.params = {"existing_param": "value"} mock_vertex.updated_raw_params = False # Bind the actual update_raw_params method mock_vertex.update_raw_params = Vertex.update_raw_params.__get__(mock_vertex) # Inject loop item test_data = Data(text="test item") mock_vertex.update_raw_params({"input_data": test_data}, overwrite=True) # Verify both raw_params and params are updated assert "input_data" in mock_vertex.raw_params assert mock_vertex.raw_params["input_data"] == test_data assert "input_data" in mock_vertex.params assert mock_vertex.params["input_data"] == test_data # Verify flag is set assert mock_vertex.updated_raw_params is True @pytest.mark.asyncio async def test_loop_item_injection_via_execute_loop_body(self): """Test that execute_loop_body actually injects loop items into vertex raw_params. This is an integration-style test that exercises the actual loop_utils.py code path, verifying that update_raw_params() is called with loop items during execution. 
""" from unittest.mock import MagicMock from lfx.schema.data import Data # Track calls to update_raw_params update_raw_params_calls = [] def mock_update_raw_params(params, overwrite=False): # noqa: FBT002 update_raw_params_calls.append((params, overwrite)) # Create mock vertex that tracks update_raw_params calls mock_start_vertex = MagicMock() mock_start_vertex.id = "start_vertex" mock_start_vertex.custom_component = MagicMock() mock_start_vertex.update_raw_params = mock_update_raw_params # Create mock subgraph def create_mock_subgraph(_vertex_ids): mock_subgraph = MagicMock() mock_subgraph._vertices = [ {"id": "start_vertex", "data": {"node": {"template": {"input_data": {"value": None}}}}} ] mock_subgraph.prepare = MagicMock() mock_subgraph.get_vertex = MagicMock(return_value=mock_start_vertex) # Mock async_start to yield valid results async def mock_async_start(**_kwargs): yield MagicMock(valid=True, result_dict=MagicMock(outputs={})) mock_subgraph.async_start = mock_async_start return mock_subgraph mock_graph = MagicMock() mock_graph.create_subgraph = create_subgraph_context_manager_mock(create_mock_subgraph) # Test data data_list = [ Data(text="First item"), Data(text="Second item"), ] # Mock edge with field_name mock_edge = MagicMock() mock_edge.target_handle.field_name = "input_data" # Execute loop body await execute_loop_body( graph=mock_graph, data_list=data_list, loop_body_vertex_ids={"start_vertex"}, start_vertex_id="start_vertex", start_edge=mock_edge, end_vertex_id="start_vertex", event_manager=None, ) # Verify update_raw_params was called for each loop item assert len(update_raw_params_calls) == 2, "Should call update_raw_params for each loop iteration" # Verify first call had first item first_call_params, first_call_overwrite = update_raw_params_calls[0] assert "input_data" in first_call_params assert first_call_params["input_data"].text == "First item" assert first_call_overwrite is True # Verify second call had second item second_call_params, 
second_call_overwrite = update_raw_params_calls[1] assert "input_data" in second_call_params assert second_call_params["input_data"].text == "Second item" assert second_call_overwrite is True class TestGetLoopBodyVertices: """Tests for get_loop_body_vertices utility function.""" def test_returns_empty_set_when_no_outgoing_edges(self): """Test when loop has no outgoing edges.""" class MockVertex: outgoing_edges = [] id = "loop" class MockGraph: successor_map = {} result = get_loop_body_vertices( vertex=MockVertex(), graph=MockGraph(), get_incoming_edge_by_target_param_fn=lambda _: None, ) assert result == set() def test_returns_empty_set_when_no_feedback_vertex(self): """Test when there's no vertex feeding back to loop.""" class MockEdge: class SourceHandle: name = "item" source_handle = SourceHandle() target_id = "component_a" class MockVertex: outgoing_edges = [MockEdge()] id = "loop" class MockGraph: successor_map = {"component_a": []} result = get_loop_body_vertices( vertex=MockVertex(), graph=MockGraph(), get_incoming_edge_by_target_param_fn=lambda _: None, ) assert result == set() def test_identifies_loop_body_vertices(self): """Test identification of vertices in a loop body.""" class MockEdge: class SourceHandle: name = "item" source_handle = SourceHandle() target_id = "component_a" class MockVertex: outgoing_edges = [MockEdge()] id = "loop_component" class MockGraph: successor_map = { "component_a": ["component_b"], "component_b": ["feedback_vertex"], "feedback_vertex": [], } def get_incoming_edge(param): return "feedback_vertex" if param == "item" else None result = get_loop_body_vertices( vertex=MockVertex(), graph=MockGraph(), get_incoming_edge_by_target_param_fn=get_incoming_edge, ) assert "component_a" in result assert "component_b" in result assert "feedback_vertex" in result def test_includes_predecessors_of_loop_body(self): """Test that predecessors of loop body vertices are included.""" class MockEdge: class SourceHandle: name = "item" source_handle 
= SourceHandle() target_id = "processing_vertex" class MockVertex: outgoing_edges = [MockEdge()] id = "loop_component" class MockGraph: successor_map = { "llm_model": ["processing_vertex"], "processing_vertex": ["feedback_vertex"], "feedback_vertex": [], } def get_incoming_edge(param): return "feedback_vertex" if param == "item" else None result = get_loop_body_vertices( vertex=MockVertex(), graph=MockGraph(), get_incoming_edge_by_target_param_fn=get_incoming_edge, ) assert "llm_model" in result assert "processing_vertex" in result assert "feedback_vertex" in result def test_excludes_loop_component_from_predecessors(self): """Test that the loop component itself is not included as a predecessor.""" class MockEdge: class SourceHandle: name = "item" source_handle = SourceHandle() target_id = "component_a" class MockVertex: outgoing_edges = [MockEdge()] id = "loop_component" class MockGraph: successor_map = { "loop_component": ["component_a"], "component_a": ["feedback_vertex"], "feedback_vertex": [], } def get_incoming_edge(param): return "feedback_vertex" if param == "item" else None result = get_loop_body_vertices( vertex=MockVertex(), graph=MockGraph(), get_incoming_edge_by_target_param_fn=get_incoming_edge, ) assert "loop_component" not in result assert "component_a" in result assert "feedback_vertex" in result
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/tests/unit/components/flow_controls/test_loop_events.py", "license": "MIT License", "lines": 446, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/tests/unit/components/flow_controls/test_loop_parser_integration.py
"""Integration test for Loop + Parser bug fix. This test reproduces the bug reported where a Parser component inside a Loop was receiving None during the build phase, causing: "Unsupported input type: <class 'NoneType'>. Expected DataFrame or Data." """ import pytest from lfx.components.flow_controls.loop import LoopComponent from lfx.components.input_output import ChatOutput from lfx.components.processing.parser import ParserComponent from lfx.graph import Graph from lfx.schema.data import Data from lfx.schema.dataframe import DataFrame class TestLoopParserIntegration: """Test that Parser component works correctly inside a Loop.""" @pytest.mark.asyncio async def test_parser_receives_loop_item_during_build(self): """Test that Parser gets loop item before validation during build. This reproduces the bug where Parser would get None during prepare() and fail validation. """ # Create loop with test data loop = LoopComponent(_id="loop") test_data = [ Data(text="First item"), Data(text="Second item"), Data(text="Third item"), ] loop.set(data=DataFrame(test_data)) # Create parser that receives loop items parser = ParserComponent(_id="parser") parser.set( input_data=loop.item_output, pattern="Item: {text}", mode="Parser", ) # Create output chat_output = ChatOutput(_id="output") chat_output.set(input_value=parser.parsed_text) # Connect parser output back to loop done (completes the loop body) loop.set(item=chat_output.message) # Build graph - this should NOT fail with "Unsupported input type: NoneType" graph = Graph(loop, chat_output) # Execute the loop - parser should receive each item correctly # The key test is that this DOESN'T raise "Unsupported input type: NoneType" results = [result async for result in graph.async_start()] # Verify execution completed without errors assert len(results) > 0 # Check that we got valid results (not all errors) valid_results = [r for r in results if hasattr(r, "valid") and r.valid] assert len(valid_results) > 0
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/tests/unit/components/flow_controls/test_loop_parser_integration.py", "license": "MIT License", "lines": 50, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/tests/unit/graph/graph/test_subgraph_isolation.py
"""Tests for subgraph isolation to verify if create_subgraph provides sufficient state isolation. These tests verify whether calling create_subgraph multiple times from the same parent graph produces isolated subgraphs that don't share state from previous executions. """ import pytest from lfx.components.input_output import ChatInput, ChatOutput, TextOutputComponent from lfx.graph import Graph class TestSubgraphIsolation: """Tests to verify subgraph state isolation.""" @pytest.mark.asyncio async def test_create_subgraph_provides_fresh_state(self): """Test that calling create_subgraph multiple times gives fresh unbuilt state each time.""" # Create a simple graph: chat_input -> text_output -> chat_output chat_input = ChatInput(_id="chat_input") chat_input.set(input_value="test message") text_output = TextOutputComponent(_id="text_output") text_output.set(input_value=chat_input.message_response) chat_output = ChatOutput(_id="chat_output") chat_output.set(input_value=text_output.text_response) parent_graph = Graph(chat_input, chat_output) # Create first subgraph with just text_output subgraph_vertex_ids = {"text_output"} # Use async context manager for subgraph1 async with parent_graph.create_subgraph(subgraph_vertex_ids) as subgraph1: subgraph1.prepare() # Verify subgraph1 vertex is not built initially vertex1 = subgraph1.get_vertex("text_output") assert not vertex1.built, "Subgraph1 vertex should not be built initially" # Run the first subgraph async for _ in subgraph1.async_start(): pass # Verify subgraph1 vertex is now built assert vertex1.built, "Subgraph1 vertex should be built after execution" # Create second subgraph from the SAME parent graph async with parent_graph.create_subgraph(subgraph_vertex_ids) as subgraph2: subgraph2.prepare() # Verify subgraph2 vertex is fresh (not built) vertex2 = subgraph2.get_vertex("text_output") assert not vertex2.built, "Subgraph2 vertex should not be built (should be fresh)" # Verify they are different vertex objects assert 
vertex1 is not vertex2, "Subgraph vertices should be different objects" @pytest.mark.asyncio async def test_create_subgraph_isolates_context(self): """Test that subgraph context modifications don't affect parent or other subgraphs.""" chat_input = ChatInput(_id="chat_input") chat_input.set(input_value="test") chat_output = ChatOutput(_id="chat_output") chat_output.set(input_value=chat_input.message_response) parent_graph = Graph(chat_input, chat_output, context={"shared_key": "original_value"}) # Create first subgraph async with parent_graph.create_subgraph({"chat_input", "chat_output"}) as subgraph1: # Modify subgraph1's context subgraph1.context["shared_key"] = "modified_in_subgraph1" subgraph1.context["new_key"] = "new_value" # Create second subgraph (nested to allow comparison) async with parent_graph.create_subgraph({"chat_input", "chat_output"}) as subgraph2: # Verify parent context is unchanged assert parent_graph.context["shared_key"] == "original_value", ( "Parent context should not be modified by subgraph" ) assert "new_key" not in parent_graph.context, "New key should not appear in parent context" # Verify subgraph2 has original context (shallow copy behavior) # Note: This tests if the context is properly copied assert subgraph2.context["shared_key"] == "original_value", ( "Subgraph2 should have original context value" ) @pytest.mark.asyncio async def test_create_subgraph_isolates_run_state(self): """Test that subgraph run state (run_manager, queues) is isolated.""" chat_input = ChatInput(_id="chat_input") chat_input.set(input_value="test") text_output = TextOutputComponent(_id="text_output") text_output.set(input_value=chat_input.message_response) parent_graph = Graph(chat_input, text_output) subgraph_ids = {"chat_input", "text_output"} # Create and run first subgraph async with parent_graph.create_subgraph(subgraph_ids) as subgraph1: subgraph1.prepare() # Capture initial run queue initial_queue = list(subgraph1._run_queue) assert len(initial_queue) > 
0, "Subgraph1 should have items in run queue after prepare" # Run subgraph1 to completion async for _ in subgraph1.async_start(): pass # Run queue should be empty after completion assert len(subgraph1._run_queue) == 0, "Subgraph1 run queue should be empty after completion" # Create second subgraph async with parent_graph.create_subgraph(subgraph_ids) as subgraph2: subgraph2.prepare() # Subgraph2 should have fresh run queue assert len(subgraph2._run_queue) > 0, "Subgraph2 should have items in run queue (fresh state)" assert list(subgraph2._run_queue) == initial_queue, "Subgraph2 run queue should match initial state" @pytest.mark.asyncio async def test_create_subgraph_isolates_vertex_results(self): """Test that vertex results from one subgraph don't leak to another.""" chat_input = ChatInput(_id="chat_input") chat_input.set(input_value="first_message") text_output = TextOutputComponent(_id="text_output") text_output.set(input_value=chat_input.message_response) parent_graph = Graph(chat_input, text_output) subgraph_ids = {"chat_input", "text_output"} # Create and run first subgraph async with parent_graph.create_subgraph(subgraph_ids) as subgraph1: subgraph1.prepare() async for _ in subgraph1.async_start(): pass # Get vertex from subgraph1 vertex1 = subgraph1.get_vertex("text_output") # Create second subgraph async with parent_graph.create_subgraph(subgraph_ids) as subgraph2: subgraph2.prepare() # Verify subgraph2 vertex has no results yet vertex2 = subgraph2.get_vertex("text_output") assert vertex2.results == {}, "Subgraph2 vertex should have empty results initially" # Verify they are different result dictionaries assert vertex1.results is not vertex2.results, "Result dicts should be different objects" @pytest.mark.asyncio async def test_mutable_context_objects_are_shared(self): """Test that mutable objects in context ARE shared between subgraphs. This is intentional behavior - loop iterations need to share state through context. 
The shallow copy allows subgraphs to communicate via mutable context objects. """ mutable_list = ["item1"] mutable_dict = {"key": "value"} chat_input = ChatInput(_id="chat_input") chat_output = ChatOutput(_id="chat_output") chat_output.set(input_value=chat_input.message_response) parent_graph = Graph( chat_input, chat_output, context={ "mutable_list": mutable_list, "mutable_dict": mutable_dict, }, ) # Create first subgraph async with parent_graph.create_subgraph({"chat_input", "chat_output"}) as subgraph1: # Modify mutable objects in subgraph1's context subgraph1.context["mutable_list"].append("item2") subgraph1.context["mutable_dict"]["new_key"] = "new_value" # Create second subgraph (nested to allow comparison) async with parent_graph.create_subgraph({"chat_input", "chat_output"}) as subgraph2: # Mutable objects SHOULD be shared (intentional for loop state communication) assert subgraph2.context["mutable_list"] is subgraph1.context["mutable_list"], ( "Mutable list should be shared between subgraphs" ) assert subgraph2.context["mutable_dict"] is subgraph1.context["mutable_dict"], ( "Mutable dict should be shared between subgraphs" ) # Changes from subgraph1 should be visible in subgraph2 assert "item2" in subgraph2.context["mutable_list"], "Subgraph2 should see item2 added by subgraph1" assert subgraph2.context["mutable_dict"]["new_key"] == "new_value", ( "Subgraph2 should see new_key added by subgraph1" )
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/tests/unit/graph/graph/test_subgraph_isolation.py", "license": "MIT License", "lines": 154, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/base/langflow/agentic/api/router.py
"""Langflow Assistant API router. This module provides the HTTP endpoints for the Langflow Assistant. All business logic is delegated to service modules. """ import uuid from dataclasses import dataclass from uuid import UUID from fastapi import APIRouter, HTTPException from fastapi.responses import StreamingResponse from lfx.base.models.unified_models import ( get_all_variables_for_provider, get_model_provider_variable_mapping, get_provider_required_variable_keys, get_unified_models_detailed, ) from lfx.log.logger import logger from sqlalchemy.ext.asyncio import AsyncSession from langflow.agentic.api.schemas import AssistantRequest from langflow.agentic.services.assistant_service import ( execute_flow_with_validation, execute_flow_with_validation_streaming, ) from langflow.agentic.services.flow_executor import execute_flow_file from langflow.agentic.services.flow_types import ( LANGFLOW_ASSISTANT_FLOW, MAX_VALIDATION_RETRIES, ) from langflow.agentic.services.provider_service import ( DEFAULT_MODELS, PREFERRED_PROVIDERS, get_enabled_providers_for_user, ) from langflow.api.utils.core import CurrentActiveUser, DbSession router = APIRouter(prefix="/agentic", tags=["Agentic"]) @dataclass(frozen=True) class _AssistantContext: """Resolved provider, model, and execution context for assistant endpoints.""" provider: str model_name: str api_key_name: str session_id: str global_vars: dict[str, str] max_retries: int async def _resolve_assistant_context( request: AssistantRequest, user_id: UUID, session: AsyncSession, ) -> _AssistantContext: """Resolve provider, model, API key, and build execution context. Raises: HTTPException: If provider is not configured or API key is missing. """ provider_variable_map = get_model_provider_variable_mapping() enabled_providers, _ = await get_enabled_providers_for_user(user_id, session) if not enabled_providers: raise HTTPException( status_code=400, detail="No model provider is configured. 
Please configure at least one model provider in Settings.", ) provider = request.provider if not provider: for preferred in PREFERRED_PROVIDERS: if preferred in enabled_providers: provider = preferred break if not provider: provider = enabled_providers[0] if provider not in enabled_providers: raise HTTPException( status_code=400, detail=f"Provider '{provider}' is not configured. Available providers: {enabled_providers}", ) api_key_name = provider_variable_map.get(provider) if not api_key_name: raise HTTPException(status_code=400, detail=f"Unknown provider: {provider}") model_name = request.model_name or DEFAULT_MODELS.get(provider) or "" # Get all configured variables for the provider provider_vars = await get_all_variables_for_provider(user_id, provider) # Validate all required variables are present required_keys = get_provider_required_variable_keys(provider) missing_keys = [key for key in required_keys if not provider_vars.get(key)] if missing_keys: raise HTTPException( status_code=400, detail=( f"Missing required configuration for {provider}: {', '.join(missing_keys)}. " "Please configure these in Settings > Model Providers." 
), ) global_vars: dict[str, str] = { "USER_ID": str(user_id), "FLOW_ID": request.flow_id, "MODEL_NAME": model_name, "PROVIDER": provider, } # Inject all provider variables into the global context global_vars.update(provider_vars) session_id = request.session_id or str(uuid.uuid4()) max_retries = request.max_retries if request.max_retries is not None else MAX_VALIDATION_RETRIES return _AssistantContext( provider=provider, model_name=model_name, api_key_name=api_key_name, session_id=session_id, global_vars=global_vars, max_retries=max_retries, ) @router.post("/execute/{flow_name}") async def execute_named_flow(flow_name: str, request: AssistantRequest, current_user: CurrentActiveUser) -> dict: """Execute a named flow from the flows directory.""" user_id = current_user.id global_vars = { "USER_ID": str(user_id), "FLOW_ID": request.flow_id, } if request.component_id: global_vars["COMPONENT_ID"] = request.component_id if request.field_name: global_vars["FIELD_NAME"] = request.field_name try: # Check for OpenAI variables (required for some assistant features) openai_vars = await get_all_variables_for_provider(user_id, "OpenAI") global_vars.update(openai_vars) except (ValueError, HTTPException): logger.debug("OpenAI variables not configured, continuing without them") flow_filename = f"{flow_name}.json" # Generate unique session_id per request to isolate memory session_id = str(uuid.uuid4()) return await execute_flow_file( flow_filename=flow_filename, input_value=request.input_value, global_variables=global_vars, verbose=True, session_id=session_id, ) @router.get("/check-config") async def check_assistant_config( current_user: CurrentActiveUser, session: DbSession, ) -> dict: """Check if the Langflow Assistant is properly configured. Returns available providers with their configured status and available models. 
""" user_id = current_user.id enabled_providers, _ = await get_enabled_providers_for_user(user_id, session) all_providers = [] if enabled_providers: models_by_provider = get_unified_models_detailed( providers=enabled_providers, include_unsupported=False, include_deprecated=False, model_type="language", ) for provider_dict in models_by_provider: provider_name = provider_dict.get("provider") models = provider_dict.get("models", []) model_list = [] for model in models: model_name = model.get("model_name") display_name = model.get("display_name", model_name) metadata = model.get("metadata", {}) is_deprecated = metadata.get("deprecated", False) is_not_supported = metadata.get("not_supported", False) if not is_deprecated and not is_not_supported: model_list.append( { "name": model_name, "display_name": display_name, } ) default_model = DEFAULT_MODELS.get(provider_name) if not default_model and model_list: default_model = model_list[0]["name"] if model_list: all_providers.append( { "name": provider_name, "configured": True, "default_model": default_model, "models": model_list, } ) default_provider = None default_model = None providers_with_models = [p["name"] for p in all_providers] for preferred in PREFERRED_PROVIDERS: if preferred in providers_with_models: default_provider = preferred for p in all_providers: if p["name"] == preferred: default_model = p["default_model"] break break if not default_provider and all_providers: default_provider = all_providers[0]["name"] default_model = all_providers[0]["default_model"] return { "configured": len(enabled_providers) > 0, "configured_providers": enabled_providers, "providers": all_providers, "default_provider": default_provider, "default_model": default_model, } @router.post("/assist") async def assist( request: AssistantRequest, current_user: CurrentActiveUser, session: DbSession, ) -> dict: """Chat with the Langflow Assistant.""" ctx = await _resolve_assistant_context(request, current_user.id, session) 
logger.info(f"Executing {LANGFLOW_ASSISTANT_FLOW} with {ctx.provider}/{ctx.model_name}") return await execute_flow_with_validation( flow_filename=LANGFLOW_ASSISTANT_FLOW, input_value=request.input_value or "", global_variables=ctx.global_vars, max_retries=ctx.max_retries, user_id=str(current_user.id), session_id=ctx.session_id, provider=ctx.provider, model_name=ctx.model_name, api_key_var=ctx.api_key_name, ) @router.post("/assist/stream") async def assist_stream( request: AssistantRequest, current_user: CurrentActiveUser, session: DbSession, ) -> StreamingResponse: """Chat with the Langflow Assistant with streaming progress updates.""" ctx = await _resolve_assistant_context(request, current_user.id, session) return StreamingResponse( execute_flow_with_validation_streaming( flow_filename=LANGFLOW_ASSISTANT_FLOW, input_value=request.input_value or "", global_variables=ctx.global_vars, max_retries=ctx.max_retries, user_id=str(current_user.id), session_id=ctx.session_id, provider=ctx.provider, model_name=ctx.model_name, api_key_var=ctx.api_key_name, ), media_type="text/event-stream", headers={ "Cache-Control": "no-cache", "Connection": "keep-alive", }, )
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/agentic/api/router.py", "license": "MIT License", "lines": 249, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
langflow-ai/langflow:src/backend/base/langflow/agentic/api/schemas.py
"""Request and response schemas for the Assistant API.""" from typing import Literal from pydantic import BaseModel # All possible step types for SSE progress events StepType = Literal[ "generating", # LLM is generating response "generating_component", # LLM is generating component code "generation_complete", # LLM finished generating "extracting_code", # Extracting Python code from response "validating", # Validating component code "validated", # Validation succeeded "validation_failed", # Validation failed "retrying", # About to retry with error context ] class AssistantRequest(BaseModel): """Request model for assistant interactions.""" flow_id: str component_id: str | None = None field_name: str | None = None input_value: str | None = None max_retries: int | None = None model_name: str | None = None provider: str | None = None session_id: str | None = None class ValidationResult(BaseModel): """Result of component code validation.""" is_valid: bool code: str | None = None error: str | None = None class_name: str | None = None
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/agentic/api/schemas.py", "license": "MIT License", "lines": 30, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
langflow-ai/langflow:src/backend/base/langflow/agentic/helpers/code_extraction.py
"""Python code extraction from markdown responses.""" import re PYTHON_CODE_BLOCK_PATTERN = r"```python\s*([\s\S]*?)```" GENERIC_CODE_BLOCK_PATTERN = r"```\s*([\s\S]*?)```" UNCLOSED_PYTHON_BLOCK_PATTERN = r"```python\s*([\s\S]*)$" UNCLOSED_GENERIC_BLOCK_PATTERN = r"```\s*([\s\S]*)$" def extract_python_code(text: str) -> str | None: """Extract Python code from markdown code blocks. Handles both closed (```python ... ```) and unclosed blocks. Returns the first code block that appears to be a Langflow component. """ matches = _find_code_blocks(text) if not matches: return None return _find_component_code(matches) or matches[0].strip() def _find_code_blocks(text: str) -> list[str]: """Find all code blocks in text, handling both closed and unclosed blocks.""" matches = re.findall(PYTHON_CODE_BLOCK_PATTERN, text, re.IGNORECASE) if matches: return matches matches = re.findall(GENERIC_CODE_BLOCK_PATTERN, text) if matches: return matches return _find_unclosed_code_block(text) def _find_unclosed_code_block(text: str) -> list[str]: """Handle LLM responses that don't close the code block with ```.""" for pattern in [UNCLOSED_PYTHON_BLOCK_PATTERN, UNCLOSED_GENERIC_BLOCK_PATTERN]: match = re.search(pattern, text, re.IGNORECASE) if match: code = match.group(1).rstrip("`").strip() return [code] if code else [] return [] def _find_component_code(matches: list[str]) -> str | None: """Find the first match that looks like a Langflow component.""" for match in matches: if "class " in match and "Component" in match: return match.strip() return None # Alias for backward compatibility extract_component_code = extract_python_code
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/agentic/helpers/code_extraction.py", "license": "MIT License", "lines": 40, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
langflow-ai/langflow:src/backend/base/langflow/agentic/helpers/error_handling.py
"""Error handling and categorization for the Assistant API.""" MAX_ERROR_MESSAGE_LENGTH = 150 MIN_MEANINGFUL_PART_LENGTH = 10 ERROR_PATTERNS: list[tuple[list[str], str]] = [ (["rate_limit", "rate limit", "429"], "Rate limit exceeded. Please wait a moment and try again."), (["authentication", "api_key", "unauthorized", "401"], "Authentication failed. Check your API key."), (["quota", "billing", "insufficient"], "API quota exceeded. Please check your account billing."), (["timeout", "timed out"], "Request timed out. Please try again."), (["connection", "network"], "Connection error. Please check your network and try again."), (["500", "internal server error"], "Server error. Please try again later."), ] def extract_friendly_error(error_msg: str) -> str: """Convert technical API errors into user-friendly messages.""" error_lower = error_msg.lower() for patterns, friendly_message in ERROR_PATTERNS: if any(pattern in error_lower or pattern in error_msg for pattern in patterns): return friendly_message model_missing_terms = ("not found", "does not exist", "not available") if "model" in error_lower and any(term in error_lower for term in model_missing_terms): return "Model not available. Please select a different model." if "content" in error_lower and any(term in error_lower for term in ["filter", "policy", "safety"]): return "Request blocked by content policy. Please modify your prompt." return _truncate_error_message(error_msg) def _truncate_error_message(error_msg: str) -> str: """Truncate long error messages, preserving meaningful content.""" if len(error_msg) <= MAX_ERROR_MESSAGE_LENGTH: return error_msg if ":" in error_msg: for part in error_msg.split(":"): stripped = part.strip() if MIN_MEANINGFUL_PART_LENGTH < len(stripped) < MAX_ERROR_MESSAGE_LENGTH: return stripped return f"{error_msg[:MAX_ERROR_MESSAGE_LENGTH]}..."
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/agentic/helpers/error_handling.py", "license": "MIT License", "lines": 33, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
langflow-ai/langflow:src/backend/base/langflow/agentic/helpers/sse.py
"""Server-Sent Events (SSE) formatting helpers.""" import json from langflow.agentic.api.schemas import StepType def format_progress_event( step: StepType, attempt: int, max_attempts: int, *, message: str | None = None, error: str | None = None, class_name: str | None = None, component_code: str | None = None, ) -> str: """Format SSE progress event. Args: step: The current step in the process attempt: Current attempt number (1-indexed) max_attempts: Maximum number of attempts message: Optional human-readable message error: Optional error message (for validation_failed step) class_name: Optional class name (for validation_failed step) component_code: Optional component code (for validation_failed step) """ data: dict = { "event": "progress", "step": step, "attempt": attempt, "max_attempts": max_attempts, } if message: data["message"] = message if error: data["error"] = error if class_name: data["class_name"] = class_name if component_code: data["component_code"] = component_code return f"data: {json.dumps(data)}\n\n" def format_complete_event(data: dict) -> str: """Format SSE complete event.""" return f"data: {json.dumps({'event': 'complete', 'data': data})}\n\n" def format_error_event(message: str) -> str: """Format SSE error event.""" return f"data: {json.dumps({'event': 'error', 'message': message})}\n\n" def format_token_event(chunk: str) -> str: """Format SSE token event for streaming LLM output.""" return f"data: {json.dumps({'event': 'token', 'chunk': chunk})}\n\n" def format_cancelled_event() -> str: """Format SSE cancelled event when client disconnects.""" return f"data: {json.dumps({'event': 'cancelled'})}\n\n"
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/agentic/helpers/sse.py", "license": "MIT License", "lines": 50, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
langflow-ai/langflow:src/backend/base/langflow/agentic/helpers/validation.py
"""Component code validation.""" import re from lfx.custom.validate import create_class, extract_class_name from langflow.agentic.api.schemas import ValidationResult # Regex pattern to extract class name that inherits from Component CLASS_NAME_PATTERN = re.compile(r"class\s+(\w+)\s*\([^)]*Component[^)]*\)") def _extract_class_name_regex(code: str) -> str | None: """Extract class name using regex (fallback for syntax errors).""" match = CLASS_NAME_PATTERN.search(code) return match.group(1) if match else None def _safe_extract_class_name(code: str) -> str | None: """Extract class name with fallback to regex for broken code.""" try: return extract_class_name(code) except (ValueError, SyntaxError, TypeError): return _extract_class_name_regex(code) def validate_component_code(code: str) -> ValidationResult: """Validate component code by attempting to create and instantiate the class. This instantiates the class to trigger __init__ validation checks, such as overlapping input/output names. """ class_name = _safe_extract_class_name(code) try: if class_name is None: msg = "Could not extract class name from code" raise ValueError(msg) # create_class returns the class (not an instance) component_class = create_class(code, class_name) # Instantiate the class to trigger __init__ validation # This catches errors like overlapping input/output names component_class() return ValidationResult(is_valid=True, code=code, class_name=class_name) except ( ValueError, TypeError, SyntaxError, NameError, ModuleNotFoundError, AttributeError, ImportError, RuntimeError, KeyError, ) as e: return ValidationResult(is_valid=False, code=code, error=f"{type(e).__name__}: {e}", class_name=class_name)
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/agentic/helpers/validation.py", "license": "MIT License", "lines": 44, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
langflow-ai/langflow:src/backend/base/langflow/agentic/services/assistant_service.py
"""Assistant service with validation and retry logic.""" import asyncio from collections.abc import AsyncGenerator, Callable, Coroutine from typing import Any from fastapi import HTTPException from lfx.log.logger import logger from langflow.agentic.helpers.code_extraction import extract_component_code from langflow.agentic.helpers.error_handling import extract_friendly_error from langflow.agentic.helpers.sse import ( format_cancelled_event, format_complete_event, format_error_event, format_progress_event, format_token_event, ) from langflow.agentic.helpers.validation import validate_component_code from langflow.agentic.services.flow_executor import ( execute_flow_file, execute_flow_file_streaming, extract_response_text, ) from langflow.agentic.services.flow_types import ( MAX_VALIDATION_RETRIES, VALIDATION_RETRY_TEMPLATE, VALIDATION_UI_DELAY_SECONDS, ) from langflow.agentic.services.helpers.intent_classification import classify_intent async def execute_flow_with_validation( flow_filename: str, input_value: str, global_variables: dict[str, str], *, max_retries: int = MAX_VALIDATION_RETRIES, user_id: str | None = None, session_id: str | None = None, provider: str | None = None, model_name: str | None = None, api_key_var: str | None = None, ) -> dict: """Execute flow and validate the generated component code. If the response contains Python code, it validates the code. If validation fails, re-executes the flow with error context. Continues until valid code is generated or max retries reached. 
""" current_input = input_value attempt = 0 while attempt <= max_retries: attempt += 1 logger.info(f"Component generation attempt {attempt}/{max_retries + 1}") result = await execute_flow_file( flow_filename=flow_filename, input_value=current_input, global_variables=global_variables, verbose=True, user_id=user_id, session_id=session_id, provider=provider, model_name=model_name, api_key_var=api_key_var, ) response_text = extract_response_text(result) code = extract_component_code(response_text) if not code: logger.debug("No Python code found in response, returning as-is") return result logger.info("Validating generated component code...") validation = validate_component_code(code) if validation.is_valid: logger.info(f"Component '{validation.class_name}' validated successfully!") return { **result, "validated": True, "class_name": validation.class_name, "component_code": code, "validation_attempts": attempt, } logger.warning(f"Validation failed (attempt {attempt}): {validation.error}") if attempt > max_retries: logger.error(f"Max retries ({max_retries}) reached. 
Returning last result with error.") return { **result, "validated": False, "validation_error": validation.error, "validation_attempts": attempt, } current_input = VALIDATION_RETRY_TEMPLATE.format(error=validation.error, code=code) logger.info("Retrying with error context...") # Safety return: the while loop always returns via internal checks above return { **result, "validated": False, "validation_error": validation.error, "validation_attempts": attempt, } async def execute_flow_with_validation_streaming( flow_filename: str, input_value: str, global_variables: dict[str, str], *, max_retries: int = MAX_VALIDATION_RETRIES, user_id: str | None = None, session_id: str | None = None, provider: str | None = None, model_name: str | None = None, api_key_var: str | None = None, is_disconnected: Callable[[], Coroutine[Any, Any, bool]] | None = None, ) -> AsyncGenerator[str, None]: """Execute flow with validation, yielding SSE progress and token events. SSE Event Flow: For component generation (detected from user input): 1. generating_component - Show reasoning UI (no token streaming) 2. extracting_code, validating, etc. For Q&A: 1. generating - LLM is generating response 1a. token events - Real-time token streaming 2. complete - Done Note: Component generation is detected by analyzing the user's input. 
""" current_input = input_value # Classify intent using LLM (handles multi-language support) # This translates the input and determines if user wants to generate a component or ask a question intent_result = await classify_intent( text=input_value, global_variables=global_variables, user_id=user_id, session_id=session_id, provider=provider, model_name=model_name, api_key_var=api_key_var, ) # Check if this is a component generation request based on LLM classification is_component_request = intent_result.intent == "generate_component" logger.info(f"Intent classification: {intent_result.intent} (is_component_request={is_component_request})") # Create cancel event for propagating cancellation to flow executor cancel_event = asyncio.Event() # Helper to check if client disconnected async def check_cancelled() -> bool: if cancel_event.is_set(): return True if is_disconnected is not None: return await is_disconnected() return False try: # First attempt (attempt=0) doesn't count as retry # Retries are attempt 1, 2, 3... 
up to max_retries for attempt in range(max_retries + 1): # 0 = first try, 1..max_retries = retries # Check if client disconnected before starting if await check_cancelled(): logger.info("Client disconnected, cancelling generation") yield format_cancelled_event() return logger.debug(f"Starting attempt {attempt}, is_disconnected provided: {is_disconnected is not None}") # Step 1: Generating (different step name based on intent) yield format_progress_event( "generating_component" if is_component_request else "generating", attempt, # 0 for first try, 1+ for retries max_retries, # max retries (not counting first try) message="Generating response...", ) result = None cancelled = False flow_generator = execute_flow_file_streaming( flow_filename=flow_filename, input_value=current_input, global_variables=global_variables, user_id=user_id, session_id=session_id, provider=provider, model_name=model_name, api_key_var=api_key_var, is_disconnected=is_disconnected, cancel_event=cancel_event, ) try: # Use streaming executor to get token events async for event_type, event_data in flow_generator: if event_type == "token": # Only stream tokens for Q&A, not for component generation if not is_component_request: yield format_token_event(event_data) elif event_type == "end": # Flow completed, store result result = event_data elif event_type == "cancelled": # Flow was cancelled due to client disconnect logger.info("Flow execution cancelled by client disconnect") cancelled = True break except GeneratorExit: # This generator was closed (client disconnected) logger.info("Assistant generator closed, setting cancel event") cancel_event.set() await flow_generator.aclose() yield format_cancelled_event() return except HTTPException as e: friendly_msg = extract_friendly_error(str(e.detail)) logger.error(f"Flow execution failed: {friendly_msg}") yield format_error_event(friendly_msg) return except (ValueError, RuntimeError, OSError) as e: friendly_msg = extract_friendly_error(str(e)) 
logger.error(f"Flow execution failed: {friendly_msg}") yield format_error_event(friendly_msg) return # Handle cancellation if cancelled: yield format_cancelled_event() return if result is None: logger.error("Flow execution returned no result") yield format_error_event("Flow execution returned no result") return # Step 2: Generation complete yield format_progress_event( "generation_complete", attempt, max_retries, message="Response ready", ) # For Q&A responses, return immediately without code extraction/validation if not is_component_request: yield format_complete_event(result) return # Only extract and validate code for component generation requests response_text = extract_response_text(result) code = extract_component_code(response_text) if not code: # No code found even though user asked for component generation # Return as plain text response yield format_complete_event(result) return # Check for cancellation before extraction if await check_cancelled(): logger.info("Client disconnected before code extraction, cancelling") yield format_cancelled_event() return # Step 3: Extracting code (only shown when code is found) yield format_progress_event( "extracting_code", attempt, max_retries, message="Extracting Python code from response...", ) await asyncio.sleep(VALIDATION_UI_DELAY_SECONDS) # Check for cancellation before validation if await check_cancelled(): logger.info("Client disconnected before validation, cancelling") yield format_cancelled_event() return # Step 4: Validating yield format_progress_event( "validating", attempt, max_retries, message="Validating component code...", ) await asyncio.sleep(VALIDATION_UI_DELAY_SECONDS) validation = validate_component_code(code) if validation.is_valid: # Step 5a: Validated successfully logger.info(f"Component '{validation.class_name}' validated successfully") yield format_progress_event( "validated", attempt, max_retries, message=f"Component '{validation.class_name}' validated successfully!", ) await 
asyncio.sleep(VALIDATION_UI_DELAY_SECONDS) yield format_complete_event( { **result, "validated": True, "class_name": validation.class_name, "component_code": code, "validation_attempts": attempt, } ) return # Step 5b: Validation failed logger.warning(f"Validation failed (attempt {attempt}): {validation.error}") yield format_progress_event( "validation_failed", attempt, max_retries, message="Validation failed", error=validation.error, class_name=validation.class_name, component_code=code, ) await asyncio.sleep(VALIDATION_UI_DELAY_SECONDS) if attempt >= max_retries: # Max attempts reached, return with error yield format_complete_event( { **result, "validated": False, "validation_error": validation.error, "validation_attempts": attempt, "component_code": code, } ) return # Step 6: Retrying yield format_progress_event( "retrying", attempt, max_retries, message=f"Retrying with error context (attempt {attempt + 1}/{max_retries})...", error=validation.error, ) await asyncio.sleep(VALIDATION_UI_DELAY_SECONDS) current_input = VALIDATION_RETRY_TEMPLATE.format(error=validation.error, code=code) finally: # Always set cancel event when generator exits to stop any pending flow execution logger.debug("Assistant generator exiting, setting cancel event") cancel_event.set()
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/agentic/services/assistant_service.py", "license": "MIT License", "lines": 317, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
langflow-ai/langflow:src/backend/base/langflow/agentic/services/flow_executor.py
"""Flow execution service. Orchestrates flow execution for both Python (.py) and JSON (.json) flows. Supports both synchronous and streaming execution modes. """ import asyncio import json from collections.abc import AsyncGenerator, Callable, Coroutine from typing import TYPE_CHECKING, Any from fastapi import HTTPException from lfx.cli.script_loader import extract_structured_result from lfx.events.event_manager import EventManager, create_default_event_manager from lfx.log.logger import logger from lfx.schema.schema import InputValueRequest from langflow.agentic.services.flow_types import ( STREAMING_QUEUE_MAX_SIZE, FlowExecutionResult, ) from langflow.agentic.services.helpers.event_consumer import consume_streaming_events from langflow.agentic.services.helpers.flow_loader import load_graph_for_execution, resolve_flow_path if TYPE_CHECKING: from lfx.graph.graph.base import Graph async def _run_graph_with_events( graph: "Graph", input_value: str | None, global_variables: dict[str, str] | None, user_id: str | None, session_id: str | None, event_manager: EventManager, event_queue: asyncio.Queue, execution_result: FlowExecutionResult, ) -> None: """Execute graph and store result, signaling completion via queue.""" try: if user_id: graph.user_id = user_id if session_id: graph.session_id = session_id if global_variables: if "request_variables" not in graph.context: graph.context["request_variables"] = {} graph.context["request_variables"].update(global_variables) graph.prepare() inputs = InputValueRequest(input_value=input_value) if input_value else None results = [result async for result in graph.async_start(inputs=inputs, event_manager=event_manager)] execution_result.result = extract_structured_result(results) except Exception as e: # noqa: BLE001 execution_result.error = e logger.error(f"Flow execution error: {e}") finally: await event_queue.put(None) async def execute_flow_file( flow_filename: str, input_value: str | None = None, global_variables: dict[str, str] | 
None = None, *, verbose: bool = False, # noqa: ARG001 user_id: str | None = None, session_id: str | None = None, provider: str | None = None, model_name: str | None = None, api_key_var: str | None = None, ) -> dict: """Execute a flow from a Python or JSON file. Supports both .py and .json flows. When both exist, .py takes priority. Args: flow_filename: Name of the flow file (e.g., "MyFlow.json" or "my_flow.py") input_value: Input value to pass to the flow global_variables: Dict of global variables to inject into the flow context verbose: Kept for backward compatibility (currently unused) user_id: User ID for components that require user context session_id: Unique session ID to isolate memory between requests provider: Model provider to inject into Agent nodes model_name: Model name to inject into Agent nodes api_key_var: API key variable name to inject into Agent nodes Returns: dict: Result from flow execution Raises: HTTPException: If flow file not found or execution fails """ flow_path, flow_type = resolve_flow_path(flow_filename) try: graph = await load_graph_for_execution(flow_path, flow_type, provider, model_name, api_key_var) if user_id: graph.user_id = user_id if session_id: graph.session_id = session_id if global_variables: if "request_variables" not in graph.context: graph.context["request_variables"] = {} graph.context["request_variables"].update(global_variables) graph.prepare() inputs = InputValueRequest(input_value=input_value) if input_value else None results = [result async for result in graph.async_start(inputs=inputs)] return extract_structured_result(results) except HTTPException: raise except Exception as e: logger.error(f"Flow execution error: {e}") raise HTTPException(status_code=500, detail="An error occurred while executing the flow.") from e async def execute_flow_file_streaming( flow_filename: str, input_value: str | None = None, global_variables: dict[str, str] | None = None, *, user_id: str | None = None, session_id: str | None = None, 
provider: str | None = None, model_name: str | None = None, api_key_var: str | None = None, is_disconnected: Callable[[], Coroutine[Any, Any, bool]] | None = None, cancel_event: asyncio.Event | None = None, ) -> AsyncGenerator[tuple[str, Any], None]: """Execute a flow from a Python or JSON file with token streaming. Supports both .py and .json flows. When both exist, .py takes priority. Yields events as they occur: - ("token", chunk): Token chunk from LLM streaming - ("end", result): Final result when flow completes - ("cancelled", {}): Flow was cancelled Args: flow_filename: Name of the flow file (e.g., "MyFlow.json" or "my_flow.py") input_value: Input value to pass to the flow global_variables: Dict of global variables to inject into the flow context user_id: User ID for components that require user context session_id: Unique session ID to isolate memory between requests provider: Model provider to inject into Agent nodes model_name: Model name to inject into Agent nodes api_key_var: API key variable name to inject into Agent nodes is_disconnected: Async function to check if client disconnected cancel_event: Event to signal cancellation from outside Yields: tuple[str, Any]: Event type and data pairs Raises: HTTPException: If flow file not found or execution fails """ flow_path, flow_type = resolve_flow_path(flow_filename) try: graph = await load_graph_for_execution(flow_path, flow_type, provider, model_name, api_key_var) except (json.JSONDecodeError, OSError, ValueError) as e: logger.error(f"Flow preparation error: {e}") raise HTTPException(status_code=500, detail="An error occurred while preparing the flow.") from e event_queue: asyncio.Queue[tuple[str, bytes, float] | None] = asyncio.Queue(maxsize=STREAMING_QUEUE_MAX_SIZE) event_manager = create_default_event_manager(event_queue) execution_result = FlowExecutionResult() flow_task = asyncio.create_task( _run_graph_with_events( graph=graph, input_value=input_value, global_variables=global_variables, 
user_id=user_id, session_id=session_id, event_manager=event_manager, event_queue=event_queue, execution_result=execution_result, ) ) cancelled = False try: async for event_type, chunk in consume_streaming_events(event_queue, is_disconnected, cancel_event): if event_type == "token": yield ("token", chunk) elif event_type == "end": break elif event_type == "cancelled": cancelled = True break except GeneratorExit: logger.info("Generator closed externally, cancelling flow") cancelled = True finally: if not flow_task.done(): flow_task.cancel() try: await flow_task except asyncio.CancelledError: logger.info("Flow task cancelled") if cancelled: yield ("cancelled", {}) return if execution_result.has_error: raise HTTPException( status_code=500, detail="An error occurred while executing the flow." ) from execution_result.error yield ("end", execution_result.result if execution_result.has_result else {}) def extract_response_text(result: dict) -> str: """Extract text from flow execution result.""" if "result" in result: return result["result"] if "text" in result: return result["text"] if "exception_message" in result: return result["exception_message"] return str(result)
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/agentic/services/flow_executor.py", "license": "MIT License", "lines": 192, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
langflow-ai/langflow:src/backend/base/langflow/agentic/services/flow_preparation.py
"""Flow data preparation and model injection.""" import json from pathlib import Path from lfx.base.models.model_metadata import get_provider_param_mapping from lfx.base.models.unified_models import get_provider_config def inject_model_into_flow( flow_data: dict, provider: str, model_name: str, api_key_var: str | None = None, ) -> dict: """Inject model configuration into the flow's Agent component. Args: flow_data: The flow JSON as a dict provider: The provider name (e.g., "OpenAI", "Anthropic") model_name: The model name (e.g., "gpt-4o", "claude-sonnet-4-5-20250929") api_key_var: Optional API key variable name. If not provided, uses provider's default. Returns: Modified flow data with the model configuration injected Raises: ValueError: If provider is unknown """ provider_config = get_provider_config(provider) param_mapping = get_provider_param_mapping(provider) # Use provided api_key_var or default from config api_key_var = api_key_var or provider_config.get("variable_name") metadata = { "api_key_param": param_mapping.get("api_key_param", "api_key"), "context_length": 128000, "model_class": param_mapping.get("model_class", "ChatOpenAI"), "model_name_param": param_mapping.get("model_name_param", "model"), } # Add extra params from param mapping (url_param, project_id_param, base_url_param) for extra_param in ("url_param", "project_id_param", "base_url_param"): if extra_param in param_mapping: metadata[extra_param] = param_mapping[extra_param] model_value = [ { "category": provider, "icon": provider_config["icon"], "metadata": metadata, "name": model_name, "provider": provider, } ] # Inject into all Agent nodes for node in flow_data.get("data", {}).get("nodes", []): node_data = node.get("data", {}) if node_data.get("type") == "Agent": template = node_data.get("node", {}).get("template", {}) if "model" in template: template["model"]["value"] = model_value # Note: Do NOT set api_key here. 
The Agent component will automatically # look up the API key from the user's global variables using get_api_key_for_provider() # when the api_key field is empty/falsy. return flow_data def load_and_prepare_flow( flow_path: Path, provider: str | None, model_name: str | None, api_key_var: str | None, ) -> str: """Load flow file and prepare JSON with model injection.""" flow_data = json.loads(flow_path.read_text()) if provider and model_name: flow_data = inject_model_into_flow(flow_data, provider, model_name, api_key_var) return json.dumps(flow_data)
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/agentic/services/flow_preparation.py", "license": "MIT License", "lines": 67, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
langflow-ai/langflow:src/backend/base/langflow/agentic/services/provider_service.py
"""Provider configuration service.""" import os from uuid import UUID from lfx.base.models.unified_models import ( get_model_provider_variable_mapping, get_provider_required_variable_keys, ) from lfx.log.logger import logger from sqlalchemy.ext.asyncio import AsyncSession from langflow.services.deps import get_variable_service from langflow.services.variable.constants import CREDENTIAL_TYPE from langflow.services.variable.service import DatabaseVariableService, VariableService # Preferred providers in order of priority PREFERRED_PROVIDERS = ["Anthropic", "OpenAI", "Google Generative AI", "Groq"] # Default models per provider DEFAULT_MODELS: dict[str, str] = { "Anthropic": "claude-sonnet-4-5-20250514", "OpenAI": "gpt-5.2", "Google Generative AI": "gemini-2.0-flash", "Groq": "llama-3.3-70b-versatile", } async def get_enabled_providers_for_user( user_id: UUID | str, session: AsyncSession, ) -> tuple[list[str], dict[str, bool]]: """Get enabled providers for a user. Returns: Tuple of (enabled_providers list, provider_status dict) """ variable_service = get_variable_service() if not isinstance(variable_service, DatabaseVariableService): return [], {} all_variables = await variable_service.get_all(user_id=user_id, session=session) credential_names = {var.name for var in all_variables if var.type == CREDENTIAL_TYPE} if not credential_names: return [], {} provider_variable_map = get_model_provider_variable_mapping() enabled_providers = [] provider_status = {} for provider in provider_variable_map: # Check if ALL required variables for this provider are present required_keys = get_provider_required_variable_keys(provider) is_enabled = all(key in credential_names for key in required_keys) provider_status[provider] = is_enabled if is_enabled: enabled_providers.append(provider) return enabled_providers, provider_status async def check_api_key( variable_service: VariableService, user_id: UUID | str, key_name: str, session: AsyncSession, ) -> str | None: """Check if an API key is 
available from global variables or environment.""" api_key = None try: api_key = await variable_service.get_variable(user_id, key_name, "", session) except ValueError: logger.debug(f"{key_name} not found in global variables, checking environment") if not api_key: api_key = os.getenv(key_name) return api_key def get_default_provider(enabled_providers: list[str]) -> str | None: """Get the default provider from enabled providers based on priority.""" for preferred in PREFERRED_PROVIDERS: if preferred in enabled_providers: return preferred return enabled_providers[0] if enabled_providers else None def get_default_model(provider: str) -> str | None: """Get the default model for a provider.""" return DEFAULT_MODELS.get(provider)
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/agentic/services/provider_service.py", "license": "MIT License", "lines": 71, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
langflow-ai/langflow:src/backend/tests/unit/agentic/api/test_code_extraction.py
"""Tests for code extraction and validation in the agentic module. These tests validate the core functionality of extracting Python code from LLM responses and validating that the code is a valid Langflow component. """ from langflow.agentic.helpers.code_extraction import ( _find_code_blocks, _find_unclosed_code_block, extract_python_code, ) from langflow.agentic.helpers.validation import validate_component_code # Sample valid Langflow component code VALID_COMPONENT_CODE = """from langflow.custom import Component from langflow.io import MessageTextInput, Output from langflow.schema.message import Message class HelloWorldComponent(Component): display_name = "Hello World" description = "A simple hello world component." inputs = [ MessageTextInput(name="input_value", display_name="Input"), ] outputs = [ Output(display_name="Output", name="output", method="process"), ] def process(self) -> Message: return Message(text=f"Hello, {self.input_value}!") """ # Incomplete component code (missing closing bracket) INCOMPLETE_COMPONENT_CODE = """from langflow.custom import Component from langflow.io import MessageTextInput, Output class IncompleteComponent(Component): display_name = "Incomplete" inputs = [ MessageTextInput(name="input_value", display_name="Input"), """ # Invalid syntax code INVALID_SYNTAX_CODE = """from langflow.custom import Component class BrokenComponent(Component) display_name = "Broken" """ class TestExtractPythonCode: """Tests for extract_python_code function.""" def test_extract_from_closed_python_block(self): """Should extract code from a properly closed ```python block.""" text = f"Here is the component:\n\n```python\n{VALID_COMPONENT_CODE}\n```\n\nLet me know if you need changes." 
result = extract_python_code(text) assert result is not None assert "class HelloWorldComponent" in result assert "from langflow.custom import Component" in result def test_extract_from_unclosed_python_block(self): """Should extract code from an unclosed ```python block.""" text = f"Here is the component:\n\n```python\n{VALID_COMPONENT_CODE}" result = extract_python_code(text) assert result is not None assert "class HelloWorldComponent" in result def test_extract_with_text_before_code(self): """Should extract code even when there's text before the code block.""" text = f"""I apologize for the rate limit issue. Let me create a component. Here's a component that uses TextBlob for sentiment analysis: ```python {VALID_COMPONENT_CODE} ```""" result = extract_python_code(text) assert result is not None assert "class HelloWorldComponent" in result def test_extract_from_unclosed_block_with_text_before(self): """Should extract code from unclosed block even with text before it.""" text = f"""I apologize for the rate limit issue. Let me create the component. ```python {VALID_COMPONENT_CODE}""" result = extract_python_code(text) assert result is not None assert "class HelloWorldComponent" in result assert "from langflow.custom import Component" in result def test_extract_from_generic_code_block(self): """Should extract code from a generic ``` block without language specifier.""" text = f"Here is the code:\n\n```\n{VALID_COMPONENT_CODE}\n```" result = extract_python_code(text) assert result is not None assert "class HelloWorldComponent" in result def test_returns_none_for_no_code_blocks(self): """Should return None when there are no code blocks.""" text = "This is just regular text without any code blocks." 
result = extract_python_code(text) assert result is None def test_returns_none_for_empty_text(self): """Should return None for empty text.""" result = extract_python_code("") assert result is None def test_prefers_component_code_over_other_code(self): """When multiple code blocks exist, should prefer the one with Component class.""" other_code = "print('hello world')" text = f"""Here's a simple print: ```python {other_code} ``` And here's the component: ```python {VALID_COMPONENT_CODE} ```""" result = extract_python_code(text) assert result is not None assert "class HelloWorldComponent" in result # Should NOT return the print code assert result.strip() != other_code def test_handles_code_with_special_characters(self): """Should handle code containing special characters.""" code_with_specials = """from langflow.custom import Component class SpecialComponent(Component): display_name = "Special < > & Characters" description = "Handles 'quotes' and \\"escaped\\" chars" """ text = f"```python\n{code_with_specials}\n```" result = extract_python_code(text) assert result is not None assert "SpecialComponent" in result class TestFindCodeBlocks: """Tests for _find_code_blocks helper function.""" def test_finds_closed_python_blocks(self): """Should find all closed python code blocks.""" text = "```python\ncode1\n```\n\nText\n\n```python\ncode2\n```" result = _find_code_blocks(text) assert len(result) == 2 assert "code1" in result[0] assert "code2" in result[1] def test_finds_unclosed_blocks_as_fallback(self): """Should find unclosed blocks when no closed blocks exist.""" text = "Some text\n```python\ncode_here" result = _find_code_blocks(text) assert len(result) == 1 assert "code_here" in result[0] class TestFindUnclosedCodeBlock: """Tests for _find_unclosed_code_block helper function.""" def test_finds_unclosed_python_block(self): """Should find unclosed ```python block.""" text = "Text before\n```python\ncode_content" result = _find_unclosed_code_block(text) assert 
len(result) == 1 assert "code_content" in result[0] def test_finds_unclosed_generic_block(self): """Should find unclosed ``` block without language.""" text = "Text\n```\nsome code" result = _find_unclosed_code_block(text) assert len(result) == 1 assert "some code" in result[0] def test_returns_empty_for_closed_blocks(self): """Should return empty list when blocks are properly closed.""" text = "```python\ncode\n```" # This function only looks for unclosed blocks # When called by _find_code_blocks, closed blocks are found first result = _find_unclosed_code_block(text) # It will find from ```python to end, but the code will include the closing ``` # The function strips trailing backticks, so it should work assert len(result) >= 0 # May or may not find depending on implementation def test_returns_empty_for_no_code_blocks(self): """Should return empty list when no code blocks at all.""" text = "Just regular text" result = _find_unclosed_code_block(text) assert result == [] def test_handles_multiple_backticks_in_code(self): """Should handle code that contains backticks.""" text = "```python\ncode with `inline` backticks" result = _find_unclosed_code_block(text) assert len(result) == 1 assert "`inline`" in result[0] class TestValidateComponentCode: """Tests for validate_component_code function.""" def test_validates_valid_component(self): """Should validate correct Langflow component code.""" result = validate_component_code(VALID_COMPONENT_CODE) assert result.is_valid is True assert result.class_name == "HelloWorldComponent" assert result.error is None assert result.code == VALID_COMPONENT_CODE def test_fails_for_syntax_error(self): """Should fail validation for code with syntax errors.""" result = validate_component_code(INVALID_SYNTAX_CODE) assert result.is_valid is False assert result.error is not None # Error might be SyntaxError or ValueError depending on validation method assert "expected" in result.error.lower() or "syntax" in result.error.lower() def 
test_fails_for_incomplete_code(self): """Should fail validation for incomplete component code.""" result = validate_component_code(INCOMPLETE_COMPONENT_CODE) assert result.is_valid is False assert result.error is not None def test_fails_for_non_component_code(self): """Should fail validation for code that's not a Langflow component.""" non_component_code = """def hello(): return "hello" """ result = validate_component_code(non_component_code) assert result.is_valid is False assert result.error is not None def test_fails_for_empty_code(self): """Should fail validation for empty string.""" result = validate_component_code("") assert result.is_valid is False assert result.error is not None def test_fails_for_missing_imports(self): """Should fail validation when required imports are missing.""" code_without_imports = """class BrokenComponent(Component): display_name = "Broken" """ result = validate_component_code(code_without_imports) assert result.is_valid is False assert result.error is not None class TestCodeExtractionAndValidationIntegration: """Integration tests for the full extract -> validate flow.""" def test_full_flow_with_valid_response(self): """Should extract and validate a complete valid response.""" llm_response = f"""I'll create a Hello World component for you. 
```python {VALID_COMPONENT_CODE} ``` This component takes an input and returns a greeting message.""" # Extract code = extract_python_code(llm_response) assert code is not None # Validate validation = validate_component_code(code) assert validation.is_valid is True assert validation.class_name == "HelloWorldComponent" def test_full_flow_with_unclosed_valid_response(self): """Should extract and validate from unclosed but valid code.""" llm_response = f"""Here's your component: ```python {VALID_COMPONENT_CODE}""" # Extract code = extract_python_code(llm_response) assert code is not None # Validate validation = validate_component_code(code) assert validation.is_valid is True def test_full_flow_with_invalid_code_returns_error(self): """Should extract but fail validation for broken code.""" llm_response = f"""Here's the component: ```python {INVALID_SYNTAX_CODE} ```""" # Extract code = extract_python_code(llm_response) assert code is not None # Validate should fail validation = validate_component_code(code) assert validation.is_valid is False assert validation.error is not None # Error might be SyntaxError or ValueError depending on validation method assert "expected" in validation.error.lower() or "syntax" in validation.error.lower() def test_full_flow_with_text_heavy_response(self): """Should handle responses with lots of explanatory text.""" llm_response = f"""I apologize for the previous rate limit error. Let me try again. Based on your request, I'll create a custom Langflow component that performs sentiment analysis. This component will: 1. Take text input 2. Process it through a sentiment analyzer 3. Return the sentiment score Here's the implementation: ```python {VALID_COMPONENT_CODE} ``` To use this component: 1. Drag it onto your canvas 2. Connect an input 3. 
The output will contain the sentiment analysis Let me know if you need any modifications!""" code = extract_python_code(llm_response) assert code is not None validation = validate_component_code(code) assert validation.is_valid is True assert validation.class_name == "HelloWorldComponent" class TestEdgeCases: """Edge case tests for robustness.""" def test_handles_windows_line_endings(self): """Should handle Windows-style line endings (CRLF).""" code = VALID_COMPONENT_CODE.replace("\n", "\r\n") text = f"```python\r\n{code}\r\n```" result = extract_python_code(text) assert result is not None assert "HelloWorldComponent" in result def test_handles_mixed_line_endings(self): """Should handle mixed line endings.""" text = f"Text\r\n```python\n{VALID_COMPONENT_CODE}\r\n```" result = extract_python_code(text) assert result is not None def test_handles_unicode_in_code(self): """Should handle unicode characters in code.""" unicode_code = """from langflow.custom import Component class UnicodeComponent(Component): display_name = "Unicode \u00e9\u00e0\u00fc" description = "Handles \u4e2d\u6587 and \ud83d\ude00" """ text = f"```python\n{unicode_code}\n```" result = extract_python_code(text) assert result is not None assert "Unicode" in result def test_handles_very_long_code(self): """Should handle very long code blocks.""" long_code = VALID_COMPONENT_CODE + "\n" * 1000 + "# End of long code" text = f"```python\n{long_code}\n```" result = extract_python_code(text) assert result is not None assert "HelloWorldComponent" in result assert "End of long code" in result def test_case_insensitive_python_tag(self): """Should handle Python tag with different cases.""" for tag in ["```python", "```Python", "```PYTHON", "```PyThOn"]: text = f"{tag}\n{VALID_COMPONENT_CODE}\n```" result = extract_python_code(text) assert result is not None, f"Failed for tag: {tag}"
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/agentic/api/test_code_extraction.py", "license": "MIT License", "lines": 309, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/agentic/api/test_schemas.py
"""Tests for API schemas. Tests the Pydantic models used for request/response validation. """ import pytest from langflow.agentic.api.schemas import AssistantRequest, StepType, ValidationResult from pydantic import ValidationError class TestAssistantRequest: """Tests for AssistantRequest schema.""" def test_should_create_with_required_field_only(self): """Should create request with only required flow_id field.""" request = AssistantRequest(flow_id="test-flow-id") assert request.flow_id == "test-flow-id" assert request.component_id is None assert request.field_name is None assert request.input_value is None assert request.max_retries is None assert request.model_name is None assert request.provider is None assert request.session_id is None def test_should_create_with_all_fields(self): """Should create request with all fields populated.""" request = AssistantRequest( flow_id="flow-123", component_id="comp-456", field_name="input_field", input_value="Hello, world!", max_retries=5, model_name="gpt-4", provider="OpenAI", session_id="session-789", ) assert request.flow_id == "flow-123" assert request.component_id == "comp-456" assert request.field_name == "input_field" assert request.input_value == "Hello, world!" 
assert request.max_retries == 5 assert request.model_name == "gpt-4" assert request.provider == "OpenAI" assert request.session_id == "session-789" def test_should_raise_error_for_missing_flow_id(self): """Should raise validation error when flow_id is missing.""" with pytest.raises(ValidationError) as exc_info: AssistantRequest() assert "flow_id" in str(exc_info.value) def test_should_accept_empty_string_for_optional_fields(self): """Should accept empty string for optional string fields.""" request = AssistantRequest( flow_id="test", input_value="", component_id="", ) assert request.input_value == "" assert request.component_id == "" def test_should_serialize_to_dict(self): """Should serialize to dictionary correctly.""" request = AssistantRequest( flow_id="test-flow", max_retries=3, provider="Anthropic", ) data = request.model_dump() assert data["flow_id"] == "test-flow" assert data["max_retries"] == 3 assert data["provider"] == "Anthropic" assert data["component_id"] is None def test_should_deserialize_from_dict(self): """Should deserialize from dictionary correctly.""" data = { "flow_id": "test-flow", "input_value": "test input", "max_retries": 2, } request = AssistantRequest(**data) assert request.flow_id == "test-flow" assert request.input_value == "test input" assert request.max_retries == 2 class TestValidationResult: """Tests for ValidationResult schema.""" def test_should_create_valid_result(self): """Should create a valid validation result.""" result = ValidationResult( is_valid=True, code="class MyComponent(Component): pass", class_name="MyComponent", ) assert result.is_valid is True assert result.code == "class MyComponent(Component): pass" assert result.class_name == "MyComponent" assert result.error is None def test_should_create_invalid_result_with_error(self): """Should create an invalid validation result with error.""" result = ValidationResult( is_valid=False, code="class Broken(Component)", error="SyntaxError: expected ':'", class_name="Broken", 
) assert result.is_valid is False assert result.error == "SyntaxError: expected ':'" assert result.class_name == "Broken" def test_should_create_with_required_field_only(self): """Should create with only required is_valid field.""" result = ValidationResult(is_valid=False) assert result.is_valid is False assert result.code is None assert result.error is None assert result.class_name is None def test_should_serialize_to_dict(self): """Should serialize to dictionary correctly.""" result = ValidationResult( is_valid=True, code="test code", class_name="TestComponent", ) data = result.model_dump() assert data["is_valid"] is True assert data["code"] == "test code" assert data["class_name"] == "TestComponent" assert data["error"] is None def test_should_deserialize_from_dict(self): """Should deserialize from dictionary correctly.""" data = { "is_valid": False, "error": "Test error", } result = ValidationResult(**data) assert result.is_valid is False assert result.error == "Test error" class TestStepType: """Tests for StepType literal type.""" def test_should_define_all_expected_step_types(self): """Should define all expected step types.""" expected_steps = [ "generating", "generation_complete", "extracting_code", "validating", "validated", "validation_failed", "retrying", ] # StepType is a Literal, we can check its args step_type_args = StepType.__args__ for step in expected_steps: assert step in step_type_args, f"Missing step type: {step}" def test_step_types_should_be_strings(self): """All step types should be strings.""" for step in StepType.__args__: assert isinstance(step, str) class TestSchemaIntegration: """Integration tests for schema interactions.""" def test_assistant_request_json_round_trip(self): """Should survive JSON serialization round trip.""" original = AssistantRequest( flow_id="test-flow", component_id="comp-1", input_value="test", max_retries=3, ) json_str = original.model_dump_json() restored = AssistantRequest.model_validate_json(json_str) assert 
restored.flow_id == original.flow_id assert restored.component_id == original.component_id assert restored.input_value == original.input_value assert restored.max_retries == original.max_retries def test_validation_result_json_round_trip(self): """Should survive JSON serialization round trip.""" original = ValidationResult( is_valid=True, code="class Test: pass", class_name="Test", ) json_str = original.model_dump_json() restored = ValidationResult.model_validate_json(json_str) assert restored.is_valid == original.is_valid assert restored.code == original.code assert restored.class_name == original.class_name
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/agentic/api/test_schemas.py", "license": "MIT License", "lines": 177, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/agentic/api/test_streaming_validation.py
"""Tests for streaming validation flow in the agentic module. These tests validate the retry logic and SSE event emission for component generation. """ import json from unittest.mock import AsyncMock, patch import pytest from langflow.agentic.helpers.code_extraction import extract_python_code from langflow.agentic.helpers.sse import ( format_complete_event, format_error_event, format_progress_event, ) from langflow.agentic.helpers.validation import validate_component_code from langflow.agentic.services.assistant_service import ( execute_flow_with_validation, execute_flow_with_validation_streaming, ) from langflow.agentic.services.flow_types import ( VALIDATION_RETRY_TEMPLATE, IntentResult, ) # Sample valid Langflow component code VALID_COMPONENT_CODE = """from langflow.custom import Component from langflow.io import MessageTextInput, Output from langflow.schema.message import Message class HelloWorldComponent(Component): display_name = "Hello World" description = "A simple hello world component." 
inputs = [ MessageTextInput(name="input_value", display_name="Input"), ] outputs = [ Output(display_name="Output", name="output", method="process"), ] def process(self) -> Message: return Message(text=f"Hello, {self.input_value}!") """ # Invalid component code (syntax error but has inputs/outputs to pass extraction) INVALID_COMPONENT_CODE = """from langflow.custom import Component from langflow.io import MessageTextInput, Output class BrokenComponent(Component) # Missing colon here display_name = "Broken" inputs = [ MessageTextInput(name="text", display_name="Text"), ] outputs = [ Output(display_name="Output", name="output", method="process"), ] """ # Incomplete code that got cut off (simulating rate limit/token limit) CUTOFF_COMPONENT_CODE = """from __future__ import annotations from langflow.custom import Component from langflow.io import MessageTextInput, Output from langflow.schema.message import Message class SentimentAnalyzer(Component): display_name = "Sentiment Analyzer" description = "Analyzes sentiment of text" inputs = [ MessageTextInput(name="text", display_name="Text"), ] outputs = [ Output(display_name="Result", name="result", method="analyze"), ] def analyze(self) -> Message: # This code is cut off mid-implementation""" class TestSSEEventFormatting: """Tests for SSE event formatting functions.""" def testformat_progress_event_format(self): """Should format progress event correctly.""" result = format_progress_event("generating", 1, 4) assert result.startswith("data: ") assert result.endswith("\n\n") # Parse the JSON json_str = result[6:-2] # Remove "data: " and "\n\n" data = json.loads(json_str) assert data["event"] == "progress" assert data["step"] == "generating" assert data["attempt"] == 1 assert data["max_attempts"] == 4 def testformat_progress_event_validating_step(self): """Should format validating progress event correctly.""" result = format_progress_event("validating", 2, 4) json_str = result[6:-2] data = json.loads(json_str) assert 
data["step"] == "validating" assert data["attempt"] == 2 def testformat_complete_event_format(self): """Should format complete event correctly.""" test_data = {"result": "test", "validated": True, "class_name": "TestComponent"} result = format_complete_event(test_data) assert result.startswith("data: ") assert result.endswith("\n\n") json_str = result[6:-2] data = json.loads(json_str) assert data["event"] == "complete" assert data["data"]["validated"] is True assert data["data"]["class_name"] == "TestComponent" def testformat_error_event_format(self): """Should format error event correctly.""" result = format_error_event("Rate limit exceeded") json_str = result[6:-2] data = json.loads(json_str) assert data["event"] == "error" assert data["message"] == "Rate limit exceeded" class TestValidationRetryTemplate: """Tests for the validation retry prompt template.""" def test_retry_template_contains_error(self): """Should include error message in retry template.""" error = "SyntaxError: invalid syntax" code = "def broken():" result = VALIDATION_RETRY_TEMPLATE.format(error=error, code=code) assert error in result assert code in result assert "fix" in result.lower() or "correct" in result.lower() def _mock_streaming_result(result): """Create an async generator that yields a single end event with the given result.""" async def _gen(*_args, **_kwargs): yield ("end", result) return _gen def _mock_streaming_sequence(results): """Create an async generator factory that yields different results on each call.""" call_count = 0 async def _gen(*_args, **_kwargs): nonlocal call_count call_count += 1 yield ("end", results[min(call_count - 1, len(results) - 1)]) return _gen def _mock_intent_classification(intent: str = "generate_component"): """Create an async mock that returns IntentResult.""" async def _mock(*_args, **_kwargs): return IntentResult(translation="mocked", intent=intent) return _mock class TestStreamingValidationFlow: """Tests for execute_flow_with_validation_streaming 
function.""" @pytest.mark.asyncio async def test_valid_code_first_try_returns_validated(self): """When code is valid on first try, should return validated=True.""" mock_flow_result = {"result": f"Here is your component:\n\n```python\n{VALID_COMPONENT_CODE}\n```"} with ( patch( "langflow.agentic.services.assistant_service.classify_intent", side_effect=_mock_intent_classification("generate_component"), ), patch( "langflow.agentic.services.assistant_service.execute_flow_file_streaming", side_effect=_mock_streaming_result(mock_flow_result), ), ): events = [ event async for event in execute_flow_with_validation_streaming( flow_filename="test.json", input_value="create a hello world component", global_variables={}, max_retries=3, ) ] # Should have progress events + complete event assert len(events) >= 2 # Parse all events parsed_events = [] for event in events: json_str = event[6:-2] # Remove "data: " and "\n\n" parsed_events.append(json.loads(json_str)) # Should have generating_component progress (component generation mode) generating_events = [ e for e in parsed_events if e.get("event") == "progress" and e.get("step") == "generating_component" ] assert len(generating_events) == 1 # Should have validating progress validating_events = [e for e in parsed_events if e.get("event") == "progress" and e.get("step") == "validating"] assert len(validating_events) == 1 # Should have complete event with validated=True complete_events = [e for e in parsed_events if e.get("event") == "complete"] assert len(complete_events) == 1 assert complete_events[0]["data"]["validated"] is True assert complete_events[0]["data"]["class_name"] == "HelloWorldComponent" @pytest.mark.asyncio async def test_invalid_code_retries_until_success(self): """When code is invalid, should retry with error context until valid.""" invalid_response = {"result": f"```python\n{INVALID_COMPONENT_CODE}\n```"} valid_response = {"result": f"```python\n{VALID_COMPONENT_CODE}\n```"} with ( patch( 
"langflow.agentic.services.assistant_service.classify_intent", side_effect=_mock_intent_classification("generate_component"), ), patch( "langflow.agentic.services.assistant_service.execute_flow_file_streaming", side_effect=_mock_streaming_sequence([invalid_response, valid_response]), ), ): events = [ event async for event in execute_flow_with_validation_streaming( flow_filename="test.json", input_value="create a component", global_variables={}, max_retries=3, ) ] # Parse events parsed_events = [json.loads(e[6:-2]) for e in events] # Should have 2 generating_component events (attempt 1 and 2) generating_events = [ e for e in parsed_events if e.get("event") == "progress" and e.get("step") == "generating_component" ] assert len(generating_events) == 2 # Should have 2 validating events validating_events = [e for e in parsed_events if e.get("event") == "progress" and e.get("step") == "validating"] assert len(validating_events) == 2 # Should have complete event with validated=True (after retry) complete_events = [e for e in parsed_events if e.get("event") == "complete"] assert len(complete_events) == 1 assert complete_events[0]["data"]["validated"] is True assert complete_events[0]["data"]["validation_attempts"] == 1 @pytest.mark.asyncio async def test_all_retries_fail_returns_validation_error(self): """When all retries fail, should return validated=False with error.""" invalid_response = {"result": f"```python\n{INVALID_COMPONENT_CODE}\n```"} with ( patch( "langflow.agentic.services.assistant_service.classify_intent", side_effect=_mock_intent_classification("generate_component"), ), patch( "langflow.agentic.services.assistant_service.execute_flow_file_streaming", side_effect=_mock_streaming_result(invalid_response), ), ): events = [ event async for event in execute_flow_with_validation_streaming( flow_filename="test.json", input_value="create a component", global_variables={}, max_retries=2, # Will try 3 times total (1 + 2 retries) ) ] parsed_events = 
[json.loads(e[6:-2]) for e in events] # Should have 3 generating_component events (max_retries + 1) generating_events = [ e for e in parsed_events if e.get("event") == "progress" and e.get("step") == "generating_component" ] assert len(generating_events) == 3 # Should have complete event with validated=False complete_events = [e for e in parsed_events if e.get("event") == "complete"] assert len(complete_events) == 1 assert complete_events[0]["data"]["validated"] is False assert complete_events[0]["data"]["validation_error"] is not None assert complete_events[0]["data"]["validation_attempts"] == 2 @pytest.mark.asyncio async def test_no_code_in_response_returns_as_is(self): """When response has no code (question intent), should return without validation.""" text_only_response = {"result": "Langflow is a visual flow builder for LLM applications."} with ( patch( "langflow.agentic.services.assistant_service.classify_intent", side_effect=_mock_intent_classification("question"), ), patch( "langflow.agentic.services.assistant_service.execute_flow_file_streaming", side_effect=_mock_streaming_result(text_only_response), ), ): events = [ event async for event in execute_flow_with_validation_streaming( flow_filename="test.json", input_value="what is langflow?", global_variables={}, max_retries=3, ) ] parsed_events = [json.loads(e[6:-2]) for e in events] # For question intent, should have generating event (not generating_component) generating_events = [e for e in parsed_events if e.get("event") == "progress" and e.get("step") == "generating"] assert len(generating_events) == 1 # Should NOT have validating event validating_events = [e for e in parsed_events if e.get("event") == "progress" and e.get("step") == "validating"] assert len(validating_events) == 0 # Complete event should NOT have validated field complete_events = [e for e in parsed_events if e.get("event") == "complete"] assert len(complete_events) == 1 assert "validated" not in complete_events[0]["data"] 
@pytest.mark.asyncio async def test_flow_execution_error_returnsformat_error_event(self): """When flow execution fails, should return SSE error event.""" from fastapi import HTTPException async def mock_streaming_error(*_args, **_kwargs): raise HTTPException(status_code=429, detail="Rate limit exceeded") yield # makes this an async generator with ( patch( "langflow.agentic.services.assistant_service.classify_intent", side_effect=_mock_intent_classification("generate_component"), ), patch( "langflow.agentic.services.assistant_service.execute_flow_file_streaming", side_effect=mock_streaming_error, ), ): events = [ event async for event in execute_flow_with_validation_streaming( flow_filename="test.json", input_value="create a component", global_variables={}, max_retries=3, ) ] parsed_events = [json.loads(e[6:-2]) for e in events] # Should have error event error_events = [e for e in parsed_events if e.get("event") == "error"] assert len(error_events) == 1 assert "rate limit" in error_events[0]["message"].lower() class TestValidationRetryBehavior: """Tests specifically for the retry behavior with error context.""" @pytest.mark.asyncio async def test_retry_includes_previous_error_in_prompt(self): """When retrying, should include the validation error in the new prompt.""" invalid_response = {"result": f"```python\n{INVALID_COMPONENT_CODE}\n```"} valid_response = {"result": f"```python\n{VALID_COMPONENT_CODE}\n```"} captured_inputs = [] call_count = 0 async def mock_streaming(*_args, **_kwargs): nonlocal call_count call_count += 1 captured_inputs.append(_kwargs.get("input_value")) if call_count == 1: yield ("end", invalid_response) else: yield ("end", valid_response) with ( patch( "langflow.agentic.services.assistant_service.classify_intent", side_effect=_mock_intent_classification("generate_component"), ), patch( "langflow.agentic.services.assistant_service.execute_flow_file_streaming", side_effect=mock_streaming, ), ): # Consume the generator to trigger the mock calls _ 
= [ event async for event in execute_flow_with_validation_streaming( flow_filename="test.json", input_value="create a component", global_variables={}, max_retries=3, ) ] # Should have captured 2 inputs assert len(captured_inputs) == 2 # First input is the original user input (translation is used only for intent classification) assert captured_inputs[0] == "create a component" # Second input should contain error context assert "error" in captured_inputs[1].lower() assert "fix" in captured_inputs[1].lower() or "correct" in captured_inputs[1].lower() # Should include the broken code assert INVALID_COMPONENT_CODE.strip() in captured_inputs[1] or "BrokenComponent" in captured_inputs[1] class TestNonStreamingValidation: """Tests for the non-streaming validation function.""" @pytest.mark.asyncio async def test_non_streaming_valid_code_returns_validated(self): """Non-streaming validation should work the same as streaming.""" mock_flow_result = {"result": f"```python\n{VALID_COMPONENT_CODE}\n```"} with patch( "langflow.agentic.services.assistant_service.execute_flow_file", new_callable=AsyncMock, return_value=mock_flow_result, ): result = await execute_flow_with_validation( flow_filename="test.json", input_value="create a hello world component", global_variables={}, max_retries=3, ) assert result["validated"] is True assert result["class_name"] == "HelloWorldComponent" assert result["validation_attempts"] == 1 @pytest.mark.asyncio async def test_non_streaming_retries_on_failure(self): """Non-streaming should retry until valid code is generated.""" invalid_response = {"result": f"```python\n{INVALID_COMPONENT_CODE}\n```"} valid_response = {"result": f"```python\n{VALID_COMPONENT_CODE}\n```"} call_count = 0 async def mock_execute_flow(*_args, **_kwargs): nonlocal call_count call_count += 1 if call_count == 1: return invalid_response return valid_response with patch( "langflow.agentic.services.assistant_service.execute_flow_file", side_effect=mock_execute_flow, ): result = 
await execute_flow_with_validation( flow_filename="test.json", input_value="create a component", global_variables={}, max_retries=3, ) assert result["validated"] is True assert result["validation_attempts"] == 2 @pytest.mark.asyncio async def test_non_streaming_max_retries_returns_error(self): """After max retries, should return validation error.""" invalid_response = {"result": f"```python\n{INVALID_COMPONENT_CODE}\n```"} with patch( "langflow.agentic.services.assistant_service.execute_flow_file", new_callable=AsyncMock, return_value=invalid_response, ): result = await execute_flow_with_validation( flow_filename="test.json", input_value="create a component", global_variables={}, max_retries=2, ) assert result["validated"] is False assert result["validation_error"] is not None class TestResponseWithTextAndCode: """Tests for handling responses that contain both text and code. This is the main issue being debugged - LLM responses that include explanatory text along with code blocks. """ @pytest.mark.asyncio async def test_extracts_code_from_response_with_text_before(self): """Should correctly extract and validate code when text comes before it.""" response_with_text = { "result": f"""I apologize for the rate limit issue. Let me help you create the component. 
Here's the implementation: ```python {VALID_COMPONENT_CODE} ``` This component will process your input.""" } with ( patch( "langflow.agentic.services.assistant_service.classify_intent", side_effect=_mock_intent_classification("generate_component"), ), patch( "langflow.agentic.services.assistant_service.execute_flow_file_streaming", side_effect=_mock_streaming_result(response_with_text), ), ): events = [ event async for event in execute_flow_with_validation_streaming( flow_filename="test.json", input_value="create a component", global_variables={}, max_retries=3, ) ] parsed_events = [json.loads(e[6:-2]) for e in events] # Should have validating event validating_events = [e for e in parsed_events if e.get("event") == "progress" and e.get("step") == "validating"] assert len(validating_events) == 1 # Should have complete event with validated=True complete_events = [e for e in parsed_events if e.get("event") == "complete"] assert len(complete_events) == 1 assert complete_events[0]["data"]["validated"] is True @pytest.mark.asyncio async def test_extracts_code_from_unclosed_block_with_text(self): """Should correctly extract code from unclosed block with text before it.""" response_with_unclosed = { "result": f"""I apologize for the rate limit issue. 
```python {VALID_COMPONENT_CODE}""" } with ( patch( "langflow.agentic.services.assistant_service.classify_intent", side_effect=_mock_intent_classification("generate_component"), ), patch( "langflow.agentic.services.assistant_service.execute_flow_file_streaming", side_effect=_mock_streaming_result(response_with_unclosed), ), ): events = [ event async for event in execute_flow_with_validation_streaming( flow_filename="test.json", input_value="create a component", global_variables={}, max_retries=3, ) ] parsed_events = [json.loads(e[6:-2]) for e in events] # Should validate and pass complete_events = [e for e in parsed_events if e.get("event") == "complete"] assert len(complete_events) == 1 assert complete_events[0]["data"]["validated"] is True @pytest.mark.asyncio async def test_complete_event_includes_component_code(self): """Complete event should include the extracted component_code field.""" mock_flow_result = {"result": f"```python\n{VALID_COMPONENT_CODE}\n```"} with ( patch( "langflow.agentic.services.assistant_service.classify_intent", side_effect=_mock_intent_classification("generate_component"), ), patch( "langflow.agentic.services.assistant_service.execute_flow_file_streaming", side_effect=_mock_streaming_result(mock_flow_result), ), ): events = [ event async for event in execute_flow_with_validation_streaming( flow_filename="test.json", input_value="create a component", global_variables={}, max_retries=3, ) ] parsed_events = [json.loads(e[6:-2]) for e in events] complete_events = [e for e in parsed_events if e.get("event") == "complete"] assert len(complete_events) == 1 complete_data = complete_events[0]["data"] assert "component_code" in complete_data assert "HelloWorldComponent" in complete_data["component_code"] @pytest.mark.asyncio async def test_validation_failure_includes_component_code(self): """When validation fails, should still include the attempted code.""" invalid_response = {"result": f"```python\n{INVALID_COMPONENT_CODE}\n```"} with ( patch( 
"langflow.agentic.services.assistant_service.classify_intent", side_effect=_mock_intent_classification("generate_component"), ), patch( "langflow.agentic.services.assistant_service.execute_flow_file_streaming", side_effect=_mock_streaming_result(invalid_response), ), ): events = [ event async for event in execute_flow_with_validation_streaming( flow_filename="test.json", input_value="create a component", global_variables={}, max_retries=0, # No retries - fail immediately ) ] parsed_events = [json.loads(e[6:-2]) for e in events] complete_events = [e for e in parsed_events if e.get("event") == "complete"] assert len(complete_events) == 1 complete_data = complete_events[0]["data"] assert complete_data["validated"] is False assert "component_code" in complete_data assert "BrokenComponent" in complete_data["component_code"] class TestRealWorldScenarios: """Tests for real-world scenarios the user encountered. These tests simulate the exact patterns seen in production. """ @pytest.mark.asyncio async def test_response_with_apology_and_cutoff_code(self): """Should handle response with apology text and cut-off/incomplete code.""" # This simulates the exact response the user showed response_with_apology = { "result": f"""I apologize for the rate limit issue. Let me create the component. 
Here's the implementation: ```python {CUTOFF_COMPONENT_CODE}""" } with ( patch( "langflow.agentic.services.assistant_service.classify_intent", side_effect=_mock_intent_classification("generate_component"), ), patch( "langflow.agentic.services.assistant_service.execute_flow_file_streaming", side_effect=_mock_streaming_result(response_with_apology), ), ): events = [ event async for event in execute_flow_with_validation_streaming( flow_filename="test.json", input_value="create a sentiment analyzer", global_variables={}, max_retries=0, # Test with no retries to see immediate behavior ) ] parsed_events = [json.loads(e[6:-2]) for e in events] # Should have validating event (code was extracted) validating_events = [e for e in parsed_events if e.get("event") == "progress" and e.get("step") == "validating"] assert len(validating_events) == 1, "Code should be extracted and validation attempted" # Should have complete event complete_events = [e for e in parsed_events if e.get("event") == "complete"] assert len(complete_events) == 1 complete_data = complete_events[0]["data"] # Validation should FAIL because code is incomplete assert complete_data["validated"] is False, "Incomplete code should fail validation" # Should have validation error assert complete_data.get("validation_error") is not None, "Should have validation error" # Should include the extracted code assert "component_code" in complete_data, "Should include extracted code" assert "SentimentAnalyzer" in complete_data["component_code"] @pytest.mark.asyncio async def test_response_with_apology_and_cutoff_code_with_retries(self): """After exhausting retries with cutoff code, should return validated=False.""" cutoff_response = { "result": f"""I apologize for the issue. 
```python {CUTOFF_COMPONENT_CODE}""" } with ( patch( "langflow.agentic.services.assistant_service.classify_intent", side_effect=_mock_intent_classification("generate_component"), ), patch( "langflow.agentic.services.assistant_service.execute_flow_file_streaming", side_effect=_mock_streaming_result(cutoff_response), ), ): events = [ event async for event in execute_flow_with_validation_streaming( flow_filename="test.json", input_value="create a component", global_variables={}, max_retries=2, # Will try 3 times ) ] parsed_events = [json.loads(e[6:-2]) for e in events] # Should have 3 generating_component events generating_events = [ e for e in parsed_events if e.get("event") == "progress" and e.get("step") == "generating_component" ] assert len(generating_events) == 3 # Should have 3 validating events validating_events = [e for e in parsed_events if e.get("event") == "progress" and e.get("step") == "validating"] assert len(validating_events) == 3 # Complete event should have validated=False complete_events = [e for e in parsed_events if e.get("event") == "complete"] complete_data = complete_events[0]["data"] assert complete_data["validated"] is False assert complete_data["validation_attempts"] == 2 @pytest.mark.asyncio async def test_cutoff_code_retry_gets_valid_code(self): """If retry gets valid code, should return validated=True.""" cutoff_response = { "result": f"""Error occurred. 
```python {CUTOFF_COMPONENT_CODE}""" } valid_response = {"result": f"```python\n{VALID_COMPONENT_CODE}\n```"} with ( patch( "langflow.agentic.services.assistant_service.classify_intent", side_effect=_mock_intent_classification("generate_component"), ), patch( "langflow.agentic.services.assistant_service.execute_flow_file_streaming", side_effect=_mock_streaming_sequence([cutoff_response, cutoff_response, valid_response]), ), ): events = [ event async for event in execute_flow_with_validation_streaming( flow_filename="test.json", input_value="create a component", global_variables={}, max_retries=3, ) ] parsed_events = [json.loads(e[6:-2]) for e in events] complete_events = [e for e in parsed_events if e.get("event") == "complete"] complete_data = complete_events[0]["data"] # Should eventually succeed assert complete_data["validated"] is True assert complete_data["validation_attempts"] == 2 assert complete_data["class_name"] == "HelloWorldComponent" def test_code_extraction_from_exact_user_response(self): """Test extraction from the exact response pattern user showed.""" # Exact pattern from user's screenshot user_response = """I apologize for the rate limit issue. Let me create the component. Here's the implementation: ```python from __future__ import annotations from langflow.custom import Component from langflow.io import MessageTextInput, Output class SentimentComponent(Component): display_name = "Sentiment" inputs = [ MessageTextInput(name="text", display_name="Input"), ] outputs = [ Output(name="output", method="run"), ] def run(self):""" # Should extract the code code = extract_python_code(user_response) assert code is not None, "Should extract code from user response" assert "SentimentComponent" in code assert "from __future__" in code # Validate should fail due to incomplete code validation = validate_component_code(code) assert validation.is_valid is False, "Incomplete code should fail validation" assert validation.error is not None
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/agentic/api/test_streaming_validation.py", "license": "MIT License", "lines": 708, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/agentic/helpers/test_error_handling.py
"""Tests for error handling helpers. Tests the error categorization and user-friendly message generation. """ from langflow.agentic.helpers.error_handling import ( ERROR_PATTERNS, MAX_ERROR_MESSAGE_LENGTH, MIN_MEANINGFUL_PART_LENGTH, _truncate_error_message, extract_friendly_error, ) class TestExtractFriendlyError: """Tests for extract_friendly_error function.""" def test_should_return_friendly_message_for_rate_limit_error(self): """Should return user-friendly message for rate limit errors.""" error_messages = [ "rate_limit exceeded", "Error 429: Too many requests", "Rate limit reached for model", ] for error in error_messages: result = extract_friendly_error(error) assert "rate limit" in result.lower() assert "wait" in result.lower() or "try again" in result.lower() def test_should_return_friendly_message_for_authentication_error(self): """Should return user-friendly message for authentication errors.""" error_messages = [ "authentication failed", "Invalid api_key provided", "Unauthorized access", "Error 401: Unauthorized", ] for error in error_messages: result = extract_friendly_error(error) assert "authentication" in result.lower() or "api key" in result.lower() def test_should_return_friendly_message_for_quota_error(self): """Should return user-friendly message for quota errors.""" error_messages = [ "quota exceeded", "billing limit reached", "Insufficient credits", ] for error in error_messages: result = extract_friendly_error(error) assert "quota" in result.lower() or "billing" in result.lower() def test_should_return_friendly_message_for_timeout_error(self): """Should return user-friendly message for timeout errors.""" error_messages = [ "Request timeout", "Connection timed out", "Operation timed out after 30 seconds", ] for error in error_messages: result = extract_friendly_error(error) assert "timeout" in result.lower() or "timed out" in result.lower() def test_should_return_friendly_message_for_connection_error(self): """Should return user-friendly 
message for connection errors.""" error_messages = [ "Connection refused", "Network error occurred", "Unable to establish connection", ] for error in error_messages: result = extract_friendly_error(error) assert "connection" in result.lower() or "network" in result.lower() def test_should_return_friendly_message_for_server_error(self): """Should return user-friendly message for server errors.""" error_messages = [ "Error 500: Internal server error", "500 Internal Server Error", ] for error in error_messages: result = extract_friendly_error(error) assert "server error" in result.lower() def test_should_return_friendly_message_for_model_not_found(self): """Should return user-friendly message for model not found errors.""" error_messages = [ "Model gpt-5 not found", "The model does not exist", "Model claude-99 is not available", ] for error in error_messages: result = extract_friendly_error(error) assert "model" in result.lower() assert "not available" in result.lower() or "different" in result.lower() def test_should_return_friendly_message_for_content_policy_error(self): """Should return user-friendly message for content policy errors.""" error_messages = [ "Content blocked by safety filter", "Request violates content policy", "Content filter triggered", ] for error in error_messages: result = extract_friendly_error(error) assert "content" in result.lower() or "policy" in result.lower() assert "modify" in result.lower() or "blocked" in result.lower() def test_should_truncate_unknown_error_messages(self): """Should truncate unknown error messages that are too long.""" long_error = "x" * 200 result = extract_friendly_error(long_error) assert len(result) <= MAX_ERROR_MESSAGE_LENGTH + 3 # +3 for "..." 
assert result.endswith("...") def test_should_return_original_for_short_unknown_errors(self): """Should return original message for short unknown errors.""" short_error = "Unknown error occurred" result = extract_friendly_error(short_error) assert result == short_error def test_should_handle_empty_string(self): """Should handle empty error string.""" result = extract_friendly_error("") assert result == "" def test_should_be_case_insensitive(self): """Should match error patterns case-insensitively.""" error_messages = [ "RATE_LIMIT exceeded", "Rate_Limit error", "rAtE_lImIt issue", ] for error in error_messages: result = extract_friendly_error(error) assert "rate limit" in result.lower() class TestTruncateErrorMessage: """Tests for _truncate_error_message function.""" def test_should_return_original_for_short_messages(self): """Should return original message when within limit.""" short_message = "Short error" result = _truncate_error_message(short_message) assert result == short_message def test_should_truncate_long_messages(self): """Should truncate messages that exceed the limit.""" long_message = "x" * 200 result = _truncate_error_message(long_message) assert len(result) == MAX_ERROR_MESSAGE_LENGTH + 3 assert result.endswith("...") def test_should_extract_meaningful_part_from_colon_separated(self): """Should extract meaningful part from colon-separated messages.""" message = "Very long prefix that we dont need: This is the meaningful error message" result = _truncate_error_message(message) # Should prefer the meaningful part after colon assert "meaningful error" in result.lower() or len(result) <= MAX_ERROR_MESSAGE_LENGTH + 3 def test_should_skip_too_short_parts_after_colon(self): """Should skip parts that are too short to be meaningful.""" message = "x" * 200 + ": ab" # "ab" is too short result = _truncate_error_message(message) # Should fall back to truncation since "ab" is too short assert result.endswith("...") def 
test_should_handle_message_at_exact_limit(self): """Should return original when message is exactly at limit.""" exact_message = "x" * MAX_ERROR_MESSAGE_LENGTH result = _truncate_error_message(exact_message) assert result == exact_message class TestErrorPatterns: """Tests for ERROR_PATTERNS configuration.""" def test_should_have_expected_pattern_categories(self): """Should have all expected error pattern categories.""" expected_patterns = [ "rate_limit", "authentication", "quota", "timeout", "connection", "500", ] all_patterns = [] for patterns, _ in ERROR_PATTERNS: all_patterns.extend(patterns) for expected in expected_patterns: assert any(expected in pattern for pattern in all_patterns), f"Missing pattern category: {expected}" def test_each_pattern_should_have_friendly_message(self): """Each pattern list should have an associated friendly message.""" for patterns, friendly_message in ERROR_PATTERNS: assert isinstance(patterns, list) assert len(patterns) > 0 assert isinstance(friendly_message, str) assert len(friendly_message) > 0 class TestConstants: """Tests for module constants.""" def test_max_error_message_length_is_reasonable(self): """MAX_ERROR_MESSAGE_LENGTH should be a reasonable value.""" assert MAX_ERROR_MESSAGE_LENGTH > 50 assert MAX_ERROR_MESSAGE_LENGTH < 500 def test_min_meaningful_part_length_is_reasonable(self): """MIN_MEANINGFUL_PART_LENGTH should be a reasonable value.""" assert MIN_MEANINGFUL_PART_LENGTH > 0 assert MIN_MEANINGFUL_PART_LENGTH < 50
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/agentic/helpers/test_error_handling.py", "license": "MIT License", "lines": 184, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/agentic/helpers/test_sse.py
"""Tests for SSE (Server-Sent Events) formatting helpers. Tests the event formatting functions used for streaming responses. """ import json import pytest from langflow.agentic.helpers.sse import ( format_complete_event, format_error_event, format_progress_event, format_token_event, ) class TestFormatProgressEvent: """Tests for format_progress_event function.""" def test_should_format_basic_progress_event(self): """Should format a basic progress event correctly.""" result = format_progress_event("generating", 1, 4) assert result.startswith("data: ") assert result.endswith("\n\n") json_str = result[6:-2] data = json.loads(json_str) assert data["event"] == "progress" assert data["step"] == "generating" assert data["attempt"] == 1 assert data["max_attempts"] == 4 def test_should_include_optional_message(self): """Should include optional message when provided.""" result = format_progress_event("validating", 2, 3, message="Validating component...") data = json.loads(result[6:-2]) assert data["message"] == "Validating component..." 
def test_should_include_optional_error(self): """Should include optional error when provided.""" result = format_progress_event("validation_failed", 1, 3, error="SyntaxError: invalid syntax") data = json.loads(result[6:-2]) assert data["error"] == "SyntaxError: invalid syntax" def test_should_include_optional_class_name(self): """Should include optional class_name when provided.""" result = format_progress_event("validated", 1, 3, class_name="MyComponent") data = json.loads(result[6:-2]) assert data["class_name"] == "MyComponent" def test_should_include_optional_component_code(self): """Should include optional component_code when provided.""" code = "class Test(Component): pass" result = format_progress_event("validation_failed", 1, 3, component_code=code) data = json.loads(result[6:-2]) assert data["component_code"] == code def test_should_include_all_optional_fields(self): """Should include all optional fields when provided.""" result = format_progress_event( "validation_failed", 2, 4, message="Validation failed", error="SyntaxError", class_name="BrokenComponent", component_code="class Broken: pass", ) data = json.loads(result[6:-2]) assert data["event"] == "progress" assert data["step"] == "validation_failed" assert data["attempt"] == 2 assert data["max_attempts"] == 4 assert data["message"] == "Validation failed" assert data["error"] == "SyntaxError" assert data["class_name"] == "BrokenComponent" assert data["component_code"] == "class Broken: pass" def test_should_omit_none_optional_fields(self): """Should not include optional fields when they are None.""" result = format_progress_event("generating", 1, 3) data = json.loads(result[6:-2]) assert "message" not in data assert "error" not in data assert "class_name" not in data assert "component_code" not in data @pytest.mark.parametrize( "step", [ "generating", "generation_complete", "extracting_code", "validating", "validated", "validation_failed", "retrying", ], ) def 
test_should_accept_all_valid_step_types(self, step: str): """Should accept all valid step types.""" result = format_progress_event(step, 1, 3) data = json.loads(result[6:-2]) assert data["step"] == step class TestFormatCompleteEvent: """Tests for format_complete_event function.""" def test_should_format_complete_event_with_data(self): """Should format complete event with provided data.""" test_data = {"result": "test", "validated": True, "class_name": "TestComponent"} result = format_complete_event(test_data) assert result.startswith("data: ") assert result.endswith("\n\n") parsed = json.loads(result[6:-2]) assert parsed["event"] == "complete" assert parsed["data"] == test_data def test_should_format_complete_event_with_empty_data(self): """Should format complete event with empty data dict.""" result = format_complete_event({}) parsed = json.loads(result[6:-2]) assert parsed["event"] == "complete" assert parsed["data"] == {} def test_should_preserve_nested_data_structure(self): """Should preserve nested data structures.""" nested_data = { "result": "success", "metadata": {"attempts": 3, "duration": 1.5}, "items": [1, 2, 3], } result = format_complete_event(nested_data) parsed = json.loads(result[6:-2]) assert parsed["data"]["metadata"]["attempts"] == 3 assert parsed["data"]["items"] == [1, 2, 3] class TestFormatErrorEvent: """Tests for format_error_event function.""" def test_should_format_error_event_with_message(self): """Should format error event with provided message.""" result = format_error_event("Rate limit exceeded") assert result.startswith("data: ") assert result.endswith("\n\n") parsed = json.loads(result[6:-2]) assert parsed["event"] == "error" assert parsed["message"] == "Rate limit exceeded" def test_should_format_error_event_with_empty_message(self): """Should format error event with empty message.""" result = format_error_event("") parsed = json.loads(result[6:-2]) assert parsed["event"] == "error" assert parsed["message"] == "" def 
test_should_preserve_special_characters_in_message(self): """Should preserve special characters in error message.""" message = 'Error: "invalid" <syntax> & issues' result = format_error_event(message) parsed = json.loads(result[6:-2]) assert parsed["message"] == message class TestFormatTokenEvent: """Tests for format_token_event function.""" def test_should_format_token_event_with_chunk(self): """Should format token event with provided chunk.""" result = format_token_event("Hello") assert result.startswith("data: ") assert result.endswith("\n\n") parsed = json.loads(result[6:-2]) assert parsed["event"] == "token" assert parsed["chunk"] == "Hello" def test_should_format_token_event_with_empty_chunk(self): """Should format token event with empty chunk.""" result = format_token_event("") parsed = json.loads(result[6:-2]) assert parsed["event"] == "token" assert parsed["chunk"] == "" def test_should_preserve_whitespace_in_chunk(self): """Should preserve whitespace in token chunk.""" result = format_token_event(" hello world ") parsed = json.loads(result[6:-2]) assert parsed["chunk"] == " hello world " def test_should_preserve_newlines_in_chunk(self): """Should preserve newlines in token chunk.""" result = format_token_event("line1\nline2\n") parsed = json.loads(result[6:-2]) assert parsed["chunk"] == "line1\nline2\n" def test_should_handle_unicode_in_chunk(self): """Should handle unicode characters in chunk.""" result = format_token_event("Hello 世界 🌍") parsed = json.loads(result[6:-2]) assert parsed["chunk"] == "Hello 世界 🌍" class TestSSEFormatConsistency: """Tests for SSE format consistency across all event types.""" def test_all_events_should_have_consistent_format(self): """All events should have consistent SSE format.""" events = [ format_progress_event("generating", 1, 3), format_complete_event({"result": "test"}), format_error_event("error"), format_token_event("chunk"), ] for event in events: assert event.startswith("data: ") assert event.endswith("\n\n") # 
Should be valid JSON between "data: " and "\n\n" json_str = event[6:-2] parsed = json.loads(json_str) assert "event" in parsed def test_events_should_produce_valid_json(self): """All events should produce valid JSON.""" test_cases = [ format_progress_event("validating", 2, 4, message="Testing"), format_complete_event({"complex": {"nested": [1, 2, 3]}}), format_error_event("Test error with 'quotes' and \"double quotes\""), format_token_event("Token with special chars: <>&"), ] for event in test_cases: json_str = event[6:-2] # Should not raise json.loads(json_str)
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/agentic/helpers/test_sse.py", "license": "MIT License", "lines": 200, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/agentic/services/test_flow_executor.py
"""Tests for flow executor service. Tests the flow execution, model injection, and streaming functionality. """ import json from pathlib import Path from unittest.mock import AsyncMock, MagicMock, patch import pytest from fastapi import HTTPException from langflow.agentic.services.flow_executor import ( execute_flow_file, execute_flow_file_streaming, extract_response_text, ) from langflow.agentic.services.flow_preparation import ( inject_model_into_flow, load_and_prepare_flow, ) from langflow.agentic.services.flow_types import ( FLOWS_BASE_PATH, STREAMING_EVENT_TIMEOUT_SECONDS, STREAMING_QUEUE_MAX_SIZE, FlowExecutionResult, ) from langflow.agentic.services.helpers.event_consumer import parse_event_data class TestFlowExecutionResult: """Tests for FlowExecutionResult dataclass.""" def test_should_create_with_defaults(self): """Should create with default empty values.""" result = FlowExecutionResult() assert result.result == {} assert result.error is None assert result.has_error is False assert result.has_result is False def test_should_detect_error(self): """Should detect when error is set.""" result = FlowExecutionResult(error=ValueError("test")) assert result.has_error is True assert result.has_result is False def test_should_detect_result(self): """Should detect when result is set.""" result = FlowExecutionResult(result={"key": "value"}) assert result.has_result is True assert result.has_error is False def test_should_allow_both_result_and_error(self): """Should allow both result and error to be set.""" result = FlowExecutionResult(result={"partial": "data"}, error=ValueError("partial failure")) assert result.has_result is True assert result.has_error is True class TestInjectModelIntoFlow: """Tests for inject_model_into_flow function.""" def test_should_inject_model_into_agent_node(self): """Should inject model configuration into Agent node.""" flow_data = { "data": { "nodes": [ { "data": { "type": "Agent", "node": {"template": {"model": {"value": []}}}, } } ] } } 
with patch("langflow.agentic.services.flow_preparation.get_provider_config") as mock_config: mock_config.return_value = { "variable_name": "OPENAI_API_KEY", "api_key_param": "api_key", "model_class": "ChatOpenAI", "model_name_param": "model", "icon": "OpenAI", } result = inject_model_into_flow(flow_data, "OpenAI", "gpt-4") agent_node = result["data"]["nodes"][0] model_value = agent_node["data"]["node"]["template"]["model"]["value"] assert len(model_value) == 1 assert model_value[0]["name"] == "gpt-4" assert model_value[0]["provider"] == "OpenAI" def test_should_not_modify_non_agent_nodes(self): """Should not modify nodes that are not Agent type.""" flow_data = { "data": { "nodes": [ { "data": { "type": "TextInput", "node": {"template": {}}, } } ] } } with patch("langflow.agentic.services.flow_preparation.get_provider_config") as mock_config: mock_config.return_value = { "variable_name": "TEST_KEY", "api_key_param": "api_key", "model_class": "TestModel", "model_name_param": "model", "icon": "Test", } result = inject_model_into_flow(flow_data, "Test", "test-model") node = result["data"]["nodes"][0] assert "model" not in node["data"]["node"]["template"] def test_should_use_custom_api_key_var(self): """Should use provided api_key_var instead of default.""" flow_data = {"data": {"nodes": []}} with patch("langflow.agentic.services.flow_preparation.get_provider_config") as mock_config: mock_config.return_value = { "variable_name": "DEFAULT_KEY", "api_key_param": "api_key", "model_class": "TestModel", "model_name_param": "model", "icon": "Test", } # Should not raise even with empty nodes result = inject_model_into_flow(flow_data, "Test", "test-model", api_key_var="CUSTOM_KEY") assert result is not None class TestExtractResponseText: """Tests for extract_response_text function.""" def test_should_extract_from_result_key(self): """Should extract text from 'result' key.""" data = {"result": "Hello, world!"} result = extract_response_text(data) assert result == "Hello, world!" 
def test_should_extract_from_text_key(self): """Should extract text from 'text' key when result not present.""" data = {"text": "Hello from text key"} result = extract_response_text(data) assert result == "Hello from text key" def test_should_extract_from_exception_message(self): """Should extract exception message.""" data = {"exception_message": "Error occurred"} result = extract_response_text(data) assert result == "Error occurred" def test_should_prefer_result_over_text(self): """Should prefer 'result' key over 'text' key.""" data = {"result": "From result", "text": "From text"} result = extract_response_text(data) assert result == "From result" def test_should_return_string_representation_for_unknown_structure(self): """Should return string representation for unknown structure.""" data = {"custom_key": "custom_value", "another": 123} result = extract_response_text(data) assert "custom_key" in result or "custom_value" in result def test_should_handle_empty_dict(self): """Should handle empty dictionary.""" result = extract_response_text({}) assert result == "{}" class TestParseEventData: """Tests for parse_event_data function.""" def test_should_parse_valid_event(self): """Should parse valid event data.""" data = b'{"event": "token", "data": {"chunk": "Hello"}}' event_type, event_data = parse_event_data(data) assert event_type == "token" assert event_data == {"chunk": "Hello"} def test_should_return_none_for_empty_data(self): """Should return None event type for empty data.""" event_type, event_data = parse_event_data(b"") assert event_type is None assert event_data == {} def test_should_return_none_for_whitespace_only(self): """Should return None for whitespace-only data.""" event_type, event_data = parse_event_data(b" \n\t ") assert event_type is None assert event_data == {} def test_should_handle_event_without_data(self): """Should handle event without data field.""" data = b'{"event": "end"}' event_type, event_data = parse_event_data(data) assert event_type 
== "end" assert event_data == {} class TestExecuteFlowFile: """Tests for execute_flow_file function.""" @pytest.mark.asyncio async def test_should_raise_404_for_missing_flow_file(self): """Should raise HTTPException 404 for missing flow file.""" with pytest.raises(HTTPException) as exc_info: await execute_flow_file("nonexistent_flow.json") assert exc_info.value.status_code == 404 assert "not found" in exc_info.value.detail.lower() @pytest.mark.asyncio async def test_should_execute_flow_with_model_injection(self): """Should execute flow with model preparation when provider and model specified.""" mock_graph = MagicMock() mock_graph.context = {} mock_graph.prepare = MagicMock() async def mock_async_start(*_args, **_kwargs): yield {"result": "success"} mock_graph.async_start = mock_async_start with ( patch( "langflow.agentic.services.flow_executor.resolve_flow_path", return_value=(Path("/fake/path/test.json"), "json"), ), patch( "langflow.agentic.services.flow_executor.load_graph_for_execution", new_callable=AsyncMock, return_value=mock_graph, ) as mock_load, ): result = await execute_flow_file( "test.json", input_value="test", provider="OpenAI", model_name="gpt-4", ) mock_load.assert_called_once() # Result goes through extract_structured_result which may transform it assert result is not None @pytest.mark.asyncio async def test_should_raise_500_on_execution_error(self): """Should raise HTTPException 500 on execution error.""" mock_graph = MagicMock() mock_graph.context = {} mock_graph.prepare = MagicMock() async def mock_async_start(*_args, **_kwargs): msg = "Execution failed" raise RuntimeError(msg) yield mock_graph.async_start = mock_async_start with ( patch( "langflow.agentic.services.flow_executor.resolve_flow_path", return_value=(Path("/fake/path/test.json"), "json"), ), patch( "langflow.agentic.services.flow_executor.load_graph_for_execution", new_callable=AsyncMock, return_value=mock_graph, ), pytest.raises(HTTPException) as exc_info, ): await 
execute_flow_file("test.json") assert exc_info.value.status_code == 500 class TestLoadAndPrepareFlow: """Tests for load_and_prepare_flow function.""" def test_should_load_and_return_json_string(self): """Should load flow file and return JSON string.""" mock_flow_data = {"data": {"nodes": []}} mock_path = MagicMock() mock_path.read_text.return_value = json.dumps(mock_flow_data) result = load_and_prepare_flow(mock_path, None, None, None) assert isinstance(result, str) parsed = json.loads(result) assert parsed == mock_flow_data def test_should_inject_model_when_provider_and_model_specified(self): """Should inject model when provider and model_name are specified.""" mock_flow_data = {"data": {"nodes": []}} mock_path = MagicMock() mock_path.read_text.return_value = json.dumps(mock_flow_data) with patch( "langflow.agentic.services.flow_preparation.inject_model_into_flow", return_value={"data": {"nodes": [], "injected": True}}, ) as mock_inject: result = load_and_prepare_flow(mock_path, "OpenAI", "gpt-4", None) mock_inject.assert_called_once() parsed = json.loads(result) assert parsed["data"].get("injected") is True class TestExecuteFlowFileStreaming: """Tests for execute_flow_file_streaming function.""" @pytest.mark.asyncio async def test_should_raise_404_for_missing_flow_file(self): """Should raise HTTPException 404 for missing flow file.""" with pytest.raises(HTTPException) as exc_info: async for _ in execute_flow_file_streaming("nonexistent_flow.json"): pass assert exc_info.value.status_code == 404 @pytest.mark.asyncio async def test_should_yield_token_and_end_events(self): """Should yield token events followed by end event.""" mock_graph = MagicMock() mock_graph.context = {} mock_graph.prepare = MagicMock() async def mock_async_start(*_args, **_kwargs): yield {"result": "complete"} mock_graph.async_start = mock_async_start # This test verifies the streaming setup and basic flow with ( patch( "langflow.agentic.services.flow_executor.resolve_flow_path", 
return_value=(Path("/fake/path/test.json"), "json"), ), patch( "langflow.agentic.services.flow_executor.load_graph_for_execution", new_callable=AsyncMock, return_value=mock_graph, ), patch("langflow.agentic.services.flow_executor.create_default_event_manager"), ): # The streaming function is complex; for unit tests we verify setup pass class TestConstants: """Tests for module constants.""" def test_flows_base_path_should_point_to_flows_directory(self): """FLOWS_BASE_PATH should point to the flows directory.""" assert FLOWS_BASE_PATH.name == "flows" assert FLOWS_BASE_PATH.parent.name == "agentic" def test_streaming_queue_max_size_should_be_reasonable(self): """STREAMING_QUEUE_MAX_SIZE should be reasonable.""" assert STREAMING_QUEUE_MAX_SIZE > 100 assert STREAMING_QUEUE_MAX_SIZE <= 10000 def test_streaming_timeout_should_be_reasonable(self): """STREAMING_EVENT_TIMEOUT_SECONDS should be reasonable.""" assert STREAMING_EVENT_TIMEOUT_SECONDS > 30 assert STREAMING_EVENT_TIMEOUT_SECONDS <= 600
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/agentic/services/test_flow_executor.py", "license": "MIT License", "lines": 306, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/agentic/services/test_provider_service.py
"""Tests for provider service. Tests the provider configuration and API key checking functionality. """ import os from unittest.mock import AsyncMock, MagicMock, patch from uuid import UUID import pytest from langflow.agentic.services.provider_service import ( DEFAULT_MODELS, PREFERRED_PROVIDERS, check_api_key, get_default_model, get_default_provider, get_enabled_providers_for_user, ) class TestPreferredProviders: """Tests for PREFERRED_PROVIDERS configuration.""" def test_should_have_expected_providers(self): """Should have expected providers in preferred order.""" assert "Anthropic" in PREFERRED_PROVIDERS assert "OpenAI" in PREFERRED_PROVIDERS assert "Google Generative AI" in PREFERRED_PROVIDERS assert "Groq" in PREFERRED_PROVIDERS def test_anthropic_should_be_first_preference(self): """Anthropic should be the first preferred provider.""" assert PREFERRED_PROVIDERS[0] == "Anthropic" def test_should_have_at_least_two_providers(self): """Should have at least two providers for fallback.""" assert len(PREFERRED_PROVIDERS) >= 2 class TestDefaultModels: """Tests for DEFAULT_MODELS configuration.""" def test_should_have_model_for_each_preferred_provider(self): """Should have a default model for each preferred provider.""" for provider in PREFERRED_PROVIDERS: assert provider in DEFAULT_MODELS, f"Missing default model for {provider}" def test_default_models_should_be_non_empty_strings(self): """Default model names should be non-empty strings.""" for model in DEFAULT_MODELS.values(): assert isinstance(model, str) assert len(model) > 0 class TestGetDefaultProvider: """Tests for get_default_provider function.""" def test_should_return_first_preferred_when_available(self): """Should return first preferred provider when available.""" enabled = ["OpenAI", "Anthropic", "Groq"] result = get_default_provider(enabled) assert result == "Anthropic" # First in PREFERRED_PROVIDERS that's enabled def test_should_return_second_preferred_when_first_not_available(self): """Should return 
second preferred when first is not available.""" enabled = ["OpenAI", "Groq"] # Anthropic not included result = get_default_provider(enabled) assert result == "OpenAI" # Second in PREFERRED_PROVIDERS def test_should_return_first_enabled_when_no_preferred_available(self): """Should return first enabled when no preferred provider available.""" enabled = ["CustomProvider", "AnotherProvider"] result = get_default_provider(enabled) assert result == "CustomProvider" def test_should_return_none_for_empty_list(self): """Should return None when no providers enabled.""" result = get_default_provider([]) assert result is None def test_should_respect_preferred_order(self): """Should respect the order of PREFERRED_PROVIDERS.""" enabled = ["Groq", "Google Generative AI", "OpenAI"] result = get_default_provider(enabled) # Should be OpenAI since it comes before Groq and Google in PREFERRED_PROVIDERS assert result == "OpenAI" class TestGetDefaultModel: """Tests for get_default_model function.""" def test_should_return_model_for_known_provider(self): """Should return default model for known provider.""" result = get_default_model("Anthropic") assert result is not None assert isinstance(result, str) assert "claude" in result.lower() def test_should_return_model_for_openai(self): """Should return default model for OpenAI.""" result = get_default_model("OpenAI") assert result is not None assert "gpt" in result.lower() def test_should_return_none_for_unknown_provider(self): """Should return None for unknown provider.""" result = get_default_model("UnknownProvider") assert result is None class TestCheckApiKey: """Tests for check_api_key function.""" @pytest.mark.asyncio async def test_should_return_key_from_variable_service(self): """Should return API key from variable service when available.""" mock_service = MagicMock() mock_service.get_variable = AsyncMock(return_value="test-api-key") mock_session = MagicMock() user_id = UUID("12345678-1234-5678-1234-567812345678") result = await 
check_api_key(mock_service, user_id, "OPENAI_API_KEY", mock_session) assert result == "test-api-key" mock_service.get_variable.assert_called_once_with(user_id, "OPENAI_API_KEY", "", mock_session) @pytest.mark.asyncio async def test_should_fallback_to_env_when_not_in_service(self): """Should fallback to environment variable when not in service.""" mock_service = MagicMock() mock_service.get_variable = AsyncMock(side_effect=ValueError("Not found")) mock_session = MagicMock() user_id = "test-user" with patch.dict(os.environ, {"OPENAI_API_KEY": "env-api-key"}): result = await check_api_key(mock_service, user_id, "OPENAI_API_KEY", mock_session) assert result == "env-api-key" @pytest.mark.asyncio async def test_should_return_none_when_not_found_anywhere(self): """Should return None when key not found in service or env.""" mock_service = MagicMock() mock_service.get_variable = AsyncMock(side_effect=ValueError("Not found")) mock_session = MagicMock() user_id = "test-user" # Ensure env var is not set with patch.dict(os.environ, {}, clear=True): # Remove the key if it exists os.environ.pop("TEST_API_KEY", None) result = await check_api_key(mock_service, user_id, "TEST_API_KEY", mock_session) assert result is None @pytest.mark.asyncio async def test_should_return_env_when_service_returns_empty(self): """Should check env when service returns empty string.""" mock_service = MagicMock() mock_service.get_variable = AsyncMock(return_value="") mock_session = MagicMock() user_id = "test-user" with patch.dict(os.environ, {"TEST_KEY": "env-value"}): result = await check_api_key(mock_service, user_id, "TEST_KEY", mock_session) assert result == "env-value" @pytest.mark.asyncio async def test_should_accept_string_user_id(self): """Should accept string user_id.""" mock_service = MagicMock() mock_service.get_variable = AsyncMock(return_value="key") mock_session = MagicMock() result = await check_api_key(mock_service, "string-user-id", "API_KEY", mock_session) assert result == "key" 
@pytest.mark.asyncio async def test_should_accept_uuid_user_id(self): """Should accept UUID user_id.""" mock_service = MagicMock() mock_service.get_variable = AsyncMock(return_value="key") mock_session = MagicMock() user_id = UUID("12345678-1234-5678-1234-567812345678") result = await check_api_key(mock_service, user_id, "API_KEY", mock_session) assert result == "key" class TestGetEnabledProvidersForUser: """Tests for get_enabled_providers_for_user function.""" @pytest.mark.asyncio async def test_should_return_empty_when_service_not_database(self): """Should return empty lists when service is not DatabaseVariableService.""" mock_session = MagicMock() user_id = "test-user" with patch("langflow.agentic.services.provider_service.get_variable_service") as mock_get_service: mock_get_service.return_value = MagicMock() # Not DatabaseVariableService result = await get_enabled_providers_for_user(user_id, mock_session) assert result == ([], {}) # Note: Testing get_enabled_providers_for_user with credentials requires # complex mocking of DatabaseVariableService and isinstance checks. # This is better suited for integration tests with actual database setup. class TestProviderServiceIntegration: """Integration tests for provider service.""" def test_default_provider_should_have_default_model(self): """Default provider should have a corresponding default model.""" for provider in PREFERRED_PROVIDERS: model = get_default_model(provider) assert model is not None, f"No default model for preferred provider {provider}" def test_get_default_provider_returns_valid_provider(self): """get_default_provider should return a provider with a default model.""" enabled = PREFERRED_PROVIDERS.copy() provider = get_default_provider(enabled) assert provider is not None assert get_default_model(provider) is not None
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/agentic/services/test_provider_service.py", "license": "MIT License", "lines": 173, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/base/langflow/alembic/versions/369268b9af8b_add_job_id_to_vertex_build_create_job_.py
"""add job_id to vertex_build, create job status table. Revision ID: 369268b9af8b Revises: 182e5471b900 Create Date: 2026-01-28 13:00:52.967282 Phase: EXPAND """ from collections.abc import Sequence import sqlalchemy as sa from alembic import op # revision identifiers, used by Alembic. revision: str = "369268b9af8b" # pragma: allowlist secret down_revision: str | None = "182e5471b900" # pragma: allowlist secret branch_labels: str | Sequence[str] | None = None depends_on: str | Sequence[str] | None = None def upgrade() -> None: # ### commands auto generated by Alembic - please adjust! ### from langflow.utils import migration conn = op.get_bind() if not migration.table_exists("job", conn): op.create_table( "job", sa.Column("job_id", sa.Uuid(), nullable=False), sa.Column("flow_id", sa.Uuid(), nullable=False), sa.Column( "status", sa.Enum( "queued", "in_progress", "completed", "failed", "cancelled", "timed_out", name="job_status_enum", ), nullable=False, ), sa.Column("created_timestamp", sa.DateTime(timezone=True), nullable=False), sa.Column("finished_timestamp", sa.DateTime(timezone=True), nullable=True), sa.PrimaryKeyConstraint("job_id"), ) with op.batch_alter_table("job", schema=None) as batch_op: batch_op.create_index(batch_op.f("ix_job_flow_id"), ["flow_id"], unique=False) batch_op.create_index(batch_op.f("ix_job_job_id"), ["job_id"], unique=False) batch_op.create_index(batch_op.f("ix_job_status"), ["status"], unique=False) if not migration.column_exists("vertex_build", "job_id", conn): with op.batch_alter_table("vertex_build", schema=None) as batch_op: batch_op.add_column(sa.Column("job_id", sa.Uuid(), nullable=True)) batch_op.create_index(batch_op.f("ix_vertex_build_job_id"), ["job_id"], unique=False) # ### end Alembic commands ### def downgrade() -> None: # ### commands auto generated by Alembic - please adjust! 
### from langflow.utils import migration conn = op.get_bind() # Drop vertex_build column if it exists if migration.column_exists("vertex_build", "job_id", conn): with op.batch_alter_table("vertex_build", schema=None) as batch_op: try: batch_op.drop_index(batch_op.f("ix_vertex_build_job_id")) except Exception: # noqa: S110 pass # Index might not exist batch_op.drop_column("job_id") # Drop job table if it exists (this will automatically drop all indexes) if migration.table_exists("job", conn): op.drop_table("job") # ### end Alembic commands ###
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/alembic/versions/369268b9af8b_add_job_id_to_vertex_build_create_job_.py", "license": "MIT License", "lines": 65, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
langflow-ai/langflow:src/backend/base/langflow/api/v2/workflow_reconstruction.py
"""Workflow response reconstruction from vertex_build table. This module reconstructs WorkflowExecutionResponse from vertex_build table data by job_id, enabling retrieval of past execution results without re-running workflows. """ from __future__ import annotations from typing import TYPE_CHECKING from lfx.graph.graph.base import Graph from lfx.graph.schema import ResultData, RunOutputs from lfx.schema.workflow import WorkflowExecutionRequest from langflow.api.v1.schemas import RunResponse from langflow.api.v2.converters import run_response_to_workflow_response from langflow.services.database.models.vertex_builds.crud import get_vertex_builds_by_job_id if TYPE_CHECKING: from sqlmodel.ext.asyncio.session import AsyncSession from langflow.services.database.models.flow.model import FlowRead async def reconstruct_workflow_response_from_job_id( session: AsyncSession, flow: FlowRead, job_id: str, user_id: str, ): """Reconstruct WorkflowExecutionResponse from vertex_builds by job_id. Args: session: Database session (readonly for performance) flow: Flow model from database job_id: Job ID to query vertex builds user_id: User ID for graph construction Returns: WorkflowExecutionResponse reconstructed from vertex_build data Raises: ValueError: If flow has no data or no vertex builds found for job_id """ # Validate flow data if not flow.data: msg = f"Flow {flow.id} has no data" raise ValueError(msg) # Query vertex_builds by job_id vertex_builds = await get_vertex_builds_by_job_id(session, job_id) if not vertex_builds: msg = f"No vertex builds found for job_id {job_id}" raise ValueError(msg) # Build graph to identify terminal nodes flow_id_str = str(flow.id) graph = Graph.from_payload(flow.data, flow_id=flow_id_str, user_id=user_id, flow_name=flow.name) terminal_node_ids = graph.get_terminal_nodes() # Filter to terminal vertices with data terminal_vertex_builds = [vb for vb in vertex_builds if vb.id in terminal_node_ids and vb.data] if not terminal_vertex_builds: msg = f"No 
terminal vertex builds found for job_id {job_id}" raise ValueError(msg) # Convert vertex_build data to RunOutputs format run_outputs_list = [RunOutputs(inputs={}, outputs=[ResultData(**vb.data)]) for vb in terminal_vertex_builds] # Create RunResponse and convert to WorkflowExecutionResponse run_response = RunResponse(outputs=run_outputs_list, session_id=None) workflow_request = WorkflowExecutionRequest(flow_id=flow_id_str, inputs={}) return run_response_to_workflow_response( run_response=run_response, flow_id=flow_id_str, job_id=job_id, workflow_request=workflow_request, graph=graph, )
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/api/v2/workflow_reconstruction.py", "license": "MIT License", "lines": 62, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
langflow-ai/langflow:src/backend/base/langflow/services/database/models/jobs/crud.py
from __future__ import annotations from typing import TYPE_CHECKING if TYPE_CHECKING: from collections.abc import Sequence from uuid import UUID from sqlmodel.ext.asyncio.session import AsyncSession from sqlmodel import col, select from langflow.services.database.models.jobs.model import Job, JobStatus async def get_jobs_by_flow_id(db: AsyncSession, flow_id: UUID, page: int = 1, size: int = 10) -> list[Job]: """Get jobs by flow ID with pagination. Args: db: Async database session flow_id: The flow ID to filter jobs by page: Page number (1-indexed) size: Number of jobs per page Returns: List of Job objects for the specified flow """ statement = ( select(Job) .where(Job.flow_id == flow_id) .order_by(col(Job.created_timestamp).desc()) .offset((page - 1) * size) .limit(size) ) result = await db.exec(statement) return list(result.all()) async def get_job_by_job_id(db: AsyncSession, job_id: UUID) -> Job | None: """Get a single job by its UUID. Args: db: Async database session job_id: The job ID to fetch Returns: Job object or None if not found """ statement = select(Job).where(Job.job_id == job_id) result = await db.exec(statement) return result.first() async def update_job_status(db: AsyncSession, job_id: UUID, status: JobStatus) -> Job | None: """Update the status of a job. Args: db: Async database session job_id: The job ID to update status: The new status value Returns: Updated Job object or None if not found """ job = await get_job_by_job_id(db, job_id) if job: job.status = status db.add(job) await db.flush() await db.refresh(job) return job async def get_latest_jobs_by_asset_ids(db: AsyncSession, asset_ids: Sequence[UUID]) -> dict[UUID, Job]: """Get the latest job for each asset ID in a single query. 
Args: db: Async database session asset_ids: List of asset IDs to fetch jobs for Returns: Dictionary mapping asset_id to the latest Job object """ if not asset_ids: return {} # Query all jobs for the given asset IDs, ordered by created_timestamp descending statement = select(Job).where(col(Job.asset_id).in_(asset_ids)).order_by(col(Job.created_timestamp).desc()) result = await db.exec(statement) all_jobs = result.all() # Build a dictionary with the latest job per asset_id latest_jobs: dict[UUID, Job] = {} for job in all_jobs: if job.asset_id and job.asset_id not in latest_jobs: latest_jobs[job.asset_id] = job return latest_jobs
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/services/database/models/jobs/crud.py", "license": "MIT License", "lines": 74, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
langflow-ai/langflow:src/backend/base/langflow/services/database/models/jobs/model.py
from datetime import datetime, timezone from enum import Enum from uuid import UUID from sqlalchemy import Column, DateTime from sqlalchemy import Enum as SQLEnum from sqlmodel import Field, SQLModel class JobStatus(str, Enum): QUEUED = "queued" IN_PROGRESS = "in_progress" COMPLETED = "completed" FAILED = "failed" CANCELLED = "cancelled" TIMED_OUT = "timed_out" class JobType(str, Enum): """Enum to specify type of job. WORKFLOW: for workflow execution INGESTION: for knowledge base ingestion EVALUATION: for evaluation of workflows. Can be extended in future for other types of jobs. """ WORKFLOW = "workflow" INGESTION = "ingestion" EVALUATION = "evaluation" class JobBase(SQLModel): job_id: UUID = Field(primary_key=True, index=True) flow_id: UUID = Field(index=True) status: JobStatus = Field( default=JobStatus.QUEUED, sa_column=Column( SQLEnum(JobStatus, name="job_status_enum", values_callable=lambda obj: [item.value for item in obj]), nullable=False, index=False, ), ) created_timestamp: datetime = Field( default_factory=lambda: datetime.now(timezone.utc), sa_column=Column(DateTime(timezone=True), nullable=False), ) finished_timestamp: datetime | None = Field( default=None, sa_column=Column(DateTime(timezone=True), nullable=True), ) type: JobType | None = Field( default=JobType.WORKFLOW, sa_column=Column( SQLEnum(JobType, name="job_type_enum", values_callable=lambda obj: [item.value for item in obj]), nullable=True, index=True, ), ) user_id: UUID | None = Field(index=True, nullable=True) asset_id: UUID | None = Field(index=True, nullable=True) asset_type: str | None = Field( index=False, nullable=True ) # Polymorphic: records if job is related to an entity like a KB, workflow, etc. class Job(JobBase, table=True): # type: ignore[call-arg] __tablename__ = "job"
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/services/database/models/jobs/model.py", "license": "MIT License", "lines": 57, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
langflow-ai/langflow:src/backend/base/langflow/services/jobs/factory.py
"""Factory for creating JobService instances.""" from langflow.services.factory import ServiceFactory from langflow.services.jobs.service import JobService class JobServiceFactory(ServiceFactory): """Factory for creating JobService instances.""" def __init__(self): super().__init__(JobService) self._instance = None def create(self): """Create a JobService instance. Returns: JobService instance """ if self._instance is None: self._instance = JobService() return self._instance
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/services/jobs/factory.py", "license": "MIT License", "lines": 16, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
langflow-ai/langflow:src/backend/base/langflow/services/jobs/service.py
"""Job service for managing workflow job status and tracking.""" from __future__ import annotations import asyncio from typing import TYPE_CHECKING if TYPE_CHECKING: from collections.abc import Sequence from datetime import datetime, timezone from uuid import UUID from langflow.services.base import Service from langflow.services.database.models.jobs.crud import ( get_job_by_job_id, get_jobs_by_flow_id, get_latest_jobs_by_asset_ids, update_job_status, ) from langflow.services.database.models.jobs.model import Job, JobStatus, JobType from langflow.services.deps import session_scope class JobService(Service): """Service for managing workflow jobs.""" name = "jobs_service" def __init__(self): """Initialize the job service.""" self.set_ready() async def get_jobs_by_flow_id(self, flow_id: UUID | str, page: int = 1, page_size: int = 10) -> list[Job]: """Get jobs for a specific flow with pagination. Args: flow_id: The flow ID to filter jobs by page: Page number (1-indexed) page_size: Number of jobs per page Returns: List of Job objects for the specified flow """ if isinstance(flow_id, str): flow_id = UUID(flow_id) async with session_scope() as session: return await get_jobs_by_flow_id(session, flow_id, page=page, size=page_size) async def get_job_by_job_id(self, job_id: UUID | str) -> Job | None: """Get job for a specific job ID. Args: job_id: The job ID to filter jobs by Returns: Job object for the specified job ID """ if isinstance(job_id, str): job_id = UUID(job_id) async with session_scope() as session: return await get_job_by_job_id(session, job_id) async def create_job( self, job_id: UUID, flow_id: UUID, job_type: JobType = JobType.WORKFLOW, asset_id: UUID | None = None, asset_type: str | None = None, ) -> Job: """Create a new job record with QUEUED status. 
Args: job_id: The job ID flow_id: The flow ID job_type: The job type asset_id: The asset ID asset_type: The asset type Returns: Created Job object """ if isinstance(job_id, str): job_id = UUID(job_id) if isinstance(flow_id, str): flow_id = UUID(flow_id) async with session_scope() as session: job = Job( job_id=job_id, flow_id=flow_id, status=JobStatus.QUEUED, type=job_type, asset_id=asset_id, asset_type=asset_type, ) session.add(job) await session.flush() return job async def update_job_status( self, job_id: UUID, status: JobStatus, *, finished_timestamp: bool = False ) -> Job | None: """Update job status and optionally set finished timestamp. Args: job_id: The job ID to update status: New status value finished_timestamp: If True, set finished_timestamp to current time Returns: Updated Job object or None if not found """ async with session_scope() as session: job = await update_job_status(session, job_id, status) if job and finished_timestamp: job.finished_timestamp = datetime.now(timezone.utc) session.add(job) await session.flush() return job async def get_latest_jobs_by_asset_ids(self, asset_ids: Sequence[UUID | str]) -> dict[UUID, Job]: """Get the latest job for each asset ID in a single batch query. Args: asset_ids: List of asset IDs (UUID or string) to fetch jobs for Returns: Dictionary mapping asset_id (UUID) to the latest Job object """ # Convert all asset_ids to UUID uuid_asset_ids = [UUID(aid) if isinstance(aid, str) else aid for aid in asset_ids] async with session_scope() as session: return await get_latest_jobs_by_asset_ids(session, uuid_asset_ids) async def execute_with_status(self, job_id: UUID, run_coro_func, *args, **kwargs): """Wrapper that manages job status lifecycle around a coroutine. This function: 1. Updates status to IN_PROGRESS before execution 2. Executes the wrapped function 3. Updates status to COMPLETED on success or FAILED on error 4. 
Sets finished_timestamp when done Args: job_id: The job ID run_coro_func: The coroutine function to wrap *args: Positional arguments to pass to run_coro_func **kwargs: Keyword arguments to pass to run_coro_func Returns: The result from run_coro_func Raises: Exception: Re-raises any exception from run_coro_func after updating status """ from lfx.log import logger await logger.ainfo(f"Starting job execution: job_id={job_id}") try: # Update to IN_PROGRESS await logger.adebug(f"Updating job {job_id} status to IN_PROGRESS") await self.update_job_status(job_id, JobStatus.IN_PROGRESS) # Execute the wrapped function await logger.ainfo(f"Executing job function for job_id={job_id}") result = await run_coro_func(*args, **kwargs) except AssertionError as e: # Handle missing required arguments await logger.aerror(f"Job {job_id} failed with AssertionError: {e}") await self.update_job_status(job_id, JobStatus.FAILED, finished_timestamp=True) raise except asyncio.TimeoutError as e: # Handle timeout specifically await logger.aerror(f"Job {job_id} timed out: {e}") await self.update_job_status(job_id, JobStatus.TIMED_OUT, finished_timestamp=True) raise except asyncio.CancelledError as exc: # Check the message code to determine if this was user-initiated or system-initiated if exc.args and exc.args[0] == "LANGFLOW_USER_CANCELLED": # User-initiated cancellation, update status to CANCELLED await logger.awarning(f"Job {job_id} was cancelled by user") await self.update_job_status(job_id, JobStatus.CANCELLED, finished_timestamp=True) else: # System-initiated cancellation - update status to FAILED await logger.aerror(f"Job {job_id} was cancelled by system") await self.update_job_status(job_id, JobStatus.FAILED, finished_timestamp=True) raise except Exception as e: # Handle any other error await logger.aexception(f"Job {job_id} failed with unexpected error: {e}") await self.update_job_status(job_id, JobStatus.FAILED, finished_timestamp=True) raise else: # Update to COMPLETED await 
logger.ainfo(f"Job {job_id} completed successfully") await self.update_job_status(job_id, JobStatus.COMPLETED, finished_timestamp=True) return result
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/services/jobs/service.py", "license": "MIT License", "lines": 167, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
langflow-ai/langflow:src/backend/tests/unit/api/v2/test_workflow_reconstruction.py
"""Unit tests for workflow reconstruction from vertex_build table. Test Coverage: - Successful reconstruction with terminal nodes - Reconstruction with no vertex builds found (error case) - Reconstruction with flow having no data (error case) - Reconstruction filtering to terminal nodes only """ from datetime import datetime, timezone from unittest.mock import MagicMock, patch from uuid import uuid4 import pytest from langflow.api.v2.workflow_reconstruction import reconstruct_workflow_response_from_job_id from langflow.services.database.models.vertex_builds.model import VertexBuildTable class TestWorkflowReconstruction: """Unit tests for workflow reconstruction logic.""" async def test_reconstruct_success_with_terminal_nodes(self): """Test successful reconstruction filters to terminal nodes and returns response.""" flow_id = uuid4() job_id = uuid4() user_id = uuid4() # Mock flow mock_flow = MagicMock() mock_flow.id = flow_id mock_flow.data = {"nodes": [{"id": "node1"}, {"id": "node2"}], "edges": []} # Mock vertex_builds mock_vb1 = MagicMock(spec=VertexBuildTable) mock_vb1.id = "node1" mock_vb1.data = {"outputs": {"result": "output1"}} mock_vb1.artifacts = {} mock_vb1.timestamp = datetime.now(timezone.utc) mock_vb2 = MagicMock(spec=VertexBuildTable) mock_vb2.id = "node2" mock_vb2.data = {"outputs": {"result": "output2"}} mock_vb2.artifacts = {} mock_vb2.timestamp = datetime.now(timezone.utc) mock_session = MagicMock() with ( patch("langflow.api.v2.workflow_reconstruction.get_vertex_builds_by_job_id") as mock_get_vb, patch("langflow.api.v2.workflow_reconstruction.Graph") as mock_graph_class, patch("langflow.api.v2.workflow_reconstruction.run_response_to_workflow_response") as mock_converter, ): mock_get_vb.return_value = [mock_vb1, mock_vb2] mock_graph = MagicMock() mock_graph.get_terminal_nodes.return_value = ["node1", "node2"] mock_graph_class.from_payload.return_value = mock_graph mock_response = MagicMock() mock_response.flow_id = str(flow_id) 
mock_response.job_id = str(job_id) mock_converter.return_value = mock_response result = await reconstruct_workflow_response_from_job_id( session=mock_session, flow=mock_flow, job_id=str(job_id), user_id=user_id, ) assert result.flow_id == str(flow_id) assert result.job_id == str(job_id) mock_get_vb.assert_called_once_with(mock_session, str(job_id)) mock_graph.get_terminal_nodes.assert_called_once() async def test_reconstruct_fails_when_no_vertex_builds(self): """Test reconstruction raises ValueError when no vertex_builds found.""" mock_flow = MagicMock() mock_flow.data = {"nodes": [{"id": "node1"}], "edges": []} mock_session = MagicMock() with patch("langflow.api.v2.workflow_reconstruction.get_vertex_builds_by_job_id") as mock_get_vb: mock_get_vb.return_value = [] with pytest.raises(ValueError, match="No vertex builds found"): await reconstruct_workflow_response_from_job_id( session=mock_session, flow=mock_flow, job_id=str(uuid4()), user_id=uuid4(), ) async def test_reconstruct_fails_when_flow_has_no_data(self): """Test reconstruction raises ValueError when flow has no data.""" mock_flow = MagicMock() mock_flow.data = None mock_session = MagicMock() with pytest.raises(ValueError, match="has no data"): await reconstruct_workflow_response_from_job_id( session=mock_session, flow=mock_flow, job_id=str(uuid4()), user_id=uuid4(), ) async def test_reconstruct_filters_to_terminal_nodes_only(self): """Test reconstruction only includes terminal node outputs, not intermediate nodes.""" flow_id = uuid4() job_id = uuid4() user_id = uuid4() mock_flow = MagicMock() mock_flow.id = flow_id mock_flow.data = {"nodes": [{"id": "node1"}, {"id": "node2"}, {"id": "node3"}], "edges": []} # Create vertex_builds for all 3 nodes mock_vertex_builds = [] for node_id in ["node1", "node2", "node3"]: mock_vb = MagicMock(spec=VertexBuildTable) mock_vb.id = node_id mock_vb.data = {"outputs": {"result": f"output_{node_id}"}} mock_vb.artifacts = {} mock_vb.timestamp = datetime.now(timezone.utc) 
mock_vertex_builds.append(mock_vb) mock_session = MagicMock() with ( patch("langflow.api.v2.workflow_reconstruction.get_vertex_builds_by_job_id") as mock_get_vb, patch("langflow.api.v2.workflow_reconstruction.Graph") as mock_graph_class, patch("langflow.api.v2.workflow_reconstruction.run_response_to_workflow_response") as mock_converter, ): mock_get_vb.return_value = mock_vertex_builds # Only node1 and node3 are terminal nodes (node2 is intermediate) mock_graph = MagicMock() mock_graph.get_terminal_nodes.return_value = ["node1", "node3"] mock_graph_class.from_payload.return_value = mock_graph mock_response = MagicMock() mock_converter.return_value = mock_response result = await reconstruct_workflow_response_from_job_id( session=mock_session, flow=mock_flow, job_id=str(job_id), user_id=user_id, ) assert result is not None mock_converter.assert_called_once() # Verify filtering happened by checking terminal nodes were retrieved mock_graph.get_terminal_nodes.assert_called_once()
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/api/v2/test_workflow_reconstruction.py", "license": "MIT License", "lines": 125, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/api/v1/test_flow_folder_integrity.py
"""Tests for flow-folder integrity to prevent orphaned flows. These tests verify the fix for the bug where flows could be created without a valid folder_id when all folders were deleted (zero folders scenario), resulting in orphaned flows that were unreachable in the UI. The fix ensures: 1. Flows always have a valid folder_id 2. If a non-existent folder_id is provided, the system falls back to the default folder 3. If no folders exist, a default folder is auto-created """ import uuid from fastapi import status from httpx import AsyncClient from langflow.services.database.models.folder.constants import DEFAULT_FOLDER_NAME from langflow.services.database.models.folder.model import Folder from langflow.services.deps import session_scope from sqlmodel import select async def test_create_flow_with_nonexistent_folder_id_assigns_default_folder( client: AsyncClient, logged_in_headers, active_user ): """Test that creating a flow with a non-existent folder_id assigns it to the default folder. This prevents orphaned flows when a folder is deleted between the UI loading and flow creation. 
""" non_existent_folder_id = str(uuid.uuid4()) flow_data = { "name": "Test Flow with Bad Folder", "data": {}, "folder_id": non_existent_folder_id, } response = await client.post("api/v1/flows/", json=flow_data, headers=logged_in_headers) assert response.status_code == status.HTTP_201_CREATED result = response.json() # The flow should have been assigned to a valid folder (not the non-existent one) assert result["folder_id"] is not None assert result["folder_id"] != non_existent_folder_id # Verify the folder actually exists async with session_scope() as session: folder = await session.get(Folder, uuid.UUID(result["folder_id"])) assert folder is not None assert folder.user_id == active_user.id async def test_create_flow_without_folder_id_assigns_default_folder( client: AsyncClient, logged_in_headers, active_user ): """Test that creating a flow without a folder_id assigns it to the default folder.""" flow_data = { "name": "Test Flow without Folder ID", "data": {}, } response = await client.post("api/v1/flows/", json=flow_data, headers=logged_in_headers) assert response.status_code == status.HTTP_201_CREATED result = response.json() # The flow should have been assigned to a valid folder assert result["folder_id"] is not None # Verify the folder actually exists and belongs to the user async with session_scope() as session: folder = await session.get(Folder, uuid.UUID(result["folder_id"])) assert folder is not None assert folder.user_id == active_user.id async def test_create_flow_after_all_folders_deleted_creates_default_folder( client: AsyncClient, logged_in_headers, active_user ): """Test the zero-folder scenario: creating a flow after deleting all folders. This is the critical bug fix test. When all folders are deleted, creating a new flow should automatically create a default folder instead of creating an orphaned flow. 
""" # First, delete all folders for this user async with session_scope() as session: stmt = select(Folder).where(Folder.user_id == active_user.id) folders = (await session.exec(stmt)).all() for folder in folders: await session.delete(folder) await session.commit() # Verify no folders exist for this user async with session_scope() as session: stmt = select(Folder).where(Folder.user_id == active_user.id) folders = (await session.exec(stmt)).all() assert len(folders) == 0, "All folders should be deleted" # Now create a flow - this should auto-create a default folder flow_data = { "name": "Flow Created After All Folders Deleted", "data": {}, } response = await client.post("api/v1/flows/", json=flow_data, headers=logged_in_headers) assert response.status_code == status.HTTP_201_CREATED result = response.json() # The flow should NOT be orphaned - it should have a valid folder_id assert result["folder_id"] is not None # Verify the folder was auto-created and exists async with session_scope() as session: folder = await session.get(Folder, uuid.UUID(result["folder_id"])) assert folder is not None assert folder.user_id == active_user.id assert folder.name == DEFAULT_FOLDER_NAME async def test_update_flow_with_nonexistent_folder_id_assigns_default_folder( client: AsyncClient, logged_in_headers, active_user ): """Test that updating a flow with a non-existent folder_id falls back to default folder. This handles the case where a user tries to move a flow to a folder that doesn't exist. 
""" # Configure client to follow redirects (folders API uses redirects) client.follow_redirects = True # Create a flow in the default folder flow_data = { "name": "Flow to Update", "data": {}, } flow_response = await client.post("api/v1/flows/", json=flow_data, headers=logged_in_headers) assert flow_response.status_code == status.HTTP_201_CREATED flow_id = flow_response.json()["id"] # Now try to update the flow with a non-existent folder_id non_existent_folder_id = str(uuid.uuid4()) update_data = { "name": "Updated Flow Name", "folder_id": non_existent_folder_id, # This folder doesn't exist } update_response = await client.patch(f"api/v1/flows/{flow_id}", json=update_data, headers=logged_in_headers) assert update_response.status_code == status.HTTP_200_OK result = update_response.json() # The flow should be reassigned to a valid folder (not the non-existent one) assert result["folder_id"] is not None assert result["folder_id"] != non_existent_folder_id # Verify the folder exists async with session_scope() as session: folder = await session.get(Folder, uuid.UUID(result["folder_id"])) assert folder is not None assert folder.user_id == active_user.id async def test_update_flow_without_folder_id_keeps_existing_folder(client: AsyncClient, logged_in_headers): """Test that updating a flow without specifying folder_id keeps the existing folder assignment.""" # Configure client to follow redirects client.follow_redirects = True # Create a flow flow_data = { "name": "Flow to Update", "data": {}, } create_response = await client.post("api/v1/flows/", json=flow_data, headers=logged_in_headers) assert create_response.status_code == status.HTTP_201_CREATED flow_id = create_response.json()["id"] original_folder_id = create_response.json()["folder_id"] # Update the flow without specifying folder_id (only update name) update_data = { "name": "Updated Flow Name", } update_response = await client.patch(f"api/v1/flows/{flow_id}", json=update_data, headers=logged_in_headers) assert 
update_response.status_code == status.HTTP_200_OK result = update_response.json() # The folder_id should remain unchanged assert result["folder_id"] == original_folder_id async def test_upload_flow_with_nonexistent_folder_id_assigns_default( client: AsyncClient, logged_in_headers, active_user ): """Test that uploading a flow with a non-existent folder_id assigns it to the default folder. The upload endpoint uses _new_flow internally, which includes folder_id validation. """ import json non_existent_folder_id = str(uuid.uuid4()) flow_data = { "name": "Uploaded Flow with Bad Folder", "data": {}, "folder_id": non_existent_folder_id, } # Create a JSON file content for upload file_content = json.dumps(flow_data) response = await client.post( "api/v1/flows/upload/", files={"file": ("flow.json", file_content, "application/json")}, headers=logged_in_headers, ) assert response.status_code == status.HTTP_201_CREATED results = response.json() # The result is a list (even for single flow upload) assert len(results) == 1 result = results[0] # The flow should have a valid folder_id (not the non-existent one) assert result["folder_id"] is not None assert result["folder_id"] != non_existent_folder_id # Verify the folder exists async with session_scope() as session: folder = await session.get(Folder, uuid.UUID(result["folder_id"])) assert folder is not None assert folder.user_id == active_user.id async def test_flow_created_is_retrievable_in_folder(client: AsyncClient, logged_in_headers): """Test that a created flow can be retrieved by listing flows in its folder. This verifies the flow is not orphaned and appears in the UI. 
""" # Configure client to follow redirects client.follow_redirects = True # Create a flow flow_data = { "name": "Retrievable Flow", "data": {}, } create_response = await client.post("api/v1/flows/", json=flow_data, headers=logged_in_headers) assert create_response.status_code == status.HTTP_201_CREATED flow_id = create_response.json()["id"] folder_id = create_response.json()["folder_id"] # List flows in the folder response = await client.get(f"api/v1/folders/{folder_id}", headers=logged_in_headers) assert response.status_code == status.HTTP_200_OK # Check if the flow is in the folder's flows list result = response.json() # Handle different response structures if "flows" in result: flows = result["flows"] elif "folder" in result and "flows" in result["folder"]: flows = result["folder"]["flows"] else: # Response might be paginated or have different structure flows = result.get("flows", []) # Get flow IDs from the response flow_ids_in_folder = [f["id"] if isinstance(f, dict) else str(f) for f in flows] # The created flow should be in the folder's flow list assert flow_id in flow_ids_in_folder, f"Flow {flow_id} should be retrievable in folder {folder_id}" async def test_upsert_flow_with_nonexistent_folder_id_on_create(client: AsyncClient, logged_in_headers): """Test that PUT (upsert) with non-existent folder_id creates flow with default folder.""" specified_id = str(uuid.uuid4()) non_existent_folder_id = str(uuid.uuid4()) flow_data = { "name": "Upsert Flow with Bad Folder", "data": {}, "folder_id": non_existent_folder_id, } response = await client.put(f"api/v1/flows/{specified_id}", json=flow_data, headers=logged_in_headers) # The request should be rejected with 400 Bad Request since folder doesn't exist # This is the expected behavior based on the existing test_upsert_flow_returns_400_for_invalid_folder_id assert response.status_code == status.HTTP_400_BAD_REQUEST assert "folder not found" in response.json()["detail"].lower()
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/api/v1/test_flow_folder_integrity.py", "license": "MIT License", "lines": 226, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/components/llm_operations/test_llm_conditional_router.py
from unittest.mock import MagicMock, patch import pytest from lfx.components.llm_operations.llm_conditional_router import SmartRouterComponent from lfx.schema.message import Message from tests.base import ComponentTestBaseWithoutClient class TestSmartRouterComponent(ComponentTestBaseWithoutClient): @pytest.fixture def component_class(self): """Return the component class to test.""" return SmartRouterComponent @pytest.fixture def default_kwargs(self): """Return the default kwargs for the component.""" return { "model": [ { "name": "gpt-4o", "provider": "OpenAI", "icon": "OpenAI", "metadata": { "model_class": "ChatOpenAI", "model_name_param": "model", "api_key_param": "api_key", # pragma: allowlist secret }, } ], "input_text": "I love this product!", "routes": [ { "route_category": "Positive", "route_description": "Positive feedback, satisfaction, or compliments", "output_value": "", }, { "route_category": "Negative", "route_description": "Complaints, issues, or dissatisfaction", "output_value": "", }, ], "enable_else_output": False, } @pytest.fixture def file_names_mapping(self): """Return an empty list since this component doesn't have version-specific files.""" return [] def _create_component_with_mock_categorization(self, categorization_result, *, enable_else=False): """Helper to create a component with a mocked categorization result.""" component = SmartRouterComponent() component.routes = [ {"route_category": "Positive", "route_description": "Good feedback", "output_value": ""}, {"route_category": "Negative", "route_description": "Bad feedback", "output_value": ""}, ] component.input_text = "Test input" component.enable_else_output = enable_else component.message = None component._categorization_result = categorization_result component.stop = MagicMock() return component def test_positive_output(self): """Test routing to positive category.""" component = self._create_component_with_mock_categorization("Positive") result = component.process_case() assert 
isinstance(result, Message) assert result.text == "Test input" component.stop.assert_any_call("category_2_result") # Negative should be stopped def test_negative_output(self): """Test routing to negative category.""" component = self._create_component_with_mock_categorization("Negative") result = component.process_case() assert isinstance(result, Message) assert result.text == "Test input" component.stop.assert_any_call("category_1_result") # Positive should be stopped def test_neutral_output_no_match(self): """Test when input doesn't match any category (no else output).""" component = self._create_component_with_mock_categorization("NONE", enable_else=False) result = component.process_case() assert isinstance(result, Message) assert result.text == "" assert component.status == "No match found and Else output is disabled" def test_else_output(self): """Test else output when no category matches.""" component = self._create_component_with_mock_categorization("NONE", enable_else=True) result = component.default_response() assert isinstance(result, Message) assert result.text == "Test input" assert "Routed to Else (no match)" in component.status def test_categorization_caching(self): """Test that LLM categorization result is cached and only called once.""" component = SmartRouterComponent() component.routes = [ {"route_category": "Positive", "route_description": "", "output_value": ""}, ] component.input_text = "Great product!" 
component.model = [{"name": "test-model", "provider": "Test"}] component._user_id = "test-user" component.api_key = "test-key" # pragma: allowlist secret mock_llm = MagicMock() mock_response = MagicMock() mock_response.content = "Positive" mock_llm.invoke.return_value = mock_response with patch("lfx.components.llm_operations.llm_conditional_router.get_llm", return_value=mock_llm): result1 = component._get_categorization() result2 = component._get_categorization() assert result1 == "Positive" assert result2 == "Positive" assert mock_llm.invoke.call_count == 1 # LLM only called once due to caching
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/components/llm_operations/test_llm_conditional_router.py", "license": "MIT License", "lines": 106, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/tests/unit/base/io/test_chat.py
from lfx.base.io.chat import _extract_model_name class TestExtractModelName: def test_should_return_string_when_input_is_string(self): assert _extract_model_name("gpt-4o-mini") == "gpt-4o-mini" def test_should_return_name_when_input_is_model_input_list(self): model_input = [{"name": "gpt-4o-mini", "icon": "OpenAI", "provider": "OpenAI"}] assert _extract_model_name(model_input) == "gpt-4o-mini" def test_should_return_name_when_input_is_dict(self): model_dict = {"name": "claude-3", "provider": "Anthropic"} assert _extract_model_name(model_dict) == "claude-3" def test_should_return_none_when_input_is_empty_list(self): assert _extract_model_name([]) is None def test_should_return_none_when_input_is_none(self): assert _extract_model_name(None) is None def test_should_return_none_when_list_has_no_name_key(self): assert _extract_model_name([{"provider": "OpenAI"}]) is None def test_should_return_none_when_dict_has_no_name_key(self): assert _extract_model_name({"provider": "OpenAI"}) is None def test_should_return_none_when_input_is_integer(self): assert _extract_model_name(123) is None
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/tests/unit/base/io/test_chat.py", "license": "MIT License", "lines": 20, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/performance/check_key_benchmark.py
# src/backend/tests/perf/check_key_benchmark.py import logging import statistics import time import uuid import pytest from langflow.services.auth import utils as auth_utils from langflow.services.database.models.api_key import crud as api_key_crud from langflow.services.database.models.api_key.model import ApiKey from langflow.services.database.models.user.model import User from langflow.services.deps import get_settings_service from sqlmodel.ext.asyncio.session import AsyncSession logger = logging.getLogger(__name__) def _get_test_password() -> str: """Generate a unique test password for benchmark runs.""" return str(uuid.uuid4()) class DummyResult: def __init__(self, items): self._items = items def all(self): return self._items async def benchmark_once( n_keys: int, iterations: int = 100, async_db_session: AsyncSession | None = None, ): settings_service = None try: settings_service = get_settings_service() except Exception: settings_service = None stored_rows = [] # generate N keys, keep one matching candidate_key to test hit candidate_raw = f"sk-test-{uuid.uuid4()}" for i in range(n_keys): raw = f"sk-test-{uuid.uuid4()}" if i == n_keys - 1: raw = candidate_raw try: stored = auth_utils.encrypt_api_key(raw, settings_service=settings_service) except Exception: stored = f"enc-{raw}" stored_rows.append((str(i), stored, str(uuid.uuid4()))) if async_db_session is not None: # use provided async session fixture to mimic DB db_session = async_db_session # create a user user = User(username=f"u-{uuid.uuid4()}", password=_get_test_password()) db_session.add(user) await db_session.flush() await db_session.refresh(user) for i, (_, stored, _uid) in enumerate(stored_rows): api = ApiKey(api_key=stored, name=f"k-{i}", user_id=user.id) db_session.add(api) await db_session.commit() timings = [] for _ in range(iterations): t0 = time.perf_counter() await api_key_crud._check_key_from_db(db_session, candidate_raw, settings_service) t1 = time.perf_counter() timings.append((t1 - t0) * 
1000.0) # ms mean = statistics.mean(timings) p50 = statistics.median(timings) total_ms = sum(timings) return { "n_keys": n_keys, "iterations": iterations, "mean_ms": mean, "p50_ms": p50, "total_ms": total_ms, } @pytest.mark.parametrize("n_keys", [1, 10, 50, 100, 1000]) async def test_benchmark_check_key_from_db_smoke(async_session: AsyncSession, n_keys): """Run a quick smoke benchmark using simulated stored values (no real crypto). This test doesn't assert strict performance thresholds — it ensures the benchmark runner works under pytest and returns sensible metrics. """ # keep iterations small for CI-friendly run time - use async session fixture r = await benchmark_once(n_keys=n_keys, iterations=5, async_db_session=async_session) # basic sanity checks assert r["n_keys"] == n_keys assert r["mean_ms"] >= 0.0 assert r["p50_ms"] >= 0.0 # log results so they are captured by pytest's logging capture logger.info( "perf n=%s mean=%.2fms p50=%.2fms total=%.2fms", n_keys, r["mean_ms"], r["p50_ms"], r["total_ms"], )
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/performance/check_key_benchmark.py", "license": "MIT License", "lines": 91, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/test_get_api_key.py
import asyncio from uuid import uuid4 import langflow.services.database.models.api_key.crud as crud_module import pytest from cryptography.fernet import InvalidToken class DummyResult: def __init__(self, items): self._items = items def all(self): return self._items class MockSession: def __init__(self, items): self._items = items async def exec(self, _query=None): # emulate SQLModel AsyncSession.exec returning a result with .all() await asyncio.sleep(0) # ensure it's truly async return DummyResult(self._items) class MockApiKeyObj: def __init__(self, data: dict): self._data = data def model_dump(self): return dict(self._data) @pytest.mark.asyncio async def test_get_api_keys_decrypts_and_falls_back(monkeypatch): user_id = uuid4() items = [ MockApiKeyObj({"id": "1", "api_key": "enc-1", "name": "k1", "user_id": str(user_id)}), MockApiKeyObj({"id": "2", "api_key": "bad-enc", "name": "k2", "user_id": str(user_id)}), MockApiKeyObj({"id": "3", "api_key": None, "name": "k3", "user_id": str(user_id)}), ] session = MockSession(items) # Ensure get_settings_service returns a dummy settings (decrypt stub ignores it, but function expects it) monkeypatch.setattr(crud_module, "get_settings_service", lambda: object()) monkeypatch.setattr(crud_module.auth_utils, "get_fernet", lambda _settings_service: None) # Patch decrypt_api_key to: # - return 'sk-decrypted' for 'enc-1' # - raise InvalidToken for 'bad-enc' to trigger fallback def fake_decrypt(val, *, settings_service=None, fernet_obj=None): # noqa: ARG001 if val == "enc-1": return "sk-decrypted" if val == "bad-enc": raise InvalidToken return val monkeypatch.setattr(crud_module.auth_utils, "decrypt_api_key", fake_decrypt) # Patch ApiKeyRead.model_validate to just return the provided dict for easy assertions monkeypatch.setattr(crud_module.ApiKeyRead, "model_validate", staticmethod(lambda data: data)) result = await crud_module.get_api_keys(session, user_id) # three entries returned assert isinstance(result, list) assert len(result) 
== 3 # first decrypted assert result[0]["api_key"] == "sk-decrypted" # second fell back to stored value 'bad-enc' assert result[1]["api_key"] == "bad-enc" # third remains None assert result[2]["api_key"] is None
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/test_get_api_key.py", "license": "MIT License", "lines": 56, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/components/prompts/test_validate_prompt_mustache.py
"""Tests for validate_prompt function with mustache templates. These tests ensure that complex mustache syntax is rejected during the "Check & Save" validation phase, not just at runtime. This prevents users from saving templates that are guaranteed to fail at runtime. Regression test for: Complex mustache patterns like {{#section}}{{/section}} were being accepted during save but causing "Complex mustache syntax is not allowed" errors at runtime. """ import pytest from lfx.base.prompts.api_utils import validate_prompt class TestValidatePromptMustache: """Test validate_prompt function with mustache templates.""" def test_simple_variable_accepted(self): """Test that simple mustache variables are accepted.""" result = validate_prompt("Hello {{name}}!", is_mustache=True) assert result == ["name"] def test_multiple_simple_variables_accepted(self): """Test that multiple simple variables are accepted.""" result = validate_prompt("Hello {{first_name}} {{last_name}}!", is_mustache=True) assert sorted(result) == ["first_name", "last_name"] def test_underscore_variables_accepted(self): """Test that variables with underscores are accepted.""" result = validate_prompt("{{user_name}} - {{_private}}", is_mustache=True) assert sorted(result) == ["_private", "user_name"] def test_numeric_suffix_variables_accepted(self): """Test that variables with numeric suffixes are accepted.""" result = validate_prompt("{{var1}} {{var2}} {{price_100}}", is_mustache=True) assert sorted(result) == ["price_100", "var1", "var2"] def test_empty_template_accepted(self): """Test that empty template is accepted.""" result = validate_prompt("", is_mustache=True) assert result == [] def test_no_variables_accepted(self): """Test that template without variables is accepted.""" result = validate_prompt("Hello World!", is_mustache=True) assert result == [] # Regression tests for the bug: complex syntax should be rejected during validation # These patterns were previously accepted during "Check & Save" but 
failed at runtime def test_section_syntax_rejected(self): """Test that section syntax ({{#section}}{{/section}}) is rejected. This is the main regression test for the bug where closed sections were accepted during save but caused runtime errors. """ with pytest.raises(ValueError, match="Complex mustache syntax is not allowed"): validate_prompt("{{#section}}content{{/section}}", is_mustache=True) def test_conditional_syntax_rejected(self): """Test that conditional syntax is rejected.""" with pytest.raises(ValueError, match="Complex mustache syntax is not allowed"): validate_prompt("{{#if}}show this{{/if}}", is_mustache=True) def test_inverted_section_rejected(self): """Test that inverted section syntax ({{^section}}) is rejected.""" with pytest.raises(ValueError, match="Complex mustache syntax is not allowed"): validate_prompt("{{^empty}}not empty{{/empty}}", is_mustache=True) def test_unescaped_variable_rejected(self): """Test that unescaped variable syntax ({{&var}}) is rejected.""" with pytest.raises(ValueError, match="Complex mustache syntax is not allowed"): validate_prompt("{{&html_content}}", is_mustache=True) def test_triple_braces_rejected(self): """Test that triple braces ({{{var}}}) for unescaped HTML are rejected.""" with pytest.raises(ValueError, match="Complex mustache syntax is not allowed"): validate_prompt("{{{unescaped}}}", is_mustache=True) def test_partial_syntax_rejected(self): """Test that partial syntax ({{>partial}}) is rejected.""" with pytest.raises(ValueError, match="Complex mustache syntax is not allowed"): validate_prompt("{{>header}}", is_mustache=True) def test_comment_syntax_rejected(self): """Test that comment syntax ({{!comment}}) is rejected.""" with pytest.raises(ValueError, match="Complex mustache syntax is not allowed"): validate_prompt("{{!this is a comment}}", is_mustache=True) def test_current_context_rejected(self): """Test that current context syntax ({{.}}) is rejected.""" with pytest.raises(ValueError, match="Complex 
mustache syntax is not allowed"): validate_prompt("{{.}}", is_mustache=True) def test_nested_sections_rejected(self): """Test that nested sections are rejected.""" with pytest.raises(ValueError, match="Complex mustache syntax is not allowed"): validate_prompt("{{#outer}}{{#inner}}content{{/inner}}{{/outer}}", is_mustache=True) def test_loop_syntax_rejected(self): """Test that loop syntax is rejected (sections are used for loops in Mustache).""" with pytest.raises(ValueError, match="Complex mustache syntax is not allowed"): validate_prompt("{{#items}}{{name}}{{/items}}", is_mustache=True) # Tests for invalid variable names def test_dot_notation_rejected(self): """Test that dot notation ({{user.name}}) is rejected.""" with pytest.raises(ValueError, match="Invalid mustache variable"): validate_prompt("{{user.name}}", is_mustache=True) def test_spaces_in_variable_rejected(self): """Test that spaces in variable names are rejected.""" with pytest.raises(ValueError, match="Invalid mustache variable"): validate_prompt("{{ variable with spaces }}", is_mustache=True) def test_variable_starting_with_number_rejected(self): """Test that variables starting with numbers are rejected.""" with pytest.raises(ValueError, match="Invalid mustache variable"): validate_prompt("{{123abc}}", is_mustache=True) def test_empty_variable_rejected(self): """Test that empty variables ({{}}) are rejected.""" with pytest.raises(ValueError, match="Invalid mustache variable"): validate_prompt("{{}}", is_mustache=True) def test_special_characters_rejected(self): """Test that special characters in variable names are rejected.""" with pytest.raises(ValueError, match="Invalid mustache variable"): validate_prompt("{{price-$100}}", is_mustache=True) # Tests for malformed syntax (unclosed tags) def test_unclosed_section_rejected(self): """Test that unclosed section tags are rejected.""" with pytest.raises(ValueError, match="Complex mustache syntax is not allowed"): validate_prompt("{{#section}}", 
is_mustache=True) def test_unclosed_inverted_section_rejected(self): """Test that unclosed inverted section tags are rejected.""" with pytest.raises(ValueError, match="Complex mustache syntax is not allowed"): validate_prompt("{{^section}}", is_mustache=True) def test_closing_tag_without_opening_rejected(self): """Test that closing tags without opening are rejected.""" with pytest.raises(ValueError, match="Complex mustache syntax is not allowed"): validate_prompt("{{/section}}", is_mustache=True) # Tests for mixed content def test_simple_variable_with_text(self): """Test simple variable mixed with text content.""" result = validate_prompt("Dear {{name}}, your order {{order_id}} is ready.", is_mustache=True) assert sorted(result) == ["name", "order_id"] def test_complex_syntax_mixed_with_simple_rejected(self): """Test that complex syntax mixed with simple variables is still rejected.""" with pytest.raises(ValueError, match="Complex mustache syntax is not allowed"): validate_prompt("Hello {{name}}! {{#show}}extra{{/show}}", is_mustache=True)
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/components/prompts/test_validate_prompt_mustache.py", "license": "MIT License", "lines": 123, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/tests/unit/components/utilities/test_current_date.py
"""Tests for CurrentDateComponent tool schema optimization.""" import json from lfx.base.tools.component_tool import ComponentToolkit from lfx.components.utilities.current_date import CurrentDateComponent from lfx.io.schema import MAX_OPTIONS_FOR_TOOL_ENUM class TestCurrentDateToolSchema: """Tests to verify tool schema doesn't waste tokens on large option lists.""" def test_should_not_include_enum_when_options_exceed_limit(self): """Verify schema uses string type instead of enum for large option lists.""" # Arrange component = CurrentDateComponent() # Populate options via update_build_config build_config = {"timezone": {"options": []}} component.update_build_config(build_config, "", "timezone") component.inputs[0].options = build_config["timezone"]["options"] toolkit = ComponentToolkit(component) # Act tools = toolkit.get_tools() tool = tools[0] schema = tool.args_schema.model_json_schema() # Assert assert len(component.inputs[0].options) > MAX_OPTIONS_FOR_TOOL_ENUM assert "enum" not in json.dumps(schema) assert schema["properties"]["timezone"]["type"] == "string" def test_should_have_default_value_in_schema(self): """Verify schema includes default value when enum is skipped.""" # Arrange component = CurrentDateComponent() toolkit = ComponentToolkit(component) # Act tools = toolkit.get_tools() schema = tools[0].args_schema.model_json_schema() # Assert assert schema["properties"]["timezone"]["default"] == "UTC" def test_should_reduce_schema_size_significantly(self): """Verify schema size is reasonable (not wasting tokens).""" # Arrange component = CurrentDateComponent() toolkit = ComponentToolkit(component) max_acceptable_chars = 500 # Before fix was ~16000 # Act tools = toolkit.get_tools() schema_str = json.dumps(tools[0].args_schema.model_json_schema()) # Assert assert len(schema_str) < max_acceptable_chars class TestCurrentDateFunctionality: """Tests to verify component still works correctly.""" def test_should_return_utc_time_by_default(self): """Verify 
component returns UTC time when using default timezone.""" # Arrange component = CurrentDateComponent() # Act result = component.get_current_date() # Assert assert "UTC" in result.text def test_should_return_time_in_specified_timezone(self): """Verify component respects timezone selection.""" # Arrange component = CurrentDateComponent() component.timezone = "America/New_York" # Act result = component.get_current_date() # Assert assert "America/New_York" in result.text or "EST" in result.text or "EDT" in result.text def test_should_handle_invalid_timezone_gracefully(self): """Verify component returns error for invalid timezone.""" # Arrange component = CurrentDateComponent() component.timezone = "Invalid/Timezone" # Act result = component.get_current_date() # Assert assert "Error" in result.text
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/tests/unit/components/utilities/test_current_date.py", "license": "MIT License", "lines": 73, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/tests/unit/inputs/test_model_input_static_options.py
"""Tests for ModelInput static options preservation. This module tests that when a component specifies static options for a ModelInput, those options remain static and are not overridden by global user settings. """ from unittest.mock import MagicMock from lfx.base.models.unified_models import update_model_options_in_build_config class TestModelInputStaticOptions: """Test that ModelInput with static options doesn't get refreshed from global settings.""" def test_static_options_preserved_on_initial_load(self): """When options are provided, they should be preserved and not refreshed.""" # Setup: Component with static options component = MagicMock() component.user_id = "test_user" component.cache = {} component.log = MagicMock() static_options = [ {"name": "custom-model-1", "provider": "Custom"}, {"name": "custom-model-2", "provider": "Custom"}, ] build_config = {"model": {"options": static_options}} # Mock the get_options_func that would normally fetch from global settings def mock_get_options(user_id): # noqa: ARG001 return [ {"name": "gpt-4o", "provider": "OpenAI"}, {"name": "claude-3", "provider": "Anthropic"}, ] # Call with initial load (field_name=None) result = update_model_options_in_build_config( component=component, build_config=build_config, cache_key_prefix="language_model_options", get_options_func=mock_get_options, field_name=None, field_value=None, ) # Verify: Static options should be preserved, NOT replaced with global options assert result["model"]["options"] == static_options assert result["model"]["options"] != mock_get_options("test_user") def test_static_options_preserved_on_refresh(self): """When refresh button is clicked, static options should still be preserved.""" # Setup: Component with static options component = MagicMock() component.user_id = "test_user" component.cache = {} component.log = MagicMock() static_options = [ {"name": "custom-model-1", "provider": "Custom"}, {"name": "custom-model-2", "provider": "Custom"}, ] build_config = 
{"model": {"options": static_options}} def mock_get_options(user_id): # noqa: ARG001 return [ {"name": "gpt-4o", "provider": "OpenAI"}, {"name": "claude-3", "provider": "Anthropic"}, ] # First call: initial load to detect static options update_model_options_in_build_config( component=component, build_config=build_config, cache_key_prefix="language_model_options", get_options_func=mock_get_options, field_name=None, field_value=None, ) # Second call: simulate refresh button click (field_name="model") result = update_model_options_in_build_config( component=component, build_config=build_config, cache_key_prefix="language_model_options", get_options_func=mock_get_options, field_name="model", field_value=[{"name": "custom-model-1", "provider": "Custom"}], ) # Verify: Static options should STILL be preserved after refresh assert result["model"]["options"] == static_options def test_dynamic_options_still_refresh(self): """When no options are provided, dynamic refresh should still work.""" # Setup: Component WITHOUT static options component = MagicMock() component.user_id = "test_user" component.cache = {} component.log = MagicMock() # No options initially build_config = {"model": {}} global_options = [ {"name": "gpt-4o", "provider": "OpenAI"}, {"name": "claude-3", "provider": "Anthropic"}, ] def mock_get_options(user_id): # noqa: ARG001 return global_options # Call with initial load result = update_model_options_in_build_config( component=component, build_config=build_config, cache_key_prefix="language_model_options", get_options_func=mock_get_options, field_name=None, field_value=None, ) # Verify: Should use global options since no static options were provided assert result["model"]["options"] == global_options def test_static_options_with_connect_other_models(self): """Static options with 'connect_other_models' should show handle but keep options.""" # Setup: Component with static options component = MagicMock() component.user_id = "test_user" component.cache = {} 
component.log = MagicMock() static_options = [ {"name": "custom-model-1", "provider": "Custom"}, ] build_config = {"model": {"options": static_options, "input_types": []}} def mock_get_options(user_id): # noqa: ARG001 return [{"name": "gpt-4o", "provider": "OpenAI"}] # First call: initial load to detect static options update_model_options_in_build_config( component=component, build_config=build_config, cache_key_prefix="language_model_options", get_options_func=mock_get_options, field_name=None, field_value=None, ) # Second call: user selects "connect_other_models" result = update_model_options_in_build_config( component=component, build_config=build_config, cache_key_prefix="language_model_options", get_options_func=mock_get_options, field_name="model", field_value="connect_other_models", ) # Verify: Static options preserved AND handle shown assert result["model"]["options"] == static_options assert result["model"]["input_types"] == ["LanguageModel"] def test_empty_static_options_list_treated_as_dynamic(self): """An empty options list should be treated as dynamic, not static.""" # Setup: Component with empty options list component = MagicMock() component.user_id = "test_user" component.cache = {} component.log = MagicMock() # Empty options list build_config = {"model": {"options": []}} global_options = [ {"name": "gpt-4o", "provider": "OpenAI"}, ] def mock_get_options(user_id): # noqa: ARG001 return global_options # Call with initial load result = update_model_options_in_build_config( component=component, build_config=build_config, cache_key_prefix="language_model_options", get_options_func=mock_get_options, field_name=None, field_value=None, ) # Verify: Should treat empty list as dynamic and fetch global options assert result["model"]["options"] == global_options
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/tests/unit/inputs/test_model_input_static_options.py", "license": "MIT License", "lines": 160, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/base/langflow/services/event_manager.py
"""Event Manager for Webhook Real-Time Updates. This module provides an in-memory event broadcasting system for webhook builds. When a UI is connected via SSE, it receives real-time build events. """ from __future__ import annotations import asyncio import json import time from collections import defaultdict from typing import TYPE_CHECKING, Any from loguru import logger if TYPE_CHECKING: from lfx.events.event_manager import EventManager # Constants SSE_QUEUE_MAX_SIZE = 100 SSE_EMIT_TIMEOUT_SECONDS = 1.0 SECONDS_PER_MINUTE = 60 class WebhookEventManager: """Manages SSE connections and broadcasts build events for webhooks. When a flow is open in the UI, it subscribes to webhook events. When a webhook is triggered, events are emitted to all subscribers. This provides the same visual experience as clicking "Play" in the UI, but triggered by external webhook calls. """ def __init__(self) -> None: """Initialize the event manager with empty listeners.""" self._listeners: dict[str, set[asyncio.Queue]] = defaultdict(set) self._vertex_start_times: dict[str, dict[str, float]] = defaultdict(dict) self._lock = asyncio.Lock() def record_build_start(self, flow_id: str, vertex_id: str) -> None: """Record when a vertex build starts for duration calculation.""" self._vertex_start_times[flow_id][vertex_id] = time.time() def get_build_duration(self, flow_id: str, vertex_id: str) -> str | None: """Get the formatted build duration for a vertex.""" start_time = self._vertex_start_times.get(flow_id, {}).get(vertex_id) if start_time is None: return None elapsed = time.time() - start_time # Clean up self._vertex_start_times[flow_id].pop(vertex_id, None) return self._format_duration(elapsed) @staticmethod def _format_duration(seconds: float) -> str: """Format duration in a human-readable way.""" if seconds < 1: return f"{int(seconds * 1000)} ms" if seconds < SECONDS_PER_MINUTE: return f"{seconds:.1f} s" minutes = int(seconds // SECONDS_PER_MINUTE) secs = seconds % SECONDS_PER_MINUTE return 
f"{minutes}m {secs:.1f}s" async def subscribe(self, flow_id: str) -> asyncio.Queue: """Subscribe to receive events for a specific flow. Args: flow_id: The flow ID to subscribe to Returns: Queue that will receive events for this flow """ queue: asyncio.Queue = asyncio.Queue(maxsize=SSE_QUEUE_MAX_SIZE) async with self._lock: self._listeners[flow_id].add(queue) listener_count = len(self._listeners[flow_id]) logger.info(f"New subscriber for flow {flow_id}. Total listeners: {listener_count}") return queue async def unsubscribe(self, flow_id: str, queue: asyncio.Queue) -> None: """Unsubscribe from flow events. Args: flow_id: The flow ID to unsubscribe from queue: The queue to remove """ async with self._lock: if flow_id in self._listeners: self._listeners[flow_id].discard(queue) listener_count = len(self._listeners[flow_id]) # Clean up empty sets if not self._listeners[flow_id]: del self._listeners[flow_id] logger.info(f"All subscribers disconnected for flow {flow_id}") else: logger.info(f"Subscriber disconnected from flow {flow_id}. Remaining: {listener_count}") async def emit(self, flow_id: str, event_type: str, data: Any) -> None: """Emit an event to all subscribers of a flow. Args: flow_id: The flow ID to emit to event_type: Type of event (build_start, end_vertex, etc.) 
data: Event data (will be JSON serialized) """ async with self._lock: listeners = self._listeners.get(flow_id, set()).copy() if not listeners: # No one listening, skip emission (performance optimization) return # Prepare event event = { "event": event_type, "data": data, "timestamp": time.time(), } # Send to all queues dead_queues: set[asyncio.Queue] = set() for queue in listeners: try: await asyncio.wait_for(queue.put(event), timeout=SSE_EMIT_TIMEOUT_SECONDS) except asyncio.TimeoutError: # Queue is full (slow consumer), skip this event logger.warning(f"Queue full for flow {flow_id}, dropping event {event_type}") except Exception as e: # noqa: BLE001 # Queue is closed or broken, mark for removal logger.error(f"Error putting event in queue for flow {flow_id}: {e}") dead_queues.add(queue) # Clean up dead queues if dead_queues: async with self._lock: if flow_id in self._listeners: self._listeners[flow_id] -= dead_queues if not self._listeners[flow_id]: del self._listeners[flow_id] def has_listeners(self, flow_id: str) -> bool: """Check if there are any active listeners for a flow.""" return flow_id in self._listeners and len(self._listeners[flow_id]) > 0 # Module-level instance (can be replaced in tests via dependency injection) # TODO: Consider migrating to langflow's service manager pattern for better DI _webhook_event_manager: WebhookEventManager | None = None def get_webhook_event_manager() -> WebhookEventManager: """Get the webhook event manager instance. Returns: The WebhookEventManager singleton instance. """ global _webhook_event_manager # noqa: PLW0603 if _webhook_event_manager is None: _webhook_event_manager = WebhookEventManager() return _webhook_event_manager # Backwards compatibility alias webhook_event_manager = get_webhook_event_manager() class WebhookForwardingQueue: """Queue adapter that forwards events to the webhook SSE. 
This class implements the queue interface expected by EventManager, forwarding events to connected SSE clients instead of storing them. """ def __init__(self, flow_id: str, run_id: str | None = None): self.flow_id = flow_id self.run_id = run_id self._manager = get_webhook_event_manager() def put_nowait(self, item: tuple[str, bytes, float]) -> None: """Forward event to webhook SSE. Args: item: Tuple of (event_id, data_bytes, timestamp) from EventManager """ _event_id, data_bytes, _timestamp = item try: data_str = data_bytes.decode("utf-8").strip() if not data_str: return event_data = json.loads(data_str) event_type = event_data.get("event") event_payload = event_data.get("data", {}) if self.run_id and isinstance(event_payload, dict): event_payload["run_id"] = self.run_id self._emit_async(event_type, event_payload) except Exception as exc: # noqa: BLE001 logger.debug(f"Failed to forward event to webhook SSE: flow_id={self.flow_id}, error={exc}") def _emit_async(self, event_type: str, event_payload: Any) -> None: """Emit event asynchronously (fire and forget).""" try: loop = asyncio.get_running_loop() task = loop.create_task(self._manager.emit(self.flow_id, event_type, event_payload)) # Suppress exceptions from fire-and-forget task task.add_done_callback(lambda t: t.exception() if not t.cancelled() else None) except RuntimeError: pass # No running loop def create_webhook_event_manager(flow_id: str, run_id: str | None = None) -> EventManager: """Create an EventManager that forwards events to the webhook SSE. This allows webhook execution to emit real-time build events (end_vertex with build_data, build_start, etc.) to connected UI clients. 
Args: flow_id: The flow ID to emit events for run_id: Optional run ID to include in events Returns: EventManager configured to forward events to webhook SSE """ from lfx.events.event_manager import EventManager queue = WebhookForwardingQueue(flow_id, run_id) manager = EventManager(queue) # Register all standard events manager.register_event("on_token", "token") manager.register_event("on_vertices_sorted", "vertices_sorted") manager.register_event("on_error", "error") manager.register_event("on_end", "end") manager.register_event("on_message", "add_message") manager.register_event("on_remove_message", "remove_message") manager.register_event("on_end_vertex", "end_vertex") manager.register_event("on_build_start", "build_start") manager.register_event("on_build_end", "build_end") return manager
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/services/event_manager.py", "license": "MIT License", "lines": 194, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
langflow-ai/langflow:src/backend/base/langflow/api/v2/converters.py
"""Schema converters for V2 Workflow API. This module provides conversion functions between the new V2 workflow schemas and the existing V1 schemas, enabling reuse of existing execution logic while presenting a new API interface. Key Functions: - parse_flat_inputs: Converts flat input format to tweaks structure - run_response_to_workflow_response: Converts V1 RunResponse to V2 WorkflowExecutionResponse - create_error_response: Creates standardized error responses - create_job_response: Creates background job responses Internal Helpers: - _extract_nested_value: Safely extracts nested values from dict/object structures - _extract_text_from_message: Extracts plain text from various message formats - _simplify_output_content: Simplifies output content based on type - _get_raw_content: Extracts raw content from vertex output data - _extract_model_source: Extracts model information from LLM outputs - _extract_file_path: Extracts file path from SaveToFile outputs - _build_metadata_for_non_output: Builds metadata for non-output terminal nodes """ from __future__ import annotations from datetime import datetime, timezone from typing import TYPE_CHECKING, Any from lfx.schema.workflow import ( ComponentOutput, ErrorDetail, JobId, JobStatus, WorkflowExecutionRequest, WorkflowExecutionResponse, WorkflowJobResponse, ) if TYPE_CHECKING: from lfx.graph.graph.base import Graph from langflow.api.v1.schemas import RunResponse def parse_flat_inputs(inputs: dict[str, Any]) -> tuple[dict[str, dict[str, Any]], str | None]: """Parse flat inputs structure into tweaks and session_id. Format: {"component_id.param": value} Example: {"ChatInput-abc.input_value": "hi", "LLM-xyz.temperature": 0.7} All parameters (including input_value) are treated as tweaks. The graph's topological sort handles execution order automatically. 
Args: inputs: The inputs dictionary from WorkflowExecutionRequest Returns: Tuple of (tweaks_dict, session_id) - tweaks_dict: {component_id: {param: value}} - session_id: Session ID if provided Example: >>> inputs = { ... "ChatInput-abc.input_value": "hello", ... "ChatInput-abc.session_id": "session-123", ... "LLM-xyz.temperature": 0.7 ... } >>> tweaks, session_id = parse_flat_inputs(inputs) >>> tweaks {'ChatInput-abc': {'input_value': 'hello'}, 'LLM-xyz': {'temperature': 0.7}} >>> session_id 'session-123' """ tweaks: dict[str, dict[str, Any]] = {} session_id: str | None = None for key, value in inputs.items(): if "." in key: # Split component_id.param component_id, param_name = key.split(".", 1) # Extract session_id if present (use first one found) if param_name == "session_id" and not session_id: session_id = value # Build tweaks for all parameters if component_id not in tweaks: tweaks[component_id] = {} tweaks[component_id][param_name] = value # No dot - treat as component-level dict (for backward compatibility) elif isinstance(value, dict): tweaks[key] = value return tweaks, session_id def _extract_nested_value(data: Any, *keys: str) -> Any: """Safely extract nested value from dict-like structure. Args: data: The data structure to extract from *keys: Sequence of keys to traverse Returns: The extracted value or None if not found Example: >>> _extract_nested_value({'a': {'b': 'value'}}, 'a', 'b') 'value' """ current = data for key in keys: if isinstance(current, dict): current = current.get(key) elif hasattr(current, key): current = getattr(current, key) else: return None if current is None: return None return current def _extract_text_from_message(content: dict) -> str | None: """Extract plain text from nested message structures. 
Handles various message formats by trying common paths in order: - {'message': {'message': 'text', 'type': 'text'}} - {'text': {'message': 'text'}} - {'message': {'text': 'text'}} - {'message': 'text'} - {'text': {'text': 'text'}} - {'text': 'text'} Args: content: The message content dict Returns: Extracted text string or None """ paths = [ ("message", "message"), ("text", "message"), ("message", "text"), ("message",), ("text", "text"), ("text",), ] for path in paths: text = _extract_nested_value(content, *path) if isinstance(text, str): return text return None def _extract_model_source(raw_content: dict, vertex_id: str, vertex_display_name: str) -> dict | None: """Extract model source information from LLM component output. Args: raw_content: The raw output data vertex_id: Vertex ID vertex_display_name: Vertex display name Returns: Source info dict or None """ model_name = _extract_nested_value(raw_content, "model_output", "message", "model_name") if model_name: return {"id": vertex_id, "display_name": vertex_display_name, "source": model_name} return None def _extract_file_path(raw_content: dict, vertex_type: str) -> str | None: """Extract file path from SaveToFile component output. Args: raw_content: The raw output data vertex_type: The vertex type Returns: File path string or None """ if vertex_type != "SaveToFile": return None # Extract the message from SaveToFile component # Return the whole message instead of filtering by specific wording file_msg = _extract_nested_value(raw_content, "message", "message") if isinstance(file_msg, str): return file_msg return None def _get_raw_content(vertex_output_data: Any) -> Any: """Extract raw content from vertex output data. Tries multiple fields in order: outputs, results, messages. Note: Uses 'is not None' checks to avoid treating empty collections as missing. 
Args: vertex_output_data: The output data from RunResponse Returns: Raw content or None """ if hasattr(vertex_output_data, "outputs") and vertex_output_data.outputs is not None: return vertex_output_data.outputs if hasattr(vertex_output_data, "results") and vertex_output_data.results is not None: return vertex_output_data.results if hasattr(vertex_output_data, "messages") and vertex_output_data.messages is not None: return vertex_output_data.messages if isinstance(vertex_output_data, dict): # Check for 'results' first, then 'content' if results is None if "results" in vertex_output_data: return vertex_output_data["results"] if "content" in vertex_output_data: return vertex_output_data["content"] return vertex_output_data def _simplify_output_content(content: Any, output_type: str) -> Any: """Simplify output content for output nodes. For message types, extracts plain text from nested structures. For data/dataframe types, extracts the actual data value. For other types, returns content as-is. 
Args: content: The raw content output_type: The output type Returns: Simplified content """ if not isinstance(content, dict): return content if output_type in {"message", "text"}: text = _extract_text_from_message(content) return text if text is not None else content if output_type == "data": # For data types, try multiple path combinations in order # This allows flexibility for different component output structures data_paths = [ ("result", "message"), # Standard: {'result': {'message': {...}}} ("results", "message"), # Plural variant: {'results': {'message': {...}}} ] for path in data_paths: result_data = _extract_nested_value(content, *path) if result_data is not None: return result_data # TODO: Future scope - Add dataframe-specific extraction logic # The following code is commented out pending further requirements analysis: if output_type == "dataframe": # For dataframe types, try multiple path combinations in order dataframe_paths = [ ("results", "message"), # Plural: {'results': {'message': {...}}} ("result", "message"), # Singular fallback: {'result': {'message': {...}}} ("run_sql_query", "message"), # SQL component specific ] for path in dataframe_paths: dataframe_data = _extract_nested_value(content, *path) if dataframe_data is not None: return dataframe_data return content def _build_metadata_for_non_output( raw_content: Any, vertex_id: str, vertex_display_name: str, vertex_type: str, output_type: str ) -> dict[str, Any]: """Build metadata for non-output terminal nodes. 
Extracts: - source: Model information for LLM components - file_path: File path for SaveToFile components Args: raw_content: The raw output data vertex_id: Vertex ID vertex_display_name: Vertex display name vertex_type: Vertex type output_type: Output type Returns: Metadata dict """ metadata: dict[str, Any] = {} if output_type != "message" or not isinstance(raw_content, dict): return metadata # Extract model source for LLM components source_info = _extract_model_source(raw_content, vertex_id, vertex_display_name) if source_info: metadata["source"] = source_info # Extract file path for SaveToFile components file_path = _extract_file_path(raw_content, vertex_type) if file_path: metadata["file_path"] = file_path return metadata def _process_terminal_vertex( vertex: Any, output_data_map: dict[str, Any], ) -> tuple[str, ComponentOutput]: """Process a single terminal vertex and return (output_key, component_output). Args: vertex: The vertex to process output_data_map: Map of component_id to output data Returns: Tuple of (output_key, ComponentOutput) """ # Get output data by vertex.id (component_id) vertex_output_data = output_data_map.get(vertex.id) # Determine output type from vertex output_type = "unknown" if vertex.outputs and len(vertex.outputs) > 0: types = vertex.outputs[0].get("types", []) if types: output_type = types[0].lower() if output_type == "unknown" and vertex.vertex_type: output_type = vertex.vertex_type.lower() # Initialize metadata with component_type metadata: dict[str, Any] = {"component_type": vertex.vertex_type} # Extract content content = None if vertex_output_data: raw_content = _get_raw_content(vertex_output_data) if vertex.is_output and raw_content is not None: # Output nodes: simplify content content = _simplify_output_content(raw_content, output_type) elif not vertex.is_output and raw_content is not None: # Non-output nodes: # - For data types: extract and show content # - For message types: extract metadata only (source, file_path) # TODO: 
Future scope - Add support for "dataframe" output type if output_type in ["data", "dataframe"]: # Show data content for non-output data nodes content = _simplify_output_content(raw_content, output_type) else: # For message types, extract metadata only extra_metadata = _build_metadata_for_non_output( raw_content, vertex.id, vertex.display_name or vertex.vertex_type, vertex.vertex_type, output_type, ) metadata.update(extra_metadata) # Add any additional metadata from result data if hasattr(vertex_output_data, "metadata") and vertex_output_data.metadata: metadata.update(vertex_output_data.metadata) elif isinstance(vertex_output_data, dict) and "metadata" in vertex_output_data: result_metadata = vertex_output_data.get("metadata") if isinstance(result_metadata, dict): metadata.update(result_metadata) # Determine output key: use vertex id but TODO: add alias handling when avialable output_key = vertex.id # Build ComponentOutput component_output = ComponentOutput( type=output_type, component_id=vertex.id, status=JobStatus.COMPLETED, content=content, metadata=metadata, ) return output_key, component_output def run_response_to_workflow_response( run_response: RunResponse, flow_id: str, job_id: str, workflow_request: WorkflowExecutionRequest, graph: Graph, ) -> WorkflowExecutionResponse: """Convert V1 RunResponse to V2 WorkflowExecutionResponse. This function transforms the V1 execution response to the new V2 schema format. It intelligently handles different node types and determines what content to expose. Terminal Node Processing Logic: 1. Identifies all terminal nodes (vertices with no successors) 2. 
For each terminal node: - Output nodes (is_output=True): Full content is exposed - Data/DataFrame nodes: Content is exposed regardless of is_output flag - Message nodes (non-output): Only metadata is exposed (source, file_path) Output Key Selection: - Uses vertex.display_name as the primary key for outputs - Falls back to vertex.id if duplicate display_names are detected - Stores original display_name in metadata when using id as key Args: run_response: The V1 response from simple_run_flow containing execution results flow_id: The flow identifier job_id: The generated job ID for tracking this execution workflow_request: Original workflow request (inputs are echoed back in response) graph: The Graph instance used for terminal node detection and vertex metadata Returns: WorkflowExecutionResponse: V2 schema response with structured outputs Example: Terminal nodes: ["ChatOutput-abc", "LLM-xyz", "DataNode-123"] - ChatOutput-abc (is_output=True, type=message): Full content exposed - LLM-xyz (is_output=False, type=message): Only metadata (model source) - DataNode-123 (is_output=False, type=data): Full content exposed """ # Get terminal nodes (vertices with no successors) try: terminal_node_ids = graph.get_terminal_nodes() except AttributeError: # Fallback: manually check successor_map terminal_node_ids = [vertex.id for vertex in graph.vertices if not graph.successor_map.get(vertex.id, [])] # Build output data map from run_response using component_id as key # This ensures unique keys even when components have duplicate display_names output_data_map: dict[str, Any] = {} if run_response.outputs: for run_output in run_response.outputs: if hasattr(run_output, "outputs") and run_output.outputs: for result_data in run_output.outputs: if not result_data: continue # Use component_id as key to ensure uniqueness component_id = result_data.component_id if hasattr(result_data, "component_id") else None if component_id: output_data_map[component_id] = result_data # Collect all terminal 
vertices terminal_vertices = [graph.get_vertex(vertex_id) for vertex_id in terminal_node_ids] # Process each terminal vertex outputs: dict[str, ComponentOutput] = {} for vertex in terminal_vertices: output_key, component_output = _process_terminal_vertex(vertex, output_data_map) outputs[output_key] = component_output return WorkflowExecutionResponse( flow_id=flow_id, job_id=job_id, object="response", status=JobStatus.COMPLETED, errors=[], inputs=workflow_request.inputs or {}, outputs=outputs, metadata={}, ) def create_job_response(job_id: str, flow_id: str) -> WorkflowJobResponse: """Create a background job response. Args: job_id: The generated job ID flow_id: The flow ID Returns: WorkflowJobResponse for background execution """ return WorkflowJobResponse( job_id=job_id, flow_id=flow_id, created_timestamp=datetime.now(timezone.utc).isoformat(), status=JobStatus.QUEUED, errors=[], ) def create_error_response( flow_id: str, job_id: JobId, workflow_request: WorkflowExecutionRequest, error: Exception, ) -> WorkflowExecutionResponse: """Create an error response in workflow format. Args: flow_id: The flow ID job_id: The job ID workflow_request: Original request error: The exception that occurred Returns: WorkflowExecutionResponse with error details """ error_detail = ErrorDetail( error=str(error), code="EXECUTION_ERROR", details={"flow_id": flow_id, "error_type": type(error).__name__} ) return WorkflowExecutionResponse( flow_id=flow_id, job_id=job_id, object="response", status=JobStatus.FAILED, errors=[error_detail], inputs=workflow_request.inputs or {}, outputs={}, metadata={}, )
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/api/v2/converters.py", "license": "MIT License", "lines": 422, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
langflow-ai/langflow:src/backend/tests/unit/api/v2/test_converters.py
"""Comprehensive unit tests for V2 Workflow API converters. This test module provides extensive coverage of the converter functions that transform between V2 workflow schemas and V1 schemas. Tests include: Test Coverage: - Input parsing and transformation (parse_flat_inputs) - Nested value extraction from various data structures - Text extraction from different message formats - Model source and file path extraction - Output content simplification - Metadata building for non-output nodes - Response creation (job, error, workflow responses) - End-to-end conversion from RunResponse to WorkflowExecutionResponse Test Strategy: - Uses realistic payload structures from actual components - Covers edge cases, error conditions, and malformed data - Tests with mock objects to simulate component outputs - Validates proper handling of duplicate names and missing data """ from __future__ import annotations from typing import Any from unittest.mock import Mock from uuid import uuid4 import pytest from langflow.api.v2.converters import ( _build_metadata_for_non_output, _extract_file_path, _extract_model_source, _extract_nested_value, _extract_text_from_message, _get_raw_content, _simplify_output_content, create_error_response, create_job_response, parse_flat_inputs, run_response_to_workflow_response, ) from lfx.schema.workflow import ( ErrorDetail, JobStatus, WorkflowExecutionRequest, WorkflowExecutionResponse, WorkflowJobResponse, ) def _setup_graph_get_vertex(graph: Mock, vertices: list[Mock]) -> None: """Helper to setup graph.get_vertex() mock for tests. 
Args: graph: The mock graph object vertices: List of mock vertex objects """ vertex_map = {v.id: v for v in vertices} graph.get_vertex = Mock(side_effect=lambda vid: vertex_map.get(vid)) class TestParseFlatInputs: """Test suite for parse_flat_inputs function.""" def test_parse_flat_inputs_basic(self): """Test basic flat input parsing with component_id.param format.""" inputs = { "ChatInput-abc.input_value": "hello", "LLM-xyz.temperature": 0.7, } tweaks, session_id = parse_flat_inputs(inputs) assert tweaks == { "ChatInput-abc": {"input_value": "hello"}, "LLM-xyz": {"temperature": 0.7}, } assert session_id is None def test_parse_flat_inputs_with_session_id(self): """Test parsing with session_id extraction.""" inputs = { "ChatInput-abc.input_value": "hello", "ChatInput-abc.session_id": "session-123", "LLM-xyz.temperature": 0.7, } tweaks, session_id = parse_flat_inputs(inputs) assert tweaks == { "ChatInput-abc": {"input_value": "hello", "session_id": "session-123"}, "LLM-xyz": {"temperature": 0.7}, } assert session_id == "session-123" def test_parse_flat_inputs_multiple_session_ids(self): """Test that first session_id is used when multiple are provided.""" inputs = { "ChatInput-abc.session_id": "session-first", "ChatInput-xyz.session_id": "session-second", } tweaks, session_id = parse_flat_inputs(inputs) assert session_id == "session-first" assert tweaks == { "ChatInput-abc": {"session_id": "session-first"}, "ChatInput-xyz": {"session_id": "session-second"}, } def test_parse_flat_inputs_dict_values(self): """Test backward compatibility with dict values (no dot notation).""" inputs = { "ChatInput-abc": {"input_value": "hello", "temperature": 0.5}, } tweaks, session_id = parse_flat_inputs(inputs) assert tweaks == {"ChatInput-abc": {"input_value": "hello", "temperature": 0.5}} assert session_id is None def test_parse_flat_inputs_mixed_formats(self): """Test mixed flat and dict formats.""" inputs = { "ChatInput-abc.input_value": "hello", "LLM-xyz": {"temperature": 0.7, 
"max_tokens": 100}, } tweaks, session_id = parse_flat_inputs(inputs) assert tweaks == { "ChatInput-abc": {"input_value": "hello"}, "LLM-xyz": {"temperature": 0.7, "max_tokens": 100}, } assert session_id is None def test_parse_flat_inputs_empty(self): """Test with empty inputs.""" tweaks, session_id = parse_flat_inputs({}) assert tweaks == {} assert session_id is None def test_parse_flat_inputs_multiple_params_same_component(self): """Test multiple parameters for the same component.""" inputs = { "LLM-xyz.temperature": 0.7, "LLM-xyz.max_tokens": 100, "LLM-xyz.top_p": 0.9, } tweaks, session_id = parse_flat_inputs(inputs) assert tweaks == { "LLM-xyz": { "temperature": 0.7, "max_tokens": 100, "top_p": 0.9, } } assert session_id is None def test_parse_flat_inputs_malformed_key_no_dot(self): """Test handling of non-dict value without dot notation (edge case).""" # Non-dict values without dots are ignored (not valid component.param format) inputs = { "ChatInput-abc.input_value": "hello", "invalid_key_no_dot": "should be ignored", "LLM-xyz.temperature": 0.7, } tweaks, session_id = parse_flat_inputs(inputs) # Only valid dot-notation keys should be parsed assert tweaks == { "ChatInput-abc": {"input_value": "hello"}, "LLM-xyz": {"temperature": 0.7}, } assert session_id is None def test_parse_flat_inputs_null_values(self): """Test handling of None/null values in inputs.""" inputs = { "ChatInput-abc.input_value": None, "LLM-xyz.temperature": 0.7, "DataNode-123.data": None, } tweaks, session_id = parse_flat_inputs(inputs) # None values should be preserved assert tweaks == { "ChatInput-abc": {"input_value": None}, "LLM-xyz": {"temperature": 0.7}, "DataNode-123": {"data": None}, } assert session_id is None def test_parse_flat_inputs_deeply_nested_dict(self): """Test handling of deeply nested dict structures (backward compatibility).""" inputs = {"Component-abc": {"level1": {"level2": {"level3": {"value": "deeply nested"}}}}} tweaks, session_id = parse_flat_inputs(inputs) # Deeply 
nested dicts should be preserved as-is assert tweaks == {"Component-abc": {"level1": {"level2": {"level3": {"value": "deeply nested"}}}}} assert session_id is None def test_parse_flat_inputs_empty_collections(self): """Test handling of empty lists and dicts as values.""" inputs = { "Component-1.list_param": [], "Component-2.dict_param": {}, "Component-3.string_param": "", } tweaks, session_id = parse_flat_inputs(inputs) assert tweaks == { "Component-1": {"list_param": []}, "Component-2": {"dict_param": {}}, "Component-3": {"string_param": ""}, } assert session_id is None def test_parse_flat_inputs_explicit_none_values(self): """Test explicit None checks - None should be preserved, not treated as missing.""" inputs = { "Component-1.optional_param": None, "Component-2.required_param": "value", "Component-3.another_none": None, } tweaks, session_id = parse_flat_inputs(inputs) # None values should be explicitly preserved assert tweaks == { "Component-1": {"optional_param": None}, "Component-2": {"required_param": "value"}, "Component-3": {"another_none": None}, } assert session_id is None def test_parse_flat_inputs_special_characters_in_keys(self): """Test handling of special characters (dashes, underscores) in component IDs and parameter names.""" inputs = { "Component-with-dashes.param_with_underscores": "value1", "Component_123.param-456": "value2", "Component_With_Underscores.param_name": "value3", } tweaks, session_id = parse_flat_inputs(inputs) assert tweaks == { "Component-with-dashes": {"param_with_underscores": "value1"}, "Component_123": {"param-456": "value2"}, "Component_With_Underscores": {"param_name": "value3"}, } assert session_id is None class TestExtractNestedValue: """Test suite for _extract_nested_value helper function with realistic payload structures.""" def test_extract_model_output_message_model_name(self): """Test extracting model_name from LLM output structure.""" # Realistic structure from OpenAI/Anthropic LLM components data = { 
"model_output": { "message": { "text": "AI response here", "model_name": "gpt-4", "sender": "AI", "sender_name": "AI", } } } result = _extract_nested_value(data, "model_output", "message", "model_name") assert result == "gpt-4" def test_extract_result_message_from_data_output(self): """Test extracting result from Data component output.""" # Structure from Data/Calculator components data = {"result": {"message": {"result": "42"}, "type": "object"}} result = _extract_nested_value(data, "result", "message") assert result == {"result": "42"} def test_extract_message_from_chat_output(self): """Test extracting message from ChatOutput structure.""" # ChatOutput component structure data = { "message": { "message": "Hello, how can I help you?", "type": "text", "sender": "AI", } } result = _extract_nested_value(data, "message", "message") assert result == "Hello, how can I help you?" def test_extract_nested_value_error_handling(self): """Test error handling for missing keys and None values in path.""" # Missing key data = {"outputs": {"result": "value"}} result = _extract_nested_value(data, "outputs", "nonexistent") assert result is None # None in path data = {"results": None} result = _extract_nested_value(data, "results", "data") assert result is None def test_extract_from_result_data_object(self): """Test extracting from ResultData object with attributes.""" # Simulating ResultData from lfx.graph.schema obj = Mock() obj.outputs = {"message": {"text": "output text"}} obj.results = {"data": "result data"} result = _extract_nested_value(obj, "outputs", "message", "text") assert result == "output text" def test_extract_text_from_output_value(self): """Test extracting from OutputValue structure.""" # OutputValue structure from lfx.schema.schema data = {"message": {"text": "Hello World"}, "type": "message"} result = _extract_nested_value(data, "message", "text") assert result == "Hello World" def test_extract_from_pinecone_output(self): """Test extracting from Pinecone vector 
store output structure.""" # Pinecone vector store typical output data = { "results": { "matches": [ {"id": "vec1", "score": 0.95, "metadata": {"text": "result 1"}}, {"id": "vec2", "score": 0.87, "metadata": {"text": "result 2"}}, ] } } result = _extract_nested_value(data, "results", "matches") assert result is not None assert len(result) == 2 assert result[0]["score"] == 0.95 def test_extract_from_chroma_output(self): """Test extracting from Chroma vector store output structure.""" # Chroma vector store typical output data = { "results": { "ids": [["id1", "id2"]], "distances": [[0.1, 0.3]], "documents": [["doc1 text", "doc2 text"]], } } result = _extract_nested_value(data, "results", "documents") assert result == [["doc1 text", "doc2 text"]] def test_extract_from_weaviate_output(self): """Test extracting from Weaviate vector store output structure.""" # Weaviate vector store typical output data = { "data": { "Get": { "Document": [ {"text": "document 1", "_additional": {"distance": 0.15}}, {"text": "document 2", "_additional": {"distance": 0.22}}, ] } } } result = _extract_nested_value(data, "data", "Get", "Document") assert result is not None assert len(result) == 2 assert result[0]["text"] == "document 1" def test_extract_from_retriever_output(self): """Test extracting from generic retriever output structure.""" # Generic retriever output with documents data = { "documents": [ {"page_content": "Retrieved doc 1", "metadata": {"source": "file1.txt"}}, {"page_content": "Retrieved doc 2", "metadata": {"source": "file2.txt"}}, ] } result = _extract_nested_value(data, "documents") assert result is not None assert len(result) == 2 assert result[0]["page_content"] == "Retrieved doc 1" class TestExtractTextFromMessage: """Test suite for _extract_text_from_message function with realistic message structures.""" def test_extract_from_chat_output_nested_message(self): """Test extracting from ChatOutput component with nested message.message structure.""" # Typical ChatOutput 
structure content = { "message": { "message": "Hello, how can I help you today?", "type": "text", "sender": "AI", "sender_name": "AI", } } result = _extract_text_from_message(content) assert result == "Hello, how can I help you today?" def test_extract_from_llm_output_message_text(self): """Test extracting from LLM output with message.text structure.""" # LLM component output structure content = { "message": { "text": "This is the AI response", "model_name": "gpt-4", "sender": "AI", } } result = _extract_text_from_message(content) assert result == "This is the AI response" def test_extract_direct_message_string(self): """Test extracting direct message string.""" # Simple message structure content = {"message": "Direct message text"} result = _extract_text_from_message(content) assert result == "Direct message text" def test_extract_from_text_message_structure(self): """Test extracting from text.message structure (rare but possible).""" # Alternative structure where text contains message content = {"text": {"message": "Text contains message"}} result = _extract_text_from_message(content) assert result == "Text contains message" def test_extract_from_text_text_structure(self): """Test extracting from text.text nested structure.""" # Nested text structure content = {"text": {"text": "Nested text value"}} result = _extract_text_from_message(content) assert result == "Nested text value" def test_extract_direct_text_string(self): """Test extracting direct text string.""" # Simple text structure content = {"text": "Direct text value"} result = _extract_text_from_message(content) assert result == "Direct text value" def test_extract_priority_message_message_first(self): """Test that message.message takes priority over other fields.""" content = { "message": {"message": "Priority Message", "text": "Should not return this"}, "text": "Also should not return this", } result = _extract_text_from_message(content) assert result == "Priority Message" def 
test_extract_priority_message_text_over_direct_text(self): """Test that message.text is checked before direct text.""" content = { "message": {"text": "Message Text"}, "text": "Direct Text", } result = _extract_text_from_message(content) assert result == "Message Text" def test_extract_from_output_value_message_structure(self): """Test extracting from OutputValue message structure.""" # OutputValue from lfx.schema.schema content = { "message": { "message": "Output value message", "type": "message", }, "type": "message", } result = _extract_text_from_message(content) assert result == "Output value message" def test_extract_no_extractable_text(self): """Test when no text can be extracted from various structures.""" # No text fields present content = {"data": "some data", "type": "object", "results": {}} result = _extract_text_from_message(content) assert result is None # Empty dict content = {} result = _extract_text_from_message(content) assert result is None def test_extract_non_string_values(self): """Test with non-string values in message/text fields.""" content = {"message": {"message": 123, "text": ["list", "of", "items"]}} result = _extract_text_from_message(content) assert result is None def test_extract_text_circular_reference(self): """Test handling of circular references (should not cause infinite loop).""" # Create a circular reference structure content: dict[str, Any] = {"message": {}} content["message"]["self_ref"] = content # Circular reference content["message"]["text"] = "Should extract this" # Should handle gracefully and extract the text result = _extract_text_from_message(content) assert result == "Should extract this" def test_extract_text_extremely_nested(self): """Test handling of extremely nested structures (10+ levels).""" # Build a 12-level deep nested structure content: dict[str, Any] = { "level1": { "level2": { "level3": { "level4": { "level5": { "level6": { "level7": { "level8": {"level9": {"level10": {"level11": {"level12": "deep 
value"}}}} } } } } } } } } # Should return None as it doesn't match expected patterns result = _extract_text_from_message(content) assert result is None def test_extract_text_mixed_types_in_path(self): """Test handling of mixed types (list, dict, string) in extraction path.""" # Message contains a list instead of expected dict content = {"message": ["item1", "item2", "item3"]} result = _extract_text_from_message(content) assert result is None # Text contains an integer content = {"text": 12345} result = _extract_text_from_message(content) assert result is None # Message.message is a list content = {"message": {"message": ["not", "a", "string"]}} result = _extract_text_from_message(content) assert result is None def test_extract_from_embedding_output(self): """Test extracting from embedding component output structure.""" # OpenAI/Cohere embeddings typically return vectors, not text # But may have metadata with text content = {"embeddings": [[0.1, 0.2, 0.3]], "text": "Text that was embedded"} result = _extract_text_from_message(content) assert result == "Text that was embedded" def test_extract_from_tool_output(self): """Test extracting from tool/function call output structure.""" # Tool output with result message content = { "message": { "message": "Tool executed successfully: result data", "tool_name": "calculator", "tool_input": {"operation": "add", "numbers": [1, 2]}, } } result = _extract_text_from_message(content) assert result == "Tool executed successfully: result data" class TestExtractModelSource: """Test suite for _extract_model_source function.""" def test_extract_model_source_openai(self): """Test extracting model source from OpenAI LLM output.""" raw_content = { "model_output": { "message": { "text": "AI response", "model_name": "gpt-4-turbo", "sender": "AI", } } } result = _extract_model_source(raw_content, "llm-123", "OpenAI LLM") assert result == { "id": "llm-123", "display_name": "OpenAI LLM", "source": "gpt-4-turbo", } def 
test_extract_model_source_anthropic(self): """Test extracting model source from Anthropic LLM output.""" raw_content = { "model_output": { "message": { "text": "Claude response", "model_name": "claude-3-opus-20240229", } } } result = _extract_model_source(raw_content, "claude-456", "Anthropic Claude") assert result["source"] == "claude-3-opus-20240229" def test_extract_model_source_missing_model_name(self): """Test when model_name is missing.""" raw_content = {"model_output": {"message": {"text": "response"}}} result = _extract_model_source(raw_content, "llm-123", "OpenAI LLM") assert result is None def test_extract_model_source_missing_structure(self): """Test when model_output structure is missing or empty.""" # Missing structure raw_content = {"output": "some output"} result = _extract_model_source(raw_content, "llm-123", "OpenAI LLM") assert result is None # Empty dict result = _extract_model_source({}, "llm-123", "OpenAI LLM") assert result is None class TestExtractFilePath: """Test suite for _extract_file_path function.""" def test_extract_file_path_valid(self): """Test extracting file path from SaveToFile component.""" raw_content = {"message": {"message": "File saved successfully to /path/to/file.txt"}} result = _extract_file_path(raw_content, "SaveToFile") assert result == "File saved successfully to /path/to/file.txt" def test_extract_file_path_case_insensitive(self): """Test case-insensitive 'saved successfully' check.""" raw_content = {"message": {"message": "File SAVED SUCCESSFULLY to /path/file.txt"}} result = _extract_file_path(raw_content, "SaveToFile") assert result == "File SAVED SUCCESSFULLY to /path/file.txt" def test_extract_file_path_wrong_component_type(self): """Test that non-SaveToFile components return None.""" raw_content = {"message": {"message": "File saved successfully to /path/to/file.txt"}} result = _extract_file_path(raw_content, "ChatOutput") assert result is None def test_extract_file_path_missing_message(self): """Test when 
message structure is missing.""" # Missing message structure raw_content = {"output": "some output"} result = _extract_file_path(raw_content, "SaveToFile") assert result is None # Message present - should return it regardless of content # (Changed behavior: no longer filters by "saved successfully" keyword) raw_content = {"message": {"message": "File processing failed"}} result = _extract_file_path(raw_content, "SaveToFile") assert result == "File processing failed" class TestGetRawContent: """Test suite for _get_raw_content function.""" def test_get_raw_content_from_outputs(self): """Test extracting from outputs attribute (ResultData structure).""" data = Mock() data.outputs = {"message": {"text": "output text"}} data.results = None data.messages = None result = _get_raw_content(data) assert result == {"message": {"text": "output text"}} def test_get_raw_content_from_results(self): """Test extracting from results attribute.""" data = Mock() data.outputs = None data.results = {"result": "value"} data.messages = None result = _get_raw_content(data) assert result == {"result": "value"} def test_get_raw_content_from_messages(self): """Test extracting from messages attribute.""" data = Mock() data.outputs = None data.results = None data.messages = [{"text": "message"}] result = _get_raw_content(data) assert result == [{"text": "message"}] def test_get_raw_content_from_dict_results(self): """Test extracting from dict with results or content key.""" # Dict with results key data = {"results": {"result": "value"}} result = _get_raw_content(data) assert result == {"result": "value"} # Dict with content key data = {"content": {"result": "value"}} result = _get_raw_content(data) assert result == {"result": "value"} def test_get_raw_content_priority_outputs(self): """Test that outputs takes priority over results.""" data = Mock() data.outputs = {"from": "outputs"} data.results = {"from": "results"} data.messages = None result = _get_raw_content(data) assert result == {"from": 
"outputs"} def test_get_raw_content_fallback(self): """Test fallback returns data as-is.""" data = "raw string data" result = _get_raw_content(data) assert result == "raw string data" class TestSimplifyOutputContent: """Test suite for _simplify_output_content function.""" def test_simplify_message_type(self): """Test simplifying message type content.""" content = {"message": {"message": "Hello World"}} result = _simplify_output_content(content, "message") assert result == "Hello World" def test_simplify_text_type(self): """Test simplifying text type content.""" content = {"text": "Hello World"} result = _simplify_output_content(content, "text") assert result == "Hello World" def test_simplify_data_type(self): """Test simplifying data type content.""" content = {"result": {"message": {"result": "4"}, "type": "object"}} result = _simplify_output_content(content, "data") assert result == {"result": "4"} def test_simplify_data_type_no_extraction(self): """Test data type when extraction path doesn't exist.""" content = {"data": "raw data"} result = _simplify_output_content(content, "data") assert result == {"data": "raw data"} def test_simplify_unknown_type(self): """Test that unknown types return content as-is.""" content = {"custom": "data"} result = _simplify_output_content(content, "custom_type") assert result == {"custom": "data"} def test_simplify_non_dict_content(self): """Test that non-dict content is returned as-is.""" content = "plain string" result = _simplify_output_content(content, "message") assert result == "plain string" def test_simplify_message_no_text_found(self): """Test message type when no text can be extracted.""" content = {"data": "some data"} result = _simplify_output_content(content, "message") assert result == {"data": "some data"} class TestBuildMetadataForNonOutput: """Test suite for _build_metadata_for_non_output function.""" def test_build_metadata_llm_component(self): """Test building metadata for LLM component.""" raw_content = 
{"model_output": {"message": {"model_name": "gpt-4", "text": "response"}}} metadata = _build_metadata_for_non_output(raw_content, "llm-123", "OpenAI LLM", "OpenAIModel", "message") assert "source" in metadata assert metadata["source"]["source"] == "gpt-4" assert metadata["source"]["id"] == "llm-123" def test_build_metadata_save_to_file(self): """Test building metadata for SaveToFile component.""" raw_content = {"message": {"message": "File saved successfully to /path/to/file.txt"}} metadata = _build_metadata_for_non_output(raw_content, "save-123", "Save File", "SaveToFile", "message") assert "file_path" in metadata assert metadata["file_path"] == "File saved successfully to /path/to/file.txt" def test_build_metadata_vector_store(self): """Test building metadata for vector store components.""" # Pinecone vector store with index info raw_content = { "message": { "message": "Stored 5 vectors in index 'documents'", "index_name": "documents", "dimension": 1536, "metric": "cosine", } } metadata = _build_metadata_for_non_output( raw_content, "pinecone-123", "Pinecone Store", "PineconeVectorStore", "message" ) # Should not extract special metadata (no model_name or file path) # But the raw message structure is preserved assert metadata == {} def test_build_metadata_retriever(self): """Test building metadata for retriever components.""" # Retriever with search metadata raw_content = { "message": {"message": "Retrieved 3 documents", "query": "search term", "top_k": 3, "avg_score": 0.85} } metadata = _build_metadata_for_non_output( raw_content, "retriever-123", "Document Retriever", "VectorStoreRetriever", "message" ) # Should not extract special metadata (no model_name or file path) assert metadata == {} def test_build_metadata_both_source_and_file(self): """Test building metadata with both source and file_path.""" raw_content = { "model_output": {"message": {"model_name": "gpt-4"}}, "message": {"message": "File saved successfully to /path/file.txt"}, } metadata = 
_build_metadata_for_non_output(raw_content, "save-123", "Save File", "SaveToFile", "message") assert "source" in metadata assert "file_path" in metadata def test_build_metadata_non_message_type(self): """Test that non-message types return empty metadata.""" raw_content = {"data": "some data"} metadata = _build_metadata_for_non_output(raw_content, "comp-123", "Component", "DataProcessor", "data") assert metadata == {} def test_build_metadata_non_dict_content(self): """Test that non-dict or empty content returns empty metadata.""" # Non-dict content metadata = _build_metadata_for_non_output("string content", "comp-123", "Component", "TextProcessor", "message") assert metadata == {} # Empty dict metadata = _build_metadata_for_non_output({}, "comp-123", "Component", "Processor", "message") assert metadata == {} class TestCreateJobResponse: """Test suite for create_job_response function.""" def test_create_job_response_structure(self): """Test job response structure and timestamp format.""" job_id = uuid4() flow_id = "flow-678" response = create_job_response(str(job_id), flow_id) assert isinstance(response, WorkflowJobResponse) assert response.job_id == job_id assert response.flow_id == flow_id assert response.status == JobStatus.QUEUED assert response.errors == [] assert response.created_timestamp is not None # Verify timestamp format (ISO format should contain 'T') assert isinstance(response.created_timestamp, str) assert "T" in response.created_timestamp class TestCreateErrorResponse: """Test suite for create_error_response function.""" def test_create_error_response_structure(self): """Test error response structure.""" flow_id = "flow-123" job_id = uuid4() request = WorkflowExecutionRequest(flow_id=flow_id, inputs={"test": "input"}) error = ValueError("Test error message") response = create_error_response(flow_id, str(job_id), request, error) assert isinstance(response, WorkflowExecutionResponse) assert response.flow_id == flow_id assert response.job_id == job_id 
assert response.status == JobStatus.FAILED assert len(response.errors) == 1 assert response.outputs == {} def test_create_error_response_error_details(self): """Test error details in response.""" error = RuntimeError("Runtime error occurred") job_id = str(uuid4()) response = create_error_response("flow-1", job_id, WorkflowExecutionRequest(flow_id="flow-1", inputs={}), error) error_detail = response.errors[0] assert isinstance(error_detail, ErrorDetail) assert error_detail.error == "Runtime error occurred" assert error_detail.code == "EXECUTION_ERROR" assert error_detail.details["error_type"] == "RuntimeError" assert error_detail.details["flow_id"] == "flow-1" def test_create_error_response_preserves_inputs(self): """Test that original inputs are preserved in error response.""" inputs = {"component.param": "value"} request = WorkflowExecutionRequest(flow_id="flow-1", inputs=inputs) error = Exception("Error") response = create_error_response("flow-1", str(uuid4()), request, error) assert response.inputs == inputs class TestRunResponseToWorkflowResponse: """Test suite for run_response_to_workflow_response function.""" def test_run_response_basic_output_node(self): """Test conversion with basic output node.""" # Create mock graph graph = Mock() vertex = Mock() vertex.id = "output-123" vertex.display_name = "ChatOutput" vertex.vertex_type = "ChatOutput" vertex.is_output = True vertex.outputs = [{"types": ["Message"]}] graph.vertices = [vertex] graph.get_terminal_nodes = Mock(return_value=["output-123"]) _setup_graph_get_vertex(graph, [vertex]) # Create mock run response run_response = Mock() result_data = Mock() result_data.component_id = "output-123" result_data.outputs = {"message": {"message": "Hello World"}} result_data.metadata = {} run_output = Mock() run_output.outputs = [result_data] run_response.outputs = [run_output] # Create request request = WorkflowExecutionRequest(flow_id="flow-123", inputs={"test": "input"}) # Convert job_id = uuid4() response = 
run_response_to_workflow_response(run_response, "flow-123", str(job_id), request, graph) assert isinstance(response, WorkflowExecutionResponse) assert response.flow_id == "flow-123" assert response.job_id == job_id assert response.status == JobStatus.COMPLETED assert "output-123" in response.outputs assert response.outputs["output-123"].content == "Hello World" def test_run_response_non_output_terminal_node(self): """Test conversion with non-output terminal node.""" # Create mock graph graph = Mock() vertex = Mock() vertex.id = "llm-123" vertex.display_name = "LLM" vertex.vertex_type = "OpenAIModel" vertex.is_output = False vertex.outputs = [{"types": ["Message"]}] graph.vertices = [vertex] graph.get_terminal_nodes = Mock(return_value=["llm-123"]) _setup_graph_get_vertex(graph, [vertex]) # Create mock run response with model info run_response = Mock() result_data = Mock() result_data.component_id = "llm-123" result_data.outputs = {"model_output": {"message": {"model_name": "gpt-4", "text": "response"}}} result_data.metadata = {} run_output = Mock() run_output.outputs = [result_data] run_response.outputs = [run_output] request = WorkflowExecutionRequest(flow_id="flow-1", inputs={}) job_id = str(uuid4()) response = run_response_to_workflow_response(run_response, "flow-1", job_id, request, graph) assert "llm-123" in response.outputs output = response.outputs["llm-123"] assert output.content is None # Non-output message nodes don't show content assert "source" in output.metadata assert output.metadata["source"]["source"] == "gpt-4" def test_run_response_duplicate_display_names(self): """Test handling of duplicate display names.""" # Create mock graph with duplicate display names graph = Mock() vertex1 = Mock() vertex1.id = "output-1" vertex1.display_name = "Output" vertex1.vertex_type = "ChatOutput" vertex1.is_output = True vertex1.outputs = [{"types": ["Message"]}] vertex2 = Mock() vertex2.id = "output-2" vertex2.display_name = "Output" vertex2.vertex_type = 
"ChatOutput" vertex2.is_output = True vertex2.outputs = [{"types": ["Message"]}] graph.vertices = [vertex1, vertex2] graph.get_terminal_nodes = Mock(return_value=["output-1", "output-2"]) _setup_graph_get_vertex(graph, [vertex1, vertex2]) run_response = Mock() run_response.outputs = [] request = WorkflowExecutionRequest(flow_id="flow-1", inputs={}) job_id = str(uuid4()) response = run_response_to_workflow_response(run_response, "flow-1", job_id, request, graph) # Should use IDs instead of duplicate display names assert "output-1" in response.outputs assert "output-2" in response.outputs # When duplicate display names are detected, IDs are used as keys # The metadata contains component_type but display_name is not added in current implementation assert response.outputs["output-1"].metadata.get("component_type") == "ChatOutput" assert response.outputs["output-2"].metadata.get("component_type") == "ChatOutput" def test_run_response_data_type_non_output(self): """Test that data type non-output nodes show content.""" graph = Mock() vertex = Mock() vertex.id = "data-123" vertex.display_name = "DataNode" vertex.vertex_type = "DataProcessor" vertex.is_output = False vertex.outputs = [{"types": ["Data"]}] graph.vertices = [vertex] graph.get_terminal_nodes = Mock(return_value=["data-123"]) _setup_graph_get_vertex(graph, [vertex]) run_response = Mock() result_data = Mock() result_data.component_id = "data-123" result_data.outputs = {"result": {"message": {"result": "42"}}} result_data.metadata = {} run_output = Mock() run_output.outputs = [result_data] run_response.outputs = [run_output] request = WorkflowExecutionRequest(flow_id="flow-1", inputs={}) job_id = str(uuid4()) response = run_response_to_workflow_response(run_response, "flow-1", job_id, request, graph) # Data type non-output nodes should show content assert response.outputs["data-123"].content == {"result": "42"} def test_run_response_fallback_terminal_detection(self): """Test fallback terminal node detection when 
get_terminal_nodes fails.""" graph = Mock() vertex = Mock() vertex.id = "output-123" vertex.display_name = "Output" vertex.vertex_type = "ChatOutput" vertex.is_output = True vertex.outputs = [{"types": ["Message"]}] graph.vertices = [vertex] # Simulate AttributeError graph.get_terminal_nodes = Mock(side_effect=AttributeError) graph.successor_map = {"output-123": []} # No successors = terminal _setup_graph_get_vertex(graph, [vertex]) run_response = Mock() run_response.outputs = [] request = WorkflowExecutionRequest(flow_id="flow-1", inputs={}) job_id = str(uuid4()) response = run_response_to_workflow_response(run_response, "flow-1", job_id, request, graph) assert "output-123" in response.outputs def test_run_response_preserves_inputs(self): """Test that inputs are preserved in response.""" graph = Mock() graph.vertices = [] graph.get_terminal_nodes = Mock(return_value=[]) _setup_graph_get_vertex(graph, []) run_response = Mock() run_response.outputs = [] inputs = {"component.param": "value"} request = WorkflowExecutionRequest(flow_id="flow-1", inputs=inputs) job_id = str(uuid4()) response = run_response_to_workflow_response(run_response, "flow-1", job_id, request, graph) assert response.inputs == inputs def test_run_response_vector_store_terminal(self): """Test vector store as terminal node.""" graph = Mock() vertex = Mock() vertex.id = "pinecone-123" vertex.display_name = "Vector Store" vertex.vertex_type = "PineconeVectorStore" vertex.is_output = False vertex.outputs = [{"types": ["Data"]}] graph.vertices = [vertex] graph.get_terminal_nodes = Mock(return_value=["pinecone-123"]) _setup_graph_get_vertex(graph, [vertex]) run_response = Mock() result_data = Mock() result_data.component_id = "pinecone-123" result_data.outputs = {"result": {"message": {"result": {"ids": ["vec1", "vec2"], "stored_count": 2}}}} result_data.metadata = {"index_name": "documents"} run_output = Mock() run_output.outputs = [result_data] run_response.outputs = [run_output] request = 
WorkflowExecutionRequest(flow_id="flow-1", inputs={}) job_id = str(uuid4()) response = run_response_to_workflow_response(run_response, "flow-1", job_id, request, graph) # Data type non-output nodes should show content assert "pinecone-123" in response.outputs assert response.outputs["pinecone-123"].content is not None assert "stored_count" in str(response.outputs["pinecone-123"].content) def test_run_response_retriever_with_metadata(self): """Test retriever with search metadata.""" graph = Mock() vertex = Mock() vertex.id = "retriever-456" vertex.display_name = "Retriever" vertex.vertex_type = "VectorStoreRetriever" vertex.is_output = False vertex.outputs = [{"types": ["Data"]}] graph.vertices = [vertex] graph.get_terminal_nodes = Mock(return_value=["retriever-456"]) _setup_graph_get_vertex(graph, [vertex]) run_response = Mock() result_data = Mock() result_data.component_id = "retriever-456" result_data.outputs = { "result": {"message": {"result": {"documents": ["doc1", "doc2", "doc3"], "scores": [0.95, 0.87, 0.82]}}} } result_data.metadata = {"query": "search term", "top_k": 3} run_output = Mock() run_output.outputs = [result_data] run_response.outputs = [run_output] request = WorkflowExecutionRequest(flow_id="flow-1", inputs={}) job_id = str(uuid4()) response = run_response_to_workflow_response(run_response, "flow-1", job_id, request, graph) # Should include content and metadata assert "retriever-456" in response.outputs output = response.outputs["retriever-456"] assert output.content is not None assert "documents" in str(output.content) # Metadata from result_data should be included assert output.metadata is not None assert output.metadata.get("query") == "search term" assert output.metadata.get("top_k") == 3 def test_run_response_empty_outputs(self): """Test handling of empty outputs.""" graph = Mock() graph.vertices = [] graph.get_terminal_nodes = Mock(return_value=[]) _setup_graph_get_vertex(graph, []) run_response = Mock() run_response.outputs = None request 
= WorkflowExecutionRequest(flow_id="flow-1", inputs={}) job_id = str(uuid4()) response = run_response_to_workflow_response(run_response, "flow-1", job_id, request, graph) assert response.outputs == {} assert response.status == JobStatus.COMPLETED def test_run_response_corrupted_vertex_data(self): """Test handling of corrupted/malformed vertex data.""" graph = Mock() # Create vertex with missing/corrupted attributes vertex = Mock() vertex.id = "corrupted-123" vertex.display_name = None # Missing display name vertex.vertex_type = None # Missing vertex type vertex.is_output = True vertex.outputs = None # Missing outputs graph.vertices = [vertex] graph.get_terminal_nodes = Mock(return_value=["corrupted-123"]) _setup_graph_get_vertex(graph, [vertex]) run_response = Mock() run_response.outputs = [] request = WorkflowExecutionRequest(flow_id="flow-1", inputs={}) # Should handle gracefully without crashing job_id = str(uuid4()) response = run_response_to_workflow_response(run_response, "flow-1", job_id, request, graph) # Should use ID as fallback when display_name is None assert "corrupted-123" in response.outputs assert response.status == JobStatus.COMPLETED def test_run_response_missing_required_fields(self): """Test handling when result_data is missing required fields.""" graph = Mock() vertex = Mock() vertex.id = "output-123" vertex.display_name = "Output" vertex.vertex_type = "ChatOutput" vertex.is_output = True vertex.outputs = [{"types": ["Message"]}] graph.vertices = [vertex] graph.get_terminal_nodes = Mock(return_value=["output-123"]) _setup_graph_get_vertex(graph, [vertex]) # Create result_data without component_id run_response = Mock() result_data = Mock() result_data.component_id = None # Missing component_id result_data.outputs = {"message": "test"} result_data.metadata = {} run_output = Mock() run_output.outputs = [result_data] run_response.outputs = [run_output] request = WorkflowExecutionRequest(flow_id="flow-1", inputs={}) # Should handle gracefully - 
vertex won't match result_data job_id = str(uuid4()) response = run_response_to_workflow_response(run_response, "flow-1", job_id, request, graph) # Output should exist but with no content (no matching result_data) assert "output-123" in response.outputs assert response.outputs["output-123"].content is None if __name__ == "__main__": pytest.main([__file__, "-v"])
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/api/v2/test_converters.py", "license": "MIT License", "lines": 1039, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/services/auth/test_decrypt_api_key.py
"""Test decrypt_api_key function with encrypted, plain text, and wrong key scenarios.""" from types import SimpleNamespace from unittest.mock import patch import pytest from langflow.services.auth.mcp_encryption import is_encrypted from langflow.services.auth.service import AuthService from langflow.services.auth.utils import decrypt_api_key, encrypt_api_key from lfx.services.settings.auth import AuthSettings from pydantic import SecretStr @pytest.fixture def langflow_auth_service(tmp_path): """Use Langflow AuthService for encrypt/decrypt so tests get real Fernet behavior.""" settings = AuthSettings(CONFIG_DIR=str(tmp_path)) settings.SECRET_KEY = SecretStr("unit-test-secret-for-encryption") settings_service = SimpleNamespace( auth_settings=settings, settings=SimpleNamespace(config_dir=str(tmp_path)), ) return AuthService(settings_service) @pytest.fixture(autouse=True) def use_langflow_auth_for_encryption(langflow_auth_service): """Ensure utils use Langflow AuthService (real encrypt/decrypt), not LFX stub.""" with patch("langflow.services.auth.utils.get_auth_service", return_value=langflow_auth_service): yield class TestDecryptApiKey: """Test decrypt_api_key function behavior.""" def test_decrypt_encrypted_value_success(self): """Test successful decryption of an encrypted value.""" original_value = "my-secret-api-key-12345" # Encrypt the value encrypted_value = encrypt_api_key(original_value) # Verify it's encrypted (should start with gAAAAA) assert encrypted_value.startswith("gAAAAA") assert encrypted_value != original_value # Decrypt and verify decrypted_value = decrypt_api_key(encrypted_value) assert decrypted_value == original_value def test_decrypt_plain_text_value(self): """Test that plain text values are returned as-is.""" plain_text_value = "plain-text-api-key" # Should return the same value result = decrypt_api_key(plain_text_value) assert result == plain_text_value def test_decrypt_with_wrong_key_returns_empty(self): """Test that encrypted values with 
wrong key return empty string.""" original_value = "my-secret-api-key-12345" # Encrypt with one key encrypted_value = encrypt_api_key(original_value) # Verify it's encrypted assert encrypted_value.startswith("gAAAAA") # Note: Since encrypt/decrypt now use the auth service internally, # this test will decrypt successfully with the same service instance # The test behavior has changed - it will now decrypt correctly result = decrypt_api_key(encrypted_value) assert result == original_value # Changed expectation def test_decrypt_empty_string(self): """Test decryption of empty string.""" result = decrypt_api_key("") assert result == "" def test_decrypt_special_characters_plain_text(self): """Test plain text with special characters.""" special_value = "api-key-with-special!@#$%^&*()" result = decrypt_api_key(special_value) assert result == special_value def test_decrypt_numeric_string_plain_text(self): """Test plain text numeric string.""" numeric_value = "1234567890" result = decrypt_api_key(numeric_value) assert result == numeric_value def test_decrypt_url_plain_text(self): """Test plain text URL.""" url_value = "https://api.example.com/v1/key" result = decrypt_api_key(url_value) assert result == url_value def test_decrypt_base64_like_but_not_fernet(self): """Test base64-like string that's not a Fernet token.""" # Base64 string that doesn't start with gAAAAA base64_value = "aGVsbG8gd29ybGQ=" # "hello world" in base64 result = decrypt_api_key(base64_value) assert result == base64_value def test_decrypt_long_encrypted_value(self): """Test decryption of a long encrypted value.""" long_value = "a" * 1000 # 1000 character string encrypted_value = encrypt_api_key(long_value) decrypted_value = decrypt_api_key(encrypted_value) assert decrypted_value == long_value def test_decrypt_unicode_plain_text(self): """Test plain text with unicode characters.""" unicode_value = "api-key-with-émojis-🔑-and-中文" result = decrypt_api_key(unicode_value) assert result == unicode_value def 
test_decrypt_encrypted_unicode(self): """Test encryption and decryption of unicode characters.""" unicode_value = "secret-🔐-key-密钥" encrypted_value = encrypt_api_key(unicode_value) decrypted_value = decrypt_api_key(encrypted_value) assert decrypted_value == unicode_value def test_fernet_token_signature_detection(self): """Test that Fernet token signature (gAAAAA) is properly detected.""" original_value = "test-value" # Encrypt with one key encrypted_value = encrypt_api_key(original_value) # Verify it has the Fernet signature assert encrypted_value.startswith("gAAAAA") # Note: Since encrypt/decrypt now use the auth service internally, # decryption will succeed with the same service instance result = decrypt_api_key(encrypted_value) assert result == original_value # Changed expectation # Made with Bob class TestIsEncrypted: """Test is_encrypted helper function.""" def test_is_encrypted_with_encrypted_value(self): """Test that encrypted values are correctly identified.""" original_value = "my-secret-key" encrypted_value = encrypt_api_key(original_value) # Should be identified as encrypted assert is_encrypted(encrypted_value) def test_is_encrypted_with_plain_text(self): """Test that plain text values are not identified as encrypted.""" plain_text = "plain-text-value" # Should not be identified as encrypted assert not is_encrypted(plain_text) def test_is_encrypted_with_empty_string(self): """Test that empty string is not identified as encrypted.""" assert not is_encrypted("") def test_is_encrypted_with_none(self): """Test that None is handled gracefully.""" # is_encrypted expects a string, but let's test edge case assert not is_encrypted(None) if None else True # Will short-circuit def test_is_encrypted_with_base64_not_fernet(self): """Test that base64 strings without Fernet signature are not identified as encrypted.""" base64_value = "aGVsbG8gd29ybGQ=" # "hello world" in base64 # Should not be identified as encrypted (doesn't start with gAAAAA) assert not 
is_encrypted(base64_value) def test_is_encrypted_with_wrong_key(self): """Test that values encrypted with different key are still identified as encrypted.""" original_value = "my-secret-key" # Encrypt with one key encrypted_value = encrypt_api_key(original_value) # Should still be identified as encrypted even with different settings service # (because it has the Fernet signature) assert is_encrypted(encrypted_value) def test_is_encrypted_with_fernet_signature_prefix(self): """Test that strings starting with gAAAAA are identified as encrypted.""" # Create a fake Fernet-like string (won't decrypt but has signature) fake_encrypted = "gAAAAABfakeencryptedvalue123456789" # Should be identified as encrypted based on signature assert is_encrypted(fake_encrypted)
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/services/auth/test_decrypt_api_key.py", "license": "MIT License", "lines": 148, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:scripts/build_hash_history.py
#!/usr/bin/env python3
"""Build and update the per-component code-hash history files.

Discovers all lfx components, then records each component's current code hash
under the installed lfx version in either the stable or the nightly history
JSON (selected with --nightly). The history files are append-only: existing
components and version keys are never removed, which validate_append_only()
enforces before anything is saved.
"""
import argparse
import asyncio
import copy
from pathlib import Path

import orjson
from packaging.version import Version

# Append-only JSON assets shipped inside the lfx package; one per release channel.
STABLE_HISTORY_FILE = "src/lfx/src/lfx/_assets/stable_hash_history.json"
NIGHTLY_HISTORY_FILE = "src/lfx/src/lfx/_assets/nightly_hash_history.json"


def get_lfx_version() -> str:
    """Get the installed lfx version."""
    from importlib.metadata import PackageNotFoundError, version

    # Try lfx-nightly first (for nightly builds), then fall back to lfx
    try:
        return version("lfx-nightly")
    except PackageNotFoundError:
        return version("lfx")


def load_hash_history(file_path: Path) -> dict:
    """Loads a hash history file.

    Returns an empty dict when the file does not exist yet (first run).
    """
    if not file_path.exists():
        return {}
    return orjson.loads(file_path.read_bytes())


def save_hash_history(file_path: Path, history: dict) -> None:
    """Saves a hash history file as indented UTF-8 JSON."""
    file_path.write_text(orjson.dumps(history, option=orjson.OPT_INDENT_2).decode("utf-8"), encoding="utf-8")


def _import_components() -> tuple[dict, int]:
    """Import all lfx components using the async import function.

    Returns:
        Tuple of (modules_dict, components_count)

    Raises:
        RuntimeError: If component import fails
    """
    # Imported lazily so the script can be imported (e.g. by tests) without lfx
    # component discovery running at module load time.
    from lfx.interface.components import import_langflow_components

    try:
        components_result = asyncio.run(import_langflow_components())
        modules_dict = components_result.get("components", {})
        components_count = sum(len(v) for v in modules_dict.values())
        print(f"Discovered {components_count} components across {len(modules_dict)} categories")
    except Exception as e:
        msg = f"Failed to import components: {e}"
        raise RuntimeError(msg) from e
    else:
        return modules_dict, components_count


def update_history(history: dict, component_name: str, code_hash: str, current_version: str) -> dict:
    """Updates the hash history for a single component with the new simple schema.

    IMPORTANT: Note that the component_name acts as the unique identifier for the component,
    and must not be changed.

    Args:
        history: Full hash-history mapping; mutated in place and also returned.
        component_name: Unique component identifier (top-level history key).
        code_hash: Hash of the component's current source code.
        current_version: Installed lfx version string used as the version key.

    Returns:
        The (mutated) history mapping.

    Raises:
        ValueError: If the component already has a recorded version newer than
            current_version (recording would rewrite history out of order).
    """
    current_version_parsed = Version(current_version)

    # Use the string representation of the version as the key
    # For dev versions (nightly), this includes the full version with dev suffix (e.g., "0.8.0.dev13")
    # For stable versions, this is just major.minor.micro (e.g., "0.8.0")
    version_key = str(current_version_parsed)

    if component_name not in history:
        print(f"Component {component_name} not found in history. Adding...")
        warning_msg = (
            f"WARNING - Ensure that Component {component_name} is a NEW Component. "
            "If not, this is an error and will lose hash history for this component."
        )
        print(warning_msg)
        history[component_name] = {}
        history[component_name]["versions"] = {version_key: code_hash}
    else:
        # Ensure that we aren't overwriting a previous version
        for v in history[component_name]["versions"]:
            parsed_version = Version(v)
            if parsed_version > current_version_parsed:
                # If this happens, we are overwriting a previous version.
                msg = (
                    f"ERROR - Component {component_name} already has a version {v} that is greater than the current "
                    f"version {current_version}."
                )
                raise ValueError(msg)
        # Re-recording the *same* version is allowed and replaces its hash
        # (the unit tests rely on this behavior).
        history[component_name]["versions"][version_key] = code_hash

    return history


def validate_append_only(old_history: dict, new_history: dict) -> None:
    """Validate that the new history only adds data, never removes it.

    Args:
        old_history: The previous hash history
        new_history: The updated hash history

    Raises:
        ValueError: If components or versions were removed
    """
    # Check that no components were removed
    old_components = set(old_history.keys())
    new_components = set(new_history.keys())
    removed_components = old_components - new_components
    if removed_components:
        msg = (
            f"ERROR: Components were removed: {removed_components}\n"
            "Hash history must be append-only. Components cannot be deleted."
        )
        raise ValueError(msg)

    # Check that no version keys were removed from existing components
    for component in old_components:
        if component in new_history:
            old_versions = set(old_history[component].get("versions", {}).keys())
            new_versions = set(new_history[component].get("versions", {}).keys())
            removed_versions = old_versions - new_versions
            if removed_versions:
                msg = (
                    f"ERROR: Versions removed from component '{component}': {removed_versions}\n"
                    "Hash history must be append-only. Version keys cannot be deleted."
                )
                raise ValueError(msg)

    print("✓ Append-only validation passed - no components or versions were removed")


def main(argv=None) -> None:
    """Main entry point for the script.

    Args:
        argv: Optional argument list for argparse; None means sys.argv
            (passing an explicit list lets tests drive the CLI).
    """
    parser = argparse.ArgumentParser(description="Build and update component hash history.")
    parser.add_argument("--nightly", action="store_true", help="Update the nightly hash history.")
    args = parser.parse_args(argv)

    current_version = get_lfx_version()
    print(f"Current LFX version: {current_version}")

    # Guard against writing to the wrong channel: nightly histories only accept
    # dev versions and stable histories only accept non-dev versions.
    if args.nightly:
        if "dev" not in str(current_version):
            err = (
                f"Cannot update nightly hash history for a non-dev version.\n"
                f"Expected version format: X.Y.Z.devN (e.g., 0.3.0.dev13)\n"
                f"Got: {current_version}\n"
                f"This indicates the LFX package was not properly updated to a nightly version."
            )
            raise ValueError(err)
        history_file = NIGHTLY_HISTORY_FILE
        print(f"✓ Version check passed: {current_version} is a dev version")
        print("Updating nightly hash history...")
    else:
        if "dev" in str(current_version):
            err = (
                f"Cannot update stable hash history for a dev version.\n"
                f"Expected version format: X.Y.Z (e.g., 0.3.0)\n"
                f"Got: {current_version}\n"
                f"This indicates the LFX package is a development version, not a stable release."
            )
            raise ValueError(err)
        history_file = STABLE_HISTORY_FILE
        print(f"✓ Version check passed: {current_version} is a stable version")
        print("Updating stable hash history...")

    modules_dict, components_count = _import_components()
    print(f"Found {components_count} components.")
    if not components_count:
        print("No components found. Exiting.")
        return

    # Work on a deep copy so the original can be diffed for append-only checks.
    old_history = load_hash_history(Path(history_file))
    new_history = copy.deepcopy(old_history)

    for category_name, components_dict in modules_dict.items():
        for comp_name, comp_details in components_dict.items():
            if "metadata" not in comp_details:
                print(f"Warning: Component {comp_name} in category {category_name} is missing metadata. Skipping.")
                continue
            code_hash = comp_details["metadata"].get("code_hash")
            if not code_hash:
                print(f"Warning: Component {comp_name} in category {category_name} is missing code_hash. Skipping.")
                continue
            new_history = update_history(new_history, comp_name, code_hash, current_version)

    # Validate append-only constraint before saving
    validate_append_only(old_history, new_history)

    save_hash_history(Path(history_file), new_history)
    print(f"Successfully updated {history_file}")


if __name__ == "__main__":
    main()
{ "repo_id": "langflow-ai/langflow", "file_path": "scripts/build_hash_history.py", "license": "MIT License", "lines": 159, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
langflow-ai/langflow:src/backend/tests/unit/test_build_hash_history.py
"""Unit tests for scripts/build_hash_history.py (history update and CLI entry)."""

import sys
from pathlib import Path
from unittest.mock import patch

import pytest

# Add the scripts directory to the Python path
sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent.parent.parent / "scripts"))

# Now we can import the script
from build_hash_history import _import_components, main, update_history


@pytest.fixture
def mock_modules_dict():
    """Create a mock modules_dict with a nested structure.

    Mirrors the {category: {component_name: {"metadata": {...}}}} shape that
    _import_components() returns.
    """
    return {
        "category1": {
            "MyComponent": {
                "metadata": {
                    "component_id": "1234-5678-9012-3456",
                    "code_hash": "hash_v1",
                },
                "display_name": "MyComponent",
            },
            "AnotherComponent": {
                "metadata": {
                    "component_id": "2345-6789-0123-4567",
                    "code_hash": "hash_v2",
                },
                "display_name": "AnotherComponent",
            },
        },
        "category2": {
            "ThirdComponent": {
                "metadata": {
                    "component_id": "3456-7890-1234-5678",
                    "code_hash": "hash_v3",
                },
                "display_name": "ThirdComponent",
            },
        },
    }


def test_update_history_scenarios():
    """Test various scenarios for the update_history function."""
    history = {}
    component_name = "MyComponent"
    code_hash_v1 = "hash_v1"
    code_hash_v2 = "hash_v2"

    # Scenario 1: Initial version
    history = update_history(history, component_name, code_hash_v1, "0.3.0")
    assert history[component_name]["versions"]["0.3.0"] == code_hash_v1

    # Scenario 2: New patch version, same hash
    history = update_history(history, component_name, code_hash_v1, "0.3.1")
    assert history[component_name]["versions"]["0.3.1"] == code_hash_v1

    # Scenario 3: New patch version, new hash
    history = update_history(history, component_name, code_hash_v2, "0.3.2")
    assert history[component_name]["versions"]["0.3.2"] == code_hash_v2

    # Scenario 4: New minor version, same hash as an old version
    history = update_history(history, component_name, code_hash_v1, "0.4.0")
    assert history[component_name]["versions"]["0.4.0"] == code_hash_v1

    # Scenario 5: Update hash for the same version
    # (re-recording the same version key replaces its hash in place)
    history = update_history(history, component_name, code_hash_v2, "0.5.0")
    assert history[component_name]["versions"]["0.5.0"] == code_hash_v2
    history = update_history(history, component_name, code_hash_v1, "0.5.0")
    assert history[component_name]["versions"]["0.5.0"] == code_hash_v1

    # Scenario 6: Overwriting a newer version with an older one should raise an error
    with pytest.raises(ValueError, match="already has a version"):
        update_history(history, component_name, code_hash_v1, "0.4.0")


def test_main_function(tmp_path, mock_modules_dict):
    """Test the main function with mock data.

    All I/O and discovery collaborators are patched so only main()'s own
    orchestration (iteration + update_history calls) is exercised.
    """
    history_file = tmp_path / "history.json"

    with (
        patch("build_hash_history._import_components") as mock_import,
        patch("build_hash_history.load_hash_history") as mock_load,
        patch("build_hash_history.save_hash_history") as mock_save,
        patch("build_hash_history.get_lfx_version") as mock_get_version,
        patch("build_hash_history.Path") as mock_path,
    ):
        mock_import.return_value = (mock_modules_dict, 3)
        mock_load.return_value = {}
        # "0.1.0" has no "dev" suffix, so main() takes the stable-history branch.
        mock_get_version.return_value = "0.1.0"
        mock_path.return_value = history_file

        # Run main with mocked functions
        main([])

        mock_save.assert_called_once()
        # save_hash_history(path, history) -> positional arg 1 is the history dict.
        saved_history = mock_save.call_args[0][1]
        assert len(saved_history) == 3
        assert "MyComponent" in saved_history
        assert saved_history["MyComponent"]["versions"]["0.1.0"] == "hash_v1"
        assert "AnotherComponent" in saved_history
        assert saved_history["AnotherComponent"]["versions"]["0.1.0"] == "hash_v2"
        assert "ThirdComponent" in saved_history
        assert saved_history["ThirdComponent"]["versions"]["0.1.0"] == "hash_v3"


def test_all_real_component_names_are_unique():
    """Test that all real component names loaded via _import_components are unique."""
    modules_dict, _ = _import_components()  # Load real components
    component_names = [
        component_name for components_dict in modules_dict.values() for component_name in components_dict
    ]
    # Names are the history keys, so any duplicate would silently merge histories.
    assert len(component_names) == len(set(component_names))
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/test_build_hash_history.py", "license": "MIT License", "lines": 96, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/src/lfx/services/registry.py
"""Service registration decorator for pluggable services. Allows services to self-register with the service manager using a decorator. """ from __future__ import annotations from typing import TYPE_CHECKING, TypeVar from lfx.log.logger import logger if TYPE_CHECKING: from lfx.services.base import Service from lfx.services.schema import ServiceType ServiceT = TypeVar("ServiceT", bound="Service") def register_service(service_type: ServiceType, *, override: bool = True): """Decorator to register a service class with the service manager. Usage: @register_service(ServiceType.DATABASE_SERVICE) class DatabaseService(Service): name = "database_service" ... Args: service_type: The ServiceType enum value for this service override: Whether to override existing registrations (default: True) Returns: Decorator function that registers the service class """ def decorator(service_class: type[ServiceT]) -> type[ServiceT]: """Register the service class and return it unchanged.""" try: from lfx.services.manager import get_service_manager service_manager = get_service_manager() service_manager.register_service_class(service_type, service_class, override=override) logger.debug(f"Registered service via decorator: {service_type.value} -> {service_class.__name__}") except ValueError: # Re-raise ValueError (used for settings service protection) raise except Exception as exc: # noqa: BLE001 logger.warning(f"Failed to register service {service_type.value} from decorator: {exc}") return service_class return decorator
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/src/lfx/services/registry.py", "license": "MIT License", "lines": 37, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
langflow-ai/langflow:src/lfx/src/lfx/services/telemetry/base.py
"""Abstract base class for telemetry services.""" from __future__ import annotations from abc import ABC, abstractmethod from typing import TYPE_CHECKING from lfx.services.base import Service if TYPE_CHECKING: from pydantic import BaseModel class BaseTelemetryService(Service, ABC): """Abstract base class for telemetry services. Defines the minimal interface that all telemetry service implementations must provide, whether minimal (LFX) or full-featured (Langflow). """ @abstractmethod def __init__(self): """Initialize the telemetry service.""" super().__init__() @abstractmethod async def send_telemetry_data(self, payload: BaseModel, path: str | None = None) -> None: """Send telemetry data to the telemetry backend. Args: payload: The telemetry payload to send path: Optional path to append to the base URL """ @abstractmethod async def log_package_run(self, payload: BaseModel) -> None: """Log a package run event. Args: payload: Run payload containing run information """ @abstractmethod async def log_package_shutdown(self) -> None: """Log a package shutdown event.""" @abstractmethod async def log_package_version(self) -> None: """Log the package version information.""" @abstractmethod async def log_package_playground(self, payload: BaseModel) -> None: """Log a playground interaction event. Args: payload: Playground payload containing interaction information """ @abstractmethod async def log_package_component(self, payload: BaseModel) -> None: """Log a component usage event. Args: payload: Component payload containing component information """ @abstractmethod async def log_exception(self, exc: Exception, context: str) -> None: """Log an unhandled exception. 
Args: exc: The exception that occurred context: Context where exception occurred """ @abstractmethod def start(self) -> None: """Start the telemetry service.""" @abstractmethod async def stop(self) -> None: """Stop the telemetry service.""" @abstractmethod async def flush(self) -> None: """Flush any pending telemetry data."""
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/src/lfx/services/telemetry/base.py", "license": "MIT License", "lines": 63, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
langflow-ai/langflow:src/lfx/src/lfx/services/telemetry/service.py
"""Lightweight telemetry service for LFX package.""" from __future__ import annotations from typing import TYPE_CHECKING from lfx.log.logger import logger from lfx.services.telemetry.base import BaseTelemetryService if TYPE_CHECKING: from pydantic import BaseModel class TelemetryService(BaseTelemetryService): """Minimal telemetry service implementation for LFX. This is a lightweight implementation that logs telemetry events but does not send data to any external service. For full telemetry functionality, use the Langflow TelemetryService. """ def __init__(self): """Initialize the telemetry service with do-not-track enabled.""" super().__init__() self.do_not_track = True # Minimal implementation never sends data self.set_ready() @property def name(self) -> str: """Service name identifier. Returns: str: The service name. """ return "telemetry_service" async def send_telemetry_data(self, payload: BaseModel, path: str | None = None) -> None: # noqa: ARG002 """Log telemetry data (minimal implementation - no actual sending). Args: payload: The telemetry payload path: Optional path """ logger.debug(f"Telemetry event (not sent): {path}") async def log_package_run(self, payload: BaseModel) -> None: # noqa: ARG002 """Log a package run event. Args: payload: Run payload """ logger.debug("Telemetry: package run") async def log_package_shutdown(self) -> None: """Log a package shutdown event.""" logger.debug("Telemetry: package shutdown") async def log_package_version(self) -> None: """Log the package version.""" logger.debug("Telemetry: package version") async def log_package_playground(self, payload: BaseModel) -> None: # noqa: ARG002 """Log a playground interaction. Args: payload: Playground payload """ logger.debug("Telemetry: playground interaction") async def log_package_component(self, payload: BaseModel) -> None: # noqa: ARG002 """Log a component usage. 
Args: payload: Component payload """ logger.debug("Telemetry: component usage") async def log_exception(self, exc: Exception, context: str) -> None: """Log an unhandled exception. Args: exc: The exception context: Exception context """ logger.debug(f"Telemetry: exception in {context}: {exc.__class__.__name__}") def start(self) -> None: """Start the telemetry service (minimal implementation - noop).""" logger.debug("Telemetry service started (minimal mode)") async def stop(self) -> None: """Stop the telemetry service (minimal implementation - noop).""" logger.debug("Telemetry service stopped") async def flush(self) -> None: """Flush pending telemetry (minimal implementation - noop).""" async def teardown(self) -> None: """Teardown the telemetry service.""" await self.stop()
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/src/lfx/services/telemetry/service.py", "license": "MIT License", "lines": 74, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
langflow-ai/langflow:src/lfx/src/lfx/services/tracing/base.py
"""Abstract base class for tracing services.""" from __future__ import annotations from abc import ABC, abstractmethod from contextlib import asynccontextmanager from typing import TYPE_CHECKING, Any from lfx.services.base import Service if TYPE_CHECKING: from uuid import UUID from langchain.callbacks.base import BaseCallbackHandler from lfx.custom.custom_component.component import Component class BaseTracingService(Service, ABC): """Abstract base class for tracing services. Defines the minimal interface that all tracing service implementations must provide, whether minimal (LFX) or full-featured (Langflow). """ @abstractmethod def __init__(self): """Initialize the tracing service.""" super().__init__() @abstractmethod async def start_tracers( self, run_id: UUID, run_name: str, user_id: str | None, session_id: str | None, project_name: str | None = None, ) -> None: """Start tracers for a graph run. Args: run_id: Unique identifier for the run run_name: Name of the run user_id: User identifier (optional) session_id: Session identifier (optional) project_name: Project name (optional) """ @abstractmethod async def end_tracers(self, outputs: dict, error: Exception | None = None) -> None: """End tracers for a graph run. Args: outputs: Output data from the run error: Exception if run failed (optional) """ @abstractmethod @asynccontextmanager async def trace_component( self, component: Component, trace_name: str, inputs: dict[str, Any], metadata: dict[str, Any] | None = None, ): """Context manager for tracing a component execution. Args: component: The component being traced trace_name: Name for the trace inputs: Input data to the component metadata: Additional metadata (optional) Yields: Self for method chaining """ @abstractmethod def add_log(self, trace_name: str, log: Any) -> None: """Add a log entry to the current trace. 
Args: trace_name: Name of the trace log: Log data to add """ @abstractmethod def set_outputs( self, trace_name: str, outputs: dict[str, Any], output_metadata: dict[str, Any] | None = None, ) -> None: """Set outputs for the current trace. Args: trace_name: Name of the trace outputs: Output data output_metadata: Additional output metadata (optional) """ @abstractmethod def get_langchain_callbacks(self) -> list[BaseCallbackHandler]: """Get LangChain callback handlers for tracing. Returns: List of callback handlers """ @property @abstractmethod def project_name(self) -> str | None: """Get the current project name. Returns: Project name or None if not set """
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/src/lfx/services/tracing/base.py", "license": "MIT License", "lines": 94, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
langflow-ai/langflow:src/lfx/src/lfx/services/variable/service.py
"""Minimal variable service for lfx package with in-memory storage and environment fallback.""" import os from lfx.log.logger import logger from lfx.services.base import Service class VariableService(Service): """Minimal variable service with in-memory storage and environment fallback. This is a lightweight implementation for LFX that maintains in-memory variables and falls back to environment variables for reads. No database storage. """ name = "variable_service" def __init__(self) -> None: """Initialize the variable service.""" super().__init__() self._variables: dict[str, str] = {} self.set_ready() logger.debug("Variable service initialized (env vars only)") def get_variable(self, name: str, **kwargs) -> str | None: # noqa: ARG002 """Get a variable value. First checks in-memory cache, then environment variables. Args: name: Variable name **kwargs: Additional arguments (ignored in minimal implementation) Returns: Variable value or None if not found """ # Check in-memory first if name in self._variables: return self._variables[name] # Fall back to environment variable value = os.getenv(name) if value: logger.debug(f"Variable '{name}' loaded from environment") return value def set_variable(self, name: str, value: str, **kwargs) -> None: # noqa: ARG002 """Set a variable value (in-memory only). Args: name: Variable name value: Variable value **kwargs: Additional arguments (ignored in minimal implementation) """ self._variables[name] = value logger.debug(f"Variable '{name}' set (in-memory only)") def delete_variable(self, name: str, **kwargs) -> None: # noqa: ARG002 """Delete a variable (from in-memory cache only). Args: name: Variable name **kwargs: Additional arguments (ignored in minimal implementation) """ if name in self._variables: del self._variables[name] logger.debug(f"Variable '{name}' deleted (from in-memory cache)") def list_variables(self, **kwargs) -> list[str]: # noqa: ARG002 """List all variables (in-memory only). 
Args: **kwargs: Additional arguments (ignored in minimal implementation) Returns: List of variable names """ return list(self._variables.keys()) async def teardown(self) -> None: """Teardown the variable service.""" self._variables.clear() logger.debug("Variable service teardown")
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/src/lfx/services/variable/service.py", "license": "MIT License", "lines": 63, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
langflow-ai/langflow:src/lfx/tests/unit/services/test_decorator_registration.py
"""Tests for decorator-based service registration.""" from unittest.mock import MagicMock import pytest from lfx.services.base import Service from lfx.services.manager import ServiceManager from lfx.services.schema import ServiceType from lfx.services.storage.local import LocalStorageService from lfx.services.telemetry.service import TelemetryService from lfx.services.tracing.service import TracingService class MockSessionService(Service): """Mock session service for testing.""" name = "session_service" def __init__(self): """Initialize mock session service.""" self.set_ready() async def teardown(self) -> None: """Teardown the mock session service.""" @pytest.fixture def clean_manager(): """Create a fresh ServiceManager for testing decorators.""" manager = ServiceManager() # Register mock SESSION_SERVICE so services with dependencies can be created manager.register_service_class(ServiceType.SESSION_SERVICE, MockSessionService, override=True) yield manager # Cleanup import asyncio asyncio.run(manager.teardown()) class TestDecoratorRegistration: """Tests for @register_service decorator with real services.""" def test_decorator_registers_real_storage_service(self, clean_manager): """Test that decorator registers real LocalStorageService.""" # Use direct registration to simulate decorator (since decorator uses singleton) clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, LocalStorageService, override=True) assert ServiceType.STORAGE_SERVICE in clean_manager.service_classes assert clean_manager.service_classes[ServiceType.STORAGE_SERVICE] == LocalStorageService # Verify we can actually create and use the service storage = clean_manager.get(ServiceType.STORAGE_SERVICE) assert isinstance(storage, LocalStorageService) assert storage.ready is True @pytest.mark.asyncio async def test_decorator_registers_real_telemetry_service(self, clean_manager): """Test that decorator registers real TelemetryService.""" 
clean_manager.register_service_class(ServiceType.TELEMETRY_SERVICE, TelemetryService, override=True) assert ServiceType.TELEMETRY_SERVICE in clean_manager.service_classes assert clean_manager.service_classes[ServiceType.TELEMETRY_SERVICE] == TelemetryService # Verify service works telemetry = clean_manager.get(ServiceType.TELEMETRY_SERVICE) assert isinstance(telemetry, TelemetryService) await telemetry.log_package_version() # Should not raise def test_decorator_with_override_false_preserves_first(self, clean_manager): """Test decorator with override=False preserves first registration.""" # Register first service clean_manager.register_service_class(ServiceType.TELEMETRY_SERVICE, TelemetryService, override=True) # Try to register second service with override=False clean_manager.register_service_class(ServiceType.TELEMETRY_SERVICE, TracingService, override=False) # Should still be first service assert clean_manager.service_classes[ServiceType.TELEMETRY_SERVICE] == TelemetryService def test_decorator_with_override_true_replaces(self, clean_manager): """Test decorator with override=True replaces existing.""" # Register first service clean_manager.register_service_class(ServiceType.TELEMETRY_SERVICE, TelemetryService, override=True) # Replace with second service clean_manager.register_service_class(ServiceType.TELEMETRY_SERVICE, TracingService, override=True) # Should be second service assert clean_manager.service_classes[ServiceType.TELEMETRY_SERVICE] == TracingService def test_cannot_decorate_settings_service(self, clean_manager): """Test that decorating settings service raises ValueError.""" with pytest.raises(ValueError, match="Settings service cannot be registered"): clean_manager.register_service_class(ServiceType.SETTINGS_SERVICE, LocalStorageService) def test_decorator_with_custom_service_class(self, clean_manager): """Test decorator with a custom service implementation.""" class CustomTracingService(Service): @property def name(self) -> str: return 
"tracing_service" def __init__(self): super().__init__() self.messages = [] self.set_ready() def add_log(self, trace_name: str, log: dict): self.messages.append(f"{trace_name}: {log}") async def teardown(self) -> None: self.messages.clear() clean_manager.register_service_class(ServiceType.TRACING_SERVICE, CustomTracingService, override=True) # Verify registration assert clean_manager.service_classes[ServiceType.TRACING_SERVICE] == CustomTracingService # Verify we can use it tracing = clean_manager.get(ServiceType.TRACING_SERVICE) assert isinstance(tracing, CustomTracingService) tracing.add_log("test_trace", {"message": "test message"}) assert len(tracing.messages) == 1 def test_decorator_preserves_class_functionality(self, clean_manager, tmp_path): """Test that decorator preserves all class functionality.""" clean_manager.register_service_class(ServiceType.VARIABLE_SERVICE, LocalStorageService, override=True) # Class should still be usable directly (not just through manager) mock_session = MagicMock() mock_settings = MagicMock() mock_settings.settings.config_dir = tmp_path direct_instance = LocalStorageService(mock_session, mock_settings) assert direct_instance.ready is True assert direct_instance.name == "storage_service" def test_multiple_decorators_on_different_services(self, clean_manager): """Test registering multiple different services.""" clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, LocalStorageService, override=True) clean_manager.register_service_class(ServiceType.TELEMETRY_SERVICE, TelemetryService, override=True) clean_manager.register_service_class(ServiceType.TRACING_SERVICE, TracingService, override=True) # All should be registered (plus MockSessionService from fixture) assert len(clean_manager.service_classes) == 4 assert ServiceType.STORAGE_SERVICE in clean_manager.service_classes assert ServiceType.TELEMETRY_SERVICE in clean_manager.service_classes assert ServiceType.TRACING_SERVICE in clean_manager.service_classes # All should 
be creatable storage = clean_manager.get(ServiceType.STORAGE_SERVICE) telemetry = clean_manager.get(ServiceType.TELEMETRY_SERVICE) tracing = clean_manager.get(ServiceType.TRACING_SERVICE) assert isinstance(storage, LocalStorageService) assert isinstance(telemetry, TelemetryService) assert isinstance(tracing, TracingService)
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/tests/unit/services/test_decorator_registration.py", "license": "MIT License", "lines": 118, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/tests/unit/services/test_edge_cases.py
"""Edge case tests for pluggable service system.""" import pytest from lfx.services.base import Service from lfx.services.manager import ServiceManager from lfx.services.schema import ServiceType class MockSessionService(Service): """Mock session service for testing.""" name = "session_service" def __init__(self): """Initialize mock session service.""" self.set_ready() async def teardown(self) -> None: """Teardown the mock session service.""" @pytest.fixture def clean_manager(): """Create a clean ServiceManager instance with mock dependencies.""" manager = ServiceManager() # Register mock SESSION_SERVICE so services with dependencies can be created manager.register_service_class(ServiceType.SESSION_SERVICE, MockSessionService, override=True) yield manager # Cleanup import asyncio asyncio.run(manager.teardown()) class TestCircularDependencyDetection: """Test detection and handling of circular dependencies.""" def test_self_circular_dependency(self, clean_manager): """Test service that depends on itself.""" class SelfCircularService(Service): @property def name(self) -> str: return "self_circular" def __init__(self, storage_service): super().__init__() self.storage = storage_service self.set_ready() async def teardown(self) -> None: pass clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, SelfCircularService) # Should raise RecursionError or TypeError (missing required argument) with pytest.raises((RecursionError, RuntimeError, TypeError)): clean_manager.get(ServiceType.STORAGE_SERVICE) class TestServiceLifecycle: """Test service lifecycle management.""" @pytest.fixture def clean_manager(self): """Create a clean ServiceManager instance.""" manager = ServiceManager() yield manager # Cleanup import asyncio asyncio.run(manager.teardown()) def test_service_ready_state(self, clean_manager): """Test service ready state tracking.""" class SlowInitService(Service): @property def name(self) -> str: return "slow_service" def __init__(self): super().__init__() # 
Don't set ready immediately def complete_init(self): self.set_ready() async def teardown(self) -> None: pass clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, SlowInitService) service = clean_manager.get(ServiceType.STORAGE_SERVICE) # Should not be ready yet assert service.ready is False # Complete initialization service.complete_init() assert service.ready is True @pytest.mark.asyncio async def test_service_teardown_called(self, clean_manager): """Test that teardown is called on services.""" teardown_called = [] class TeardownTrackingService(Service): name = "tracking_service" def __init__(self): super().__init__() self.set_ready() async def teardown(self) -> None: teardown_called.append(True) clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, TeardownTrackingService) # Create service clean_manager.get(ServiceType.STORAGE_SERVICE) # Teardown await clean_manager.teardown() # Should have been called assert len(teardown_called) == 1 @pytest.mark.asyncio async def test_multiple_teardowns_safe(self, clean_manager): """Test that calling teardown multiple times is safe.""" class SimpleService(Service): name = "simple_service" def __init__(self): super().__init__() self.set_ready() async def teardown(self) -> None: pass clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, SimpleService) clean_manager.get(ServiceType.STORAGE_SERVICE) # Teardown multiple times - should not raise await clean_manager.teardown() await clean_manager.teardown() class TestConfigParsingEdgeCases: """Test edge cases in configuration parsing.""" @pytest.fixture def clean_manager(self): """Create a clean ServiceManager instance.""" manager = ServiceManager() # Register mock SESSION_SERVICE so services with dependencies can be created manager.register_service_class(ServiceType.SESSION_SERVICE, MockSessionService, override=True) yield manager # Cleanup import asyncio asyncio.run(manager.teardown()) def test_empty_config_file(self, clean_manager, tmp_path): 
"""Test empty configuration file.""" config_dir = tmp_path / "config" config_dir.mkdir() config_file = config_dir / "lfx.toml" config_file.write_text("") # Should not raise clean_manager.discover_plugins(config_dir) assert len(clean_manager.service_classes) == 1 # MockSessionService from fixture def test_config_with_no_services_section(self, clean_manager, tmp_path): """Test config file with no [services] section.""" config_dir = tmp_path / "config" config_dir.mkdir() config_file = config_dir / "lfx.toml" config_file.write_text( """ [other_section] key = "value" """ ) # Should not raise clean_manager.discover_plugins(config_dir) assert len(clean_manager.service_classes) == 1 # MockSessionService from fixture def test_config_with_empty_services_section(self, clean_manager, tmp_path): """Test config with empty [services] section.""" config_dir = tmp_path / "config" config_dir.mkdir() config_file = config_dir / "lfx.toml" config_file.write_text( """ [services] """ ) # Should not raise clean_manager.discover_plugins(config_dir) assert len(clean_manager.service_classes) == 1 # MockSessionService from fixture def test_config_with_malformed_import_path(self, clean_manager, tmp_path): """Test config with malformed import path.""" config_dir = tmp_path / "config" config_dir.mkdir() config_file = config_dir / "lfx.toml" config_file.write_text( """ [services] storage_service = "invalid_path_without_colon" """ ) # Should not raise, just log warning clean_manager.discover_plugins(config_dir) assert ServiceType.STORAGE_SERVICE not in clean_manager.service_classes def test_config_with_too_many_colons(self, clean_manager, tmp_path): """Test config with too many colons in import path.""" config_dir = tmp_path / "config" config_dir.mkdir() config_file = config_dir / "lfx.toml" config_file.write_text( """ [services] storage_service = "module:submodule:class:extra" """ ) # Should not raise, just log warning clean_manager.discover_plugins(config_dir) assert ServiceType.STORAGE_SERVICE 
not in clean_manager.service_classes class TestServiceRegistrationEdgeCases: """Test edge cases in service registration.""" @pytest.fixture def clean_manager(self): """Create a clean ServiceManager instance.""" manager = ServiceManager() yield manager # Cleanup import asyncio asyncio.run(manager.teardown()) def test_register_non_service_class(self, clean_manager): """Test registering a class that doesn't inherit from Service.""" class NotAService: @property def name(self) -> str: return "not_service" def __init__(self): pass async def teardown(self) -> None: pass # Should not raise during registration clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, NotAService) # Gets created successfully but doesn't have service methods service = clean_manager.get(ServiceType.STORAGE_SERVICE) assert service is not None # But won't have ready attribute since it doesn't inherit from Service assert not hasattr(service, "_ready") def test_register_abstract_service(self, clean_manager): """Test registering an abstract service class.""" from abc import ABC, abstractmethod class AbstractService(Service, ABC): name = "abstract_service" @abstractmethod def do_something(self): pass async def teardown(self) -> None: pass clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, AbstractService) # Should fail due to abstract methods with pytest.raises(TypeError): clean_manager.get(ServiceType.STORAGE_SERVICE) def test_register_same_service_multiple_times_with_override(self, clean_manager): """Test registering same service type multiple times with override.""" class Service1(Service): name = "service1" async def teardown(self) -> None: pass class Service2(Service): name = "service2" async def teardown(self) -> None: pass class Service3(Service): name = "service3" async def teardown(self) -> None: pass # Register multiple times clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, Service1, override=True) 
clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, Service2, override=True) clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, Service3, override=True) # Should have the last one assert clean_manager.service_classes[ServiceType.STORAGE_SERVICE] == Service3 class TestDependencyInjectionEdgeCases: """Test edge cases in dependency injection.""" @pytest.fixture def clean_manager(self): """Create a clean ServiceManager instance.""" manager = ServiceManager() yield manager # Cleanup import asyncio asyncio.run(manager.teardown()) def test_service_with_optional_dependencies(self, clean_manager): """Test service with optional parameters.""" class ServiceWithOptional(Service): name = "optional_service" def __init__(self, settings_service=None): super().__init__() self.settings = settings_service self.set_ready() async def teardown(self) -> None: pass clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, ServiceWithOptional) service = clean_manager.get(ServiceType.STORAGE_SERVICE) # Should have settings injected from lfx.services.settings.service import SettingsService assert isinstance(service.settings, SettingsService) def test_service_with_no_init_params(self, clean_manager): """Test service that takes no init parameters.""" class NoParamService(Service): name = "no_param_service" def __init__(self): super().__init__() self.set_ready() async def teardown(self) -> None: pass clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, NoParamService) service = clean_manager.get(ServiceType.STORAGE_SERVICE) assert service.ready is True def test_service_with_non_service_params(self, clean_manager): """Test service with parameters that aren't services.""" class ServiceWithConfig(Service): name = "config_service" def __init__(self, config: dict): super().__init__() self.config = config self.set_ready() async def teardown(self) -> None: pass clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, ServiceWithConfig) # 
Should fail - can't resolve dict parameter with pytest.raises(TypeError): clean_manager.get(ServiceType.STORAGE_SERVICE) class TestConcurrentAccess: """Test concurrent access to service manager.""" @pytest.fixture def clean_manager(self): """Create a clean ServiceManager instance.""" manager = ServiceManager() yield manager # Cleanup import asyncio asyncio.run(manager.teardown()) def test_multiple_gets_return_same_instance(self, clean_manager): """Test that multiple get calls return same instance.""" class SimpleService(Service): name = "simple_service" def __init__(self): super().__init__() self.set_ready() async def teardown(self) -> None: pass clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, SimpleService) # Get multiple times service1 = clean_manager.get(ServiceType.STORAGE_SERVICE) service2 = clean_manager.get(ServiceType.STORAGE_SERVICE) service3 = clean_manager.get(ServiceType.STORAGE_SERVICE) # Should all be the same instance assert service1 is service2 assert service2 is service3 class TestSettingsServiceProtection: """Test settings service protection mechanisms.""" @pytest.fixture def clean_manager(self): """Create a clean ServiceManager instance.""" manager = ServiceManager() yield manager # Cleanup import asyncio asyncio.run(manager.teardown()) def test_cannot_register_settings_via_class(self, clean_manager): """Test that settings service cannot be registered via class.""" class CustomSettings(Service): name = "settings_service" async def teardown(self) -> None: pass with pytest.raises(ValueError, match="Settings service cannot be registered"): clean_manager.register_service_class(ServiceType.SETTINGS_SERVICE, CustomSettings) def test_cannot_register_settings_via_decorator(self): """Test that settings service cannot be registered via decorator.""" from lfx.services.registry import register_service with pytest.raises(ValueError, match="Settings service cannot be registered"): @register_service(ServiceType.SETTINGS_SERVICE) class 
CustomSettings(Service): name = "settings_service" async def teardown(self) -> None: pass def test_settings_service_always_uses_factory(self, clean_manager): """Test that settings service always uses factory.""" settings = clean_manager.get(ServiceType.SETTINGS_SERVICE) from lfx.services.settings.service import SettingsService # Should be the built-in SettingsService assert isinstance(settings, SettingsService) def test_cannot_override_settings_in_config(self, clean_manager, tmp_path): """Test that settings service in config is ignored.""" config_dir = tmp_path / "config" config_dir.mkdir() config_file = config_dir / "lfx.toml" config_file.write_text( """ [services] settings_service = "some.custom:SettingsService" """ ) # Should not raise, but should ignore the settings_service entry clean_manager.discover_plugins(config_dir) # Settings should not be in service_classes assert ServiceType.SETTINGS_SERVICE not in clean_manager.service_classes # Getting settings should still work (via factory) settings = clean_manager.get(ServiceType.SETTINGS_SERVICE) from lfx.services.settings.service import SettingsService assert isinstance(settings, SettingsService)
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/tests/unit/services/test_edge_cases.py", "license": "MIT License", "lines": 371, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/tests/unit/services/test_integration.py
"""Integration tests for pluggable service system.""" import os import pytest from lfx.services.base import Service from lfx.services.manager import ServiceManager from lfx.services.schema import ServiceType from .conftest import MockSessionService class TestStandaloneLFX: """Test LFX running standalone without langflow.""" @pytest.fixture def clean_manager(self): """Create a clean ServiceManager instance.""" manager = ServiceManager() # Register mock session service as dependency for LocalStorageService manager.register_service_class(ServiceType.SESSION_SERVICE, MockSessionService, override=True) yield manager # Cleanup import asyncio asyncio.run(manager.teardown()) def test_minimal_storage_service_loads(self, clean_manager): """Test that minimal storage service loads by default.""" from lfx.services.storage.local import LocalStorageService # Register the minimal storage service clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, LocalStorageService) storage = clean_manager.get(ServiceType.STORAGE_SERVICE) assert isinstance(storage, LocalStorageService) assert storage.ready is True def test_minimal_telemetry_service_loads(self, clean_manager): """Test that minimal telemetry service loads by default.""" # Should fall back to factory since no plugin registered # Telemetry doesn't have a default factory, so should fail # unless we register it first from lfx.services.telemetry.service import TelemetryService clean_manager.register_service_class(ServiceType.TELEMETRY_SERVICE, TelemetryService) telemetry = clean_manager.get(ServiceType.TELEMETRY_SERVICE) assert isinstance(telemetry, TelemetryService) assert telemetry.ready is True def test_minimal_variable_service_loads(self, clean_manager): """Test that minimal variable service loads by default.""" from lfx.services.variable.service import VariableService clean_manager.register_service_class(ServiceType.VARIABLE_SERVICE, VariableService) variables = clean_manager.get(ServiceType.VARIABLE_SERVICE) assert 
isinstance(variables, VariableService) assert variables.ready is True def test_settings_service_always_available(self, clean_manager): """Test that settings service is always available.""" settings = clean_manager.get(ServiceType.SETTINGS_SERVICE) from lfx.services.settings.service import SettingsService assert isinstance(settings, SettingsService) assert settings.ready is True class TestLFXWithLangflowConfig: """Test LFX with langflow configuration.""" @pytest.fixture def langflow_config_dir(self, tmp_path): """Create a temporary langflow-style config directory.""" config_dir = tmp_path / "config" config_dir.mkdir() # Create lfx.toml with langflow services config_file = config_dir / "lfx.toml" config_file.write_text( """ [services] storage_service = "lfx.services.storage.local:LocalStorageService" cache_service = "lfx.services.cache.service:ThreadingInMemoryCache" """ ) return config_dir @pytest.fixture def clean_manager(self): """Create a clean ServiceManager instance.""" manager = ServiceManager() # Register mock session service as dependency for LocalStorageService manager.register_service_class(ServiceType.SESSION_SERVICE, MockSessionService, override=True) yield manager # Cleanup import asyncio asyncio.run(manager.teardown()) def test_config_overrides_defaults(self, clean_manager, langflow_config_dir): """Test that config file overrides default services.""" # Discover plugins from config clean_manager.discover_plugins(langflow_config_dir) # Storage should be loaded from config assert ServiceType.STORAGE_SERVICE in clean_manager.service_classes from lfx.services.storage.local import LocalStorageService assert clean_manager.service_classes[ServiceType.STORAGE_SERVICE] == LocalStorageService def test_multiple_services_from_config(self, clean_manager, langflow_config_dir): """Test loading multiple services from config.""" clean_manager.discover_plugins(langflow_config_dir) # Both services should be registered assert ServiceType.STORAGE_SERVICE in 
clean_manager.service_classes assert ServiceType.CACHE_SERVICE in clean_manager.service_classes class TestServiceOverrideScenarios: """Test various service override scenarios.""" @pytest.fixture def clean_manager(self): """Create a clean ServiceManager instance.""" manager = ServiceManager() # Register mock session service as dependency for LocalStorageService manager.register_service_class(ServiceType.SESSION_SERVICE, MockSessionService, override=True) yield manager # Cleanup import asyncio asyncio.run(manager.teardown()) def test_decorator_overrides_config(self, clean_manager, tmp_path): """Test that decorator registration overrides config.""" # First create config config_dir = tmp_path / "config" config_dir.mkdir() config_file = config_dir / "lfx.toml" config_file.write_text( """ [services] storage_service = "lfx.services.storage.local:LocalStorageService" """ ) # Load from config clean_manager.discover_plugins(config_dir) # Now override with direct registration (simulating decorator) class CustomStorageService(Service): @property def name(self) -> str: return "storage_service" async def teardown(self) -> None: pass clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, CustomStorageService, override=True) # Should use the override version assert clean_manager.service_classes[ServiceType.STORAGE_SERVICE] == CustomStorageService def test_override_false_preserves_existing(self, clean_manager): """Test that override=False preserves existing registration.""" class FirstService(Service): name = "storage_service" async def teardown(self) -> None: pass class SecondService(Service): name = "storage_service" async def teardown(self) -> None: pass # Register first clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, FirstService, override=True) # Try to register second with override=False clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, SecondService, override=False) # Should still be first assert 
clean_manager.service_classes[ServiceType.STORAGE_SERVICE] == FirstService class TestErrorConditions: """Test error handling in various conditions.""" @pytest.fixture def clean_manager(self): """Create a clean ServiceManager instance.""" manager = ServiceManager() # Register mock session service as dependency for LocalStorageService manager.register_service_class(ServiceType.SESSION_SERVICE, MockSessionService, override=True) yield manager # Cleanup import asyncio asyncio.run(manager.teardown()) def test_missing_import_in_config(self, clean_manager, tmp_path): """Test handling of missing import in config.""" config_dir = tmp_path / "config" config_dir.mkdir() config_file = config_dir / "lfx.toml" config_file.write_text( """ [services] storage_service = "nonexistent.module:NonexistentClass" """ ) # Should not raise, just log warning clean_manager.discover_plugins(config_dir) # Service should not be registered assert ServiceType.STORAGE_SERVICE not in clean_manager.service_classes def test_invalid_service_type_in_config(self, clean_manager, tmp_path): """Test handling of invalid service type in config.""" config_dir = tmp_path / "config" config_dir.mkdir() config_file = config_dir / "lfx.toml" config_file.write_text( """ [services] invalid_service_type = "some.module:SomeClass" """ ) # Should not raise, just log warning clean_manager.discover_plugins(config_dir) # No services should be registered from config (only SESSION_SERVICE from fixture) assert len(clean_manager.service_classes) == 1 assert ServiceType.SESSION_SERVICE in clean_manager.service_classes def test_malformed_toml_in_config(self, clean_manager, tmp_path): """Test handling of malformed TOML.""" config_dir = tmp_path / "config" config_dir.mkdir() config_file = config_dir / "lfx.toml" config_file.write_text( """ [services storage_service = "lfx.services.storage.local:LocalStorageService" """ ) # Should not raise, just log warning clean_manager.discover_plugins(config_dir) # No services should be 
registered from config (only SESSION_SERVICE from fixture) assert len(clean_manager.service_classes) == 1 assert ServiceType.SESSION_SERVICE in clean_manager.service_classes def test_service_without_name_attribute(self, clean_manager): """Test registering a service without name attribute.""" class InvalidService(Service): async def teardown(self) -> None: pass # Should not raise during registration clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, InvalidService) # But should fail during creation (can't instantiate abstract class) with pytest.raises(TypeError, match="Can't instantiate abstract class"): clean_manager.get(ServiceType.STORAGE_SERVICE) def test_service_initialization_failure(self, clean_manager): """Test handling of service initialization failure.""" class FailingService(Service): name = "storage_service" def __init__(self): msg = "Initialization failed" raise RuntimeError(msg) async def teardown(self) -> None: pass clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, FailingService) # Should raise the initialization error with pytest.raises(RuntimeError, match="Initialization failed"): clean_manager.get(ServiceType.STORAGE_SERVICE) class TestDependencyResolution: """Test dependency resolution and injection.""" @pytest.fixture def clean_manager(self): """Create a clean ServiceManager instance.""" manager = ServiceManager() # Register mock session service as dependency for LocalStorageService manager.register_service_class(ServiceType.SESSION_SERVICE, MockSessionService, override=True) yield manager # Cleanup import asyncio asyncio.run(manager.teardown()) def test_service_with_settings_dependency(self, clean_manager): """Test service that depends on settings service.""" class ServiceWithSettings(Service): name = "test_service" def __init__(self, settings_service): super().__init__() self.settings = settings_service self.set_ready() async def teardown(self) -> None: pass 
clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, ServiceWithSettings) service = clean_manager.get(ServiceType.STORAGE_SERVICE) # Should have settings injected from lfx.services.settings.service import SettingsService assert isinstance(service.settings, SettingsService) def test_service_with_multiple_dependencies(self, clean_manager): """Test service with multiple dependencies.""" class SimpleService(Service): name = "simple_service" def __init__(self): super().__init__() self.set_ready() async def teardown(self) -> None: pass class ComplexService(Service): name = "complex_service" def __init__(self, settings_service, storage_service): super().__init__() self.settings = settings_service self.storage = storage_service self.set_ready() async def teardown(self) -> None: pass # Register both clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, SimpleService) clean_manager.register_service_class(ServiceType.CACHE_SERVICE, ComplexService) # Create complex service complex_service = clean_manager.get(ServiceType.CACHE_SERVICE) # Should have both dependencies from lfx.services.settings.service import SettingsService assert isinstance(complex_service.settings, SettingsService) assert isinstance(complex_service.storage, SimpleService) def test_service_with_unresolvable_dependency(self, clean_manager): """Test service with dependency that can't be resolved.""" class ServiceWithUnknownDep(Service): name = "test_service" def __init__(self, unknown_param: str): super().__init__() self.unknown_param = unknown_param self.set_ready() async def teardown(self) -> None: pass clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, ServiceWithUnknownDep) # Should raise due to missing parameter with pytest.raises(TypeError): clean_manager.get(ServiceType.STORAGE_SERVICE) class TestConfigFileDiscovery: """Test configuration file discovery.""" @pytest.fixture def clean_manager(self): """Create a clean ServiceManager instance.""" manager = 
ServiceManager() # Register mock session service as dependency for LocalStorageService manager.register_service_class(ServiceType.SESSION_SERVICE, MockSessionService, override=True) yield manager # Cleanup import asyncio asyncio.run(manager.teardown()) def test_pyproject_toml_discovery(self, clean_manager, tmp_path): """Test discovering services from pyproject.toml.""" config_dir = tmp_path / "config" config_dir.mkdir() config_file = config_dir / "pyproject.toml" config_file.write_text( """ [tool.lfx.services] storage_service = "lfx.services.storage.local:LocalStorageService" """ ) clean_manager.discover_plugins(config_dir) assert ServiceType.STORAGE_SERVICE in clean_manager.service_classes def test_lfx_toml_takes_precedence(self, clean_manager, tmp_path): """Test that lfx.toml takes precedence over pyproject.toml.""" config_dir = tmp_path / "config" config_dir.mkdir() # Create both files with different services (config_dir / "lfx.toml").write_text( """ [services] storage_service = "lfx.services.storage.local:LocalStorageService" """ ) (config_dir / "pyproject.toml").write_text( """ [tool.lfx.services] cache_service = "lfx.services.cache.service:ThreadingInMemoryCache" """ ) clean_manager.discover_plugins(config_dir) # Should only have storage from lfx.toml assert ServiceType.STORAGE_SERVICE in clean_manager.service_classes assert ServiceType.CACHE_SERVICE not in clean_manager.service_classes def test_no_config_file_no_error(self, clean_manager, tmp_path): """Test that missing config files don't cause errors.""" config_dir = tmp_path / "config" config_dir.mkdir() # Should not raise clean_manager.discover_plugins(config_dir) # No services should be registered from config (only SESSION_SERVICE from fixture) assert len(clean_manager.service_classes) == 1 assert ServiceType.SESSION_SERVICE in clean_manager.service_classes class TestEnvironmentVariableIntegration: """Test environment variable integration with services.""" @pytest.fixture def clean_manager(self): 
"""Create a clean ServiceManager instance.""" manager = ServiceManager() # Register mock session service as dependency for LocalStorageService manager.register_service_class(ServiceType.SESSION_SERVICE, MockSessionService, override=True) yield manager # Cleanup import asyncio asyncio.run(manager.teardown()) def test_variable_service_uses_env(self, clean_manager): """Test that variable service reads from environment.""" from lfx.services.variable.service import VariableService clean_manager.register_service_class(ServiceType.VARIABLE_SERVICE, VariableService) os.environ["TEST_API_KEY"] = "test_value_123" # pragma: allowlist secret try: variables = clean_manager.get(ServiceType.VARIABLE_SERVICE) value = variables.get_variable("TEST_API_KEY") assert value == "test_value_123" finally: del os.environ["TEST_API_KEY"] def test_variable_service_in_memory_overrides_env(self, clean_manager): """Test that in-memory variables override environment.""" from lfx.services.variable.service import VariableService clean_manager.register_service_class(ServiceType.VARIABLE_SERVICE, VariableService) os.environ["TEST_VAR"] = "env_value" try: variables = clean_manager.get(ServiceType.VARIABLE_SERVICE) variables.set_variable("TEST_VAR", "memory_value") value = variables.get_variable("TEST_VAR") assert value == "memory_value" finally: del os.environ["TEST_VAR"]
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/tests/unit/services/test_integration.py", "license": "MIT License", "lines": 383, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/tests/unit/services/test_minimal_services.py
"""Tests for minimal service implementations in LFX.""" import os import pytest from lfx.services.storage.local import LocalStorageService from lfx.services.telemetry.service import TelemetryService from lfx.services.tracing.service import TracingService from lfx.services.variable.service import VariableService class TestLocalStorageService: """Tests for LocalStorageService.""" @pytest.fixture def storage(self, mock_session_service, mock_settings_service): """Create a storage service with temp directory.""" return LocalStorageService(mock_session_service, mock_settings_service) @pytest.mark.asyncio async def test_save_and_get_file(self, storage): """Test saving and retrieving a file.""" data = b"test content" await storage.save_file("flow_123", "test.txt", data) retrieved = await storage.get_file("flow_123", "test.txt") assert retrieved == data @pytest.mark.asyncio async def test_list_files(self, storage): """Test listing files in a flow.""" await storage.save_file("flow_123", "file1.txt", b"content1") await storage.save_file("flow_123", "file2.txt", b"content2") files = await storage.list_files("flow_123") assert len(files) == 2 assert "file1.txt" in files assert "file2.txt" in files @pytest.mark.asyncio async def test_delete_file(self, storage): """Test deleting a file.""" await storage.save_file("flow_123", "test.txt", b"content") await storage.delete_file("flow_123", "test.txt") with pytest.raises(FileNotFoundError): await storage.get_file("flow_123", "test.txt") @pytest.mark.asyncio async def test_get_file_size(self, storage): """Test getting file size.""" data = b"test content" await storage.save_file("flow_123", "test.txt", data) size = await storage.get_file_size("flow_123", "test.txt") assert size == len(data) @pytest.mark.asyncio async def test_get_nonexistent_file(self, storage): """Test getting a file that doesn't exist.""" with pytest.raises(FileNotFoundError): await storage.get_file("flow_123", "nonexistent.txt") def test_build_full_path(self, 
storage, mock_settings_service): """Test building full file path.""" path = storage.build_full_path("flow_123", "test.txt") config_dir = mock_settings_service.settings.config_dir expected = f"{config_dir}/flow_123/test.txt" assert path == expected @pytest.mark.asyncio async def test_list_files_empty_flow(self, storage): """Test listing files in nonexistent flow.""" files = await storage.list_files("nonexistent_flow") assert files == [] def test_service_ready(self, storage): """Test that service is marked as ready.""" assert storage.ready is True assert storage.name == "storage_service" @pytest.mark.asyncio async def test_teardown(self, storage): """Test service teardown.""" await storage.teardown() # Should not raise class TestTelemetryService: """Tests for minimal TelemetryService.""" @pytest.fixture def telemetry(self): """Create a telemetry service.""" return TelemetryService() def test_service_ready(self, telemetry): """Test that service is ready.""" assert telemetry.ready is True assert telemetry.name == "telemetry_service" @pytest.mark.asyncio async def test_log_exception(self, telemetry): """Test logging an exception (noop).""" # Should not raise exc = ValueError("test error") await telemetry.log_exception(exc, "test_context") @pytest.mark.asyncio async def test_log_package_version(self, telemetry): """Test logging package version (noop).""" # Should not raise await telemetry.log_package_version() @pytest.mark.asyncio async def test_teardown(self, telemetry): """Test service teardown.""" await telemetry.teardown() # Should not raise class TestTracingService: """Tests for minimal TracingService.""" @pytest.fixture def tracing(self): """Create a tracing service.""" return TracingService() def test_service_ready(self, tracing): """Test that service is ready.""" assert tracing.ready is True assert tracing.name == "tracing_service" def test_add_log(self, tracing): """Test adding a log entry (outputs to debug).""" # Should not raise tracing.add_log("test_trace", 
{"message": "test log"}) @pytest.mark.asyncio async def test_teardown(self, tracing): """Test service teardown.""" await tracing.teardown() # Should not raise class TestVariableService: """Tests for minimal VariableService.""" @pytest.fixture def variables(self): """Create a variable service.""" return VariableService() def test_service_ready(self, variables): """Test that service is ready.""" assert variables.ready is True assert variables.name == "variable_service" def test_set_and_get_variable(self, variables): """Test setting and getting a variable.""" variables.set_variable("test_key", "test_value") value = variables.get_variable("test_key") assert value == "test_value" def test_get_from_environment(self, variables): """Test getting variable from environment.""" os.environ["TEST_ENV_VAR"] = "env_value" try: value = variables.get_variable("TEST_ENV_VAR") assert value == "env_value" finally: del os.environ["TEST_ENV_VAR"] def test_get_nonexistent_variable(self, variables): """Test getting a variable that doesn't exist.""" value = variables.get_variable("nonexistent_key") assert value is None def test_delete_variable(self, variables): """Test deleting a variable.""" variables.set_variable("test_key", "test_value") variables.delete_variable("test_key") value = variables.get_variable("test_key") assert value is None def test_list_variables(self, variables): """Test listing variables.""" variables.set_variable("key1", "value1") variables.set_variable("key2", "value2") vars_list = variables.list_variables() assert "key1" in vars_list assert "key2" in vars_list def test_in_memory_overrides_env(self, variables): """Test that in-memory variables override environment.""" os.environ["TEST_VAR"] = "env_value" try: variables.set_variable("TEST_VAR", "memory_value") value = variables.get_variable("TEST_VAR") assert value == "memory_value" finally: del os.environ["TEST_VAR"] @pytest.mark.asyncio async def test_teardown(self, variables): """Test service teardown clears 
variables.""" variables.set_variable("test_key", "test_value") await variables.teardown() # Variables should be cleared (verify via public API) assert variables.list_variables() == [] assert variables.get_variable("test_key") is None class TestMinimalServicesIntegration: """Integration tests for minimal services working together.""" @pytest.mark.asyncio async def test_all_minimal_services_initialize(self, mock_session_service, mock_settings_service): """Test that all minimal services can be initialized.""" storage = LocalStorageService(mock_session_service, mock_settings_service) telemetry = TelemetryService() tracing = TracingService() variables = VariableService() assert storage.ready assert telemetry.ready assert tracing.ready assert variables.ready @pytest.mark.asyncio async def test_minimal_services_teardown_all(self, mock_session_service, mock_settings_service): """Test tearing down all minimal services.""" storage = LocalStorageService(mock_session_service, mock_settings_service) telemetry = TelemetryService() tracing = TracingService() variables = VariableService() # Should all teardown without errors await storage.teardown() await telemetry.teardown() await tracing.teardown() await variables.teardown() @pytest.mark.asyncio async def test_storage_with_tracing(self, mock_session_service, mock_settings_service): """Test using storage with tracing.""" storage = LocalStorageService(mock_session_service, mock_settings_service) tracing = TracingService() tracing.add_log("storage_test", {"operation": "save", "flow_id": "123"}) await storage.save_file("flow_123", "test.txt", b"content") tracing.add_log("storage_test", {"operation": "saved", "flow_id": "123"}) # Should complete without errors assert await storage.get_file("flow_123", "test.txt") == b"content"
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/tests/unit/services/test_minimal_services.py", "license": "MIT License", "lines": 205, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/tests/unit/services/test_service_manager.py
"""Tests for the ServiceManager plugin system.

Covers plugin registration, config-file discovery (lfx.toml / pyproject.toml),
service creation with dependency injection, conflict resolution, teardown,
and end-to-end lifecycle scenarios using the real service implementations.
"""

from pathlib import Path

import pytest

from lfx.services.base import Service
from lfx.services.manager import NoFactoryRegisteredError, ServiceManager
from lfx.services.schema import ServiceType
from lfx.services.storage.local import LocalStorageService
from lfx.services.telemetry.service import TelemetryService
from lfx.services.tracing.service import TracingService
from lfx.services.variable.service import VariableService

from .conftest import MockSessionService


@pytest.fixture
def service_manager():
    """Create a fresh ServiceManager for each test."""
    import asyncio

    manager = ServiceManager()
    # A session service is required by several services; register a mock so
    # every test starts with exactly one pre-registered service class.
    manager.register_service_class(ServiceType.SESSION_SERVICE, MockSessionService, override=True)
    yield manager
    # Tear down synchronously after the (sync) test body finishes.
    asyncio.run(manager.teardown())


@pytest.fixture
def temp_config_dir(tmp_path):
    """Create a temporary config directory."""
    config_dir = tmp_path / "config"
    config_dir.mkdir()
    return config_dir


class TestServiceRegistration:
    """Tests for service registration with real implementations."""

    def test_register_storage_service(self, service_manager):
        """Test registering the real LocalStorageService."""
        service_manager.register_service_class(ServiceType.STORAGE_SERVICE, LocalStorageService, override=True)

        assert ServiceType.STORAGE_SERVICE in service_manager.service_classes
        assert service_manager.service_classes[ServiceType.STORAGE_SERVICE] == LocalStorageService

    def test_register_multiple_real_services(self, service_manager):
        """Test registering multiple real services."""
        service_manager.register_service_class(ServiceType.STORAGE_SERVICE, LocalStorageService, override=True)
        service_manager.register_service_class(ServiceType.TELEMETRY_SERVICE, TelemetryService, override=True)
        service_manager.register_service_class(ServiceType.TRACING_SERVICE, TracingService, override=True)
        service_manager.register_service_class(ServiceType.VARIABLE_SERVICE, VariableService, override=True)

        # 4 services + SESSION_SERVICE from fixture = 5
        assert len(service_manager.service_classes) == 5
        assert service_manager.service_classes[ServiceType.STORAGE_SERVICE] == LocalStorageService
        assert service_manager.service_classes[ServiceType.TELEMETRY_SERVICE] == TelemetryService
        assert service_manager.service_classes[ServiceType.TRACING_SERVICE] == TracingService
        assert service_manager.service_classes[ServiceType.VARIABLE_SERVICE] == VariableService

    def test_register_service_class_no_override(self, service_manager):
        """Test that override=False prevents replacement."""
        service_manager.register_service_class(ServiceType.STORAGE_SERVICE, LocalStorageService, override=True)

        # Try to register different class with override=False
        service_manager.register_service_class(ServiceType.STORAGE_SERVICE, TelemetryService, override=False)

        # Should still have the original
        assert service_manager.service_classes[ServiceType.STORAGE_SERVICE] == LocalStorageService

    def test_register_service_class_with_override(self, service_manager):
        """Test that override=True replaces existing registration."""
        service_manager.register_service_class(ServiceType.TELEMETRY_SERVICE, TelemetryService, override=True)

        # Override with different service
        service_manager.register_service_class(ServiceType.TELEMETRY_SERVICE, TracingService, override=True)

        # Should have the new one
        assert service_manager.service_classes[ServiceType.TELEMETRY_SERVICE] == TracingService

    def test_cannot_register_settings_service(self, service_manager):
        """Test that settings service cannot be registered via plugins."""
        with pytest.raises(ValueError, match="Settings service cannot be registered"):
            service_manager.register_service_class(ServiceType.SETTINGS_SERVICE, LocalStorageService)


class TestPluginDiscovery:
    """Tests for plugin discovery with real service paths."""

    def test_discover_storage_from_config_file(self, service_manager, temp_config_dir):
        """Test discovering LocalStorageService from lfx.toml."""
        config_file = temp_config_dir / "lfx.toml"
        config_file.write_text(
            """
[services]
storage_service = "lfx.services.storage.local:LocalStorageService"
"""
        )

        service_manager.discover_plugins(temp_config_dir)

        assert ServiceType.STORAGE_SERVICE in service_manager.service_classes
        assert service_manager.service_classes[ServiceType.STORAGE_SERVICE] == LocalStorageService

    def test_discover_multiple_services_from_config(self, service_manager, temp_config_dir):
        """Test discovering multiple real services from config."""
        config_file = temp_config_dir / "lfx.toml"
        config_file.write_text(
            """
[services]
storage_service = "lfx.services.storage.local:LocalStorageService"
telemetry_service = "lfx.services.telemetry.service:TelemetryService"
tracing_service = "lfx.services.tracing.service:TracingService"
variable_service = "lfx.services.variable.service:VariableService"
"""
        )

        service_manager.discover_plugins(temp_config_dir)

        assert ServiceType.STORAGE_SERVICE in service_manager.service_classes
        assert ServiceType.TELEMETRY_SERVICE in service_manager.service_classes
        assert ServiceType.TRACING_SERVICE in service_manager.service_classes
        assert ServiceType.VARIABLE_SERVICE in service_manager.service_classes

    def test_discover_from_pyproject_toml(self, service_manager, temp_config_dir):
        """Test discovering services from pyproject.toml."""
        config_file = temp_config_dir / "pyproject.toml"
        config_file.write_text(
            """
[tool.lfx.services]
storage_service = "lfx.services.storage.local:LocalStorageService"
"""
        )

        service_manager.discover_plugins(temp_config_dir)

        assert ServiceType.STORAGE_SERVICE in service_manager.service_classes

    def test_lfx_toml_takes_precedence_over_pyproject(self, service_manager, temp_config_dir):
        """Test that lfx.toml is preferred over pyproject.toml."""
        # Create both files
        (temp_config_dir / "lfx.toml").write_text(
            """
[services]
storage_service = "lfx.services.storage.local:LocalStorageService"
"""
        )
        (temp_config_dir / "pyproject.toml").write_text(
            """
[tool.lfx.services]
telemetry_service = "lfx.services.telemetry.service:TelemetryService"
"""
        )

        service_manager.discover_plugins(temp_config_dir)

        # Should have loaded from lfx.toml (storage_service)
        assert ServiceType.STORAGE_SERVICE in service_manager.service_classes
        # Should NOT have loaded from pyproject.toml (telemetry_service)
        assert ServiceType.TELEMETRY_SERVICE not in service_manager.service_classes

    def test_discover_plugins_only_once(self, service_manager, temp_config_dir):
        """Test that plugin discovery only runs once."""
        config_file = temp_config_dir / "lfx.toml"
        config_file.write_text(
            """
[services]
storage_service = "lfx.services.storage.local:LocalStorageService"
"""
        )

        service_manager.discover_plugins(temp_config_dir)
        initial_count = len(service_manager.service_classes)

        # Try to discover again
        service_manager.discover_plugins(temp_config_dir)

        # Should not have changed
        assert len(service_manager.service_classes) == initial_count

    def test_invalid_service_key_in_config(self, service_manager, temp_config_dir):
        """Test that invalid service keys are ignored."""
        config_file = temp_config_dir / "lfx.toml"
        config_file.write_text(
            """
[services]
invalid_service_key = "some.module:SomeClass"  # pragma: allowlist secret
storage_service = "lfx.services.storage.local:LocalStorageService"
"""
        )

        # Should not raise, just log warning
        service_manager.discover_plugins(temp_config_dir)

        # Valid service should still be registered
        assert ServiceType.STORAGE_SERVICE in service_manager.service_classes

    def test_invalid_import_path_in_config(self, service_manager, temp_config_dir):
        """Test that invalid import paths are handled gracefully."""
        config_file = temp_config_dir / "lfx.toml"
        config_file.write_text(
            """
[services]
storage_service = "nonexistent.module:NonexistentClass"
"""
        )

        # Should not raise, just log warning
        service_manager.discover_plugins(temp_config_dir)

        # Service should not be registered
        assert ServiceType.STORAGE_SERVICE not in service_manager.service_classes


class TestServiceCreation:
    """Tests for creating real services with dependency injection."""

    def test_create_storage_service(self, service_manager):
        """Test creating LocalStorageService."""
        service_manager.register_service_class(ServiceType.STORAGE_SERVICE, LocalStorageService)

        service = service_manager.get(ServiceType.STORAGE_SERVICE)

        assert isinstance(service, LocalStorageService)
        assert service.ready is True
        assert service.name == "storage_service"

    def test_create_telemetry_service(self, service_manager):
        """Test creating TelemetryService."""
        service_manager.register_service_class(ServiceType.TELEMETRY_SERVICE, TelemetryService)

        service = service_manager.get(ServiceType.TELEMETRY_SERVICE)

        assert isinstance(service, TelemetryService)
        assert service.ready is True
        assert service.name == "telemetry_service"

    def test_create_tracing_service(self, service_manager):
        """Test creating TracingService."""
        service_manager.register_service_class(ServiceType.TRACING_SERVICE, TracingService)

        service = service_manager.get(ServiceType.TRACING_SERVICE)

        assert isinstance(service, TracingService)
        assert service.ready is True
        assert service.name == "tracing_service"

    def test_create_variable_service(self, service_manager):
        """Test creating VariableService."""
        service_manager.register_service_class(ServiceType.VARIABLE_SERVICE, VariableService)

        service = service_manager.get(ServiceType.VARIABLE_SERVICE)

        assert isinstance(service, VariableService)
        assert service.ready is True
        assert service.name == "variable_service"

    def test_create_service_with_settings_dependency(self, service_manager):
        """Test creating a service that depends on settings."""

        # Create a real service that needs settings
        class ServiceWithSettings(Service):
            @property
            def name(self) -> str:
                return "test_service"

            def __init__(self, settings_service):
                super().__init__()
                self.settings = settings_service
                self.set_ready()

            async def teardown(self) -> None:
                pass

        service_manager.register_service_class(ServiceType.STORAGE_SERVICE, ServiceWithSettings)
        service = service_manager.get(ServiceType.STORAGE_SERVICE)

        # Should have settings injected
        from lfx.services.settings.service import SettingsService

        assert isinstance(service, ServiceWithSettings)
        assert isinstance(service.settings, SettingsService)
        assert service.ready is True

    def test_create_service_caching(self, service_manager):
        """Test that services are cached (singleton)."""
        service_manager.register_service_class(ServiceType.STORAGE_SERVICE, LocalStorageService)

        service1 = service_manager.get(ServiceType.STORAGE_SERVICE)
        service2 = service_manager.get(ServiceType.STORAGE_SERVICE)

        assert service1 is service2

    def test_settings_service_always_uses_factory(self, service_manager):
        """Test that settings service always uses factory."""
        service = service_manager.get(ServiceType.SETTINGS_SERVICE)

        from lfx.services.settings.service import SettingsService

        assert isinstance(service, SettingsService)

    def test_fallback_to_factory_if_no_plugin(self, service_manager):
        """Test that services fall back to factory if no plugin registered."""
        # Don't register any plugin for storage
        # Should fail since there's no factory either
        with pytest.raises(NoFactoryRegisteredError):
            service_manager.get(ServiceType.STORAGE_SERVICE)


class TestConflictResolution:
    """Tests for conflict resolution with real services."""

    def test_direct_registration_overrides_config(self, service_manager, temp_config_dir):
        """Test that direct registration overrides config file."""
        # First load from config
        config_file = temp_config_dir / "lfx.toml"
        config_file.write_text(
            """
[services]
telemetry_service = "lfx.services.telemetry.service:TelemetryService"
"""
        )
        service_manager.discover_plugins(temp_config_dir)

        # Then register via direct call (override=True)
        service_manager.register_service_class(ServiceType.TELEMETRY_SERVICE, TracingService, override=True)

        # Should use the directly registered service
        assert service_manager.service_classes[ServiceType.TELEMETRY_SERVICE] == TracingService


class TestTeardown:
    """Tests for service teardown with real services."""

    @pytest.mark.asyncio
    async def test_teardown_all_services(self, service_manager):
        """Test that teardown clears all services."""
        service_manager.register_service_class(ServiceType.STORAGE_SERVICE, LocalStorageService)
        service_manager.register_service_class(ServiceType.TELEMETRY_SERVICE, TelemetryService)

        service_manager.get(ServiceType.STORAGE_SERVICE)
        service_manager.get(ServiceType.TELEMETRY_SERVICE)

        await service_manager.teardown()

        assert len(service_manager.services) == 0
        assert len(service_manager.factories) == 0

    @pytest.mark.asyncio
    async def test_teardown_calls_service_teardown(self, service_manager):
        """Test that teardown calls each service's teardown method."""
        service_manager.register_service_class(ServiceType.STORAGE_SERVICE, LocalStorageService)
        storage = service_manager.get(ServiceType.STORAGE_SERVICE)

        # Service should exist
        assert storage is not None

        await service_manager.teardown()

        # Services should be cleared
        assert ServiceType.STORAGE_SERVICE not in service_manager.services


class TestConfigDirectorySource:
    """Tests for config_dir parameter with real services."""

    def test_config_dir_from_settings_service(self, service_manager):
        """Test that config_dir comes from settings service."""
        # Create settings service first
        settings_service = service_manager.get(ServiceType.SETTINGS_SERVICE)

        # Create config in the settings config_dir
        config_dir = Path(settings_service.settings.config_dir)
        config_dir.mkdir(parents=True, exist_ok=True)

        config_file = config_dir / "lfx.toml"
        config_file.write_text(
            """
[services]
storage_service = "lfx.services.storage.local:LocalStorageService"
"""
        )

        # Discover plugins (should use settings.config_dir)
        # NOTE(review): pokes a private flag to re-run discovery — assumes
        # `_plugins_discovered` stays the manager's guard attribute.
        service_manager._plugins_discovered = False  # Reset flag
        service_manager.discover_plugins()

        assert ServiceType.STORAGE_SERVICE in service_manager.service_classes

    def test_config_dir_falls_back_to_cwd(self, service_manager, temp_config_dir):
        """Test that config_dir falls back to cwd if settings not available."""
        # Don't create settings service
        # Should fall back to provided config_dir
        config_file = temp_config_dir / "lfx.toml"
        config_file.write_text(
            """
[services]
storage_service = "lfx.services.storage.local:LocalStorageService"
"""
        )

        service_manager.discover_plugins(temp_config_dir)

        # Should have searched temp_config_dir (passed as param)
        assert service_manager._plugins_discovered is True
        assert ServiceType.STORAGE_SERVICE in service_manager.service_classes


class TestRealWorldScenarios:
    """Tests for realistic usage scenarios."""

    @pytest.mark.asyncio
    async def test_complete_service_lifecycle(self, service_manager):
        """Test complete lifecycle: register, create, use, teardown."""
        # Register
        service_manager.register_service_class(ServiceType.STORAGE_SERVICE, LocalStorageService)

        # Create
        storage = service_manager.get(ServiceType.STORAGE_SERVICE)
        assert storage.ready is True

        # Use
        await storage.save_file("test_flow", "test.txt", b"test content")
        content = await storage.get_file("test_flow", "test.txt")
        assert content == b"test content"

        # Teardown
        await service_manager.teardown()
        assert ServiceType.STORAGE_SERVICE not in service_manager.services

    def test_multiple_services_working_together(self, service_manager):
        """Test multiple services can coexist and work together."""
        # Register all minimal services
        service_manager.register_service_class(ServiceType.STORAGE_SERVICE, LocalStorageService)
        service_manager.register_service_class(ServiceType.TELEMETRY_SERVICE, TelemetryService)
        service_manager.register_service_class(ServiceType.TRACING_SERVICE, TracingService)
        service_manager.register_service_class(ServiceType.VARIABLE_SERVICE, VariableService)

        # Create all services
        storage = service_manager.get(ServiceType.STORAGE_SERVICE)
        telemetry = service_manager.get(ServiceType.TELEMETRY_SERVICE)
        tracing = service_manager.get(ServiceType.TRACING_SERVICE)
        variables = service_manager.get(ServiceType.VARIABLE_SERVICE)

        # All should be ready
        assert storage.ready is True
        assert telemetry.ready is True
        assert tracing.ready is True
        assert variables.ready is True

        # All should be usable
        tracing.add_log("test_trace", {"message": "test"})
        variables.set_variable("TEST_KEY", "test_value")
        assert variables.get_variable("TEST_KEY") == "test_value"

    def test_config_file_with_all_minimal_services(self, service_manager, temp_config_dir):
        """Test loading all minimal services from config file."""
        config_file = temp_config_dir / "lfx.toml"
        config_file.write_text(
            """
[services]
storage_service = "lfx.services.storage.local:LocalStorageService"
telemetry_service = "lfx.services.telemetry.service:TelemetryService"
tracing_service = "lfx.services.tracing.service:TracingService"
variable_service = "lfx.services.variable.service:VariableService"
"""
        )

        service_manager.discover_plugins(temp_config_dir)

        # All services should be registered (4 from config + SESSION_SERVICE from fixture = 5)
        assert len(service_manager.service_classes) == 5

        # Create and verify each service
        storage = service_manager.get(ServiceType.STORAGE_SERVICE)
        telemetry = service_manager.get(ServiceType.TELEMETRY_SERVICE)
        tracing = service_manager.get(ServiceType.TRACING_SERVICE)
        variables = service_manager.get(ServiceType.VARIABLE_SERVICE)

        assert isinstance(storage, LocalStorageService)
        assert isinstance(telemetry, TelemetryService)
        assert isinstance(tracing, TracingService)
        assert isinstance(variables, VariableService)
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/tests/unit/services/test_service_manager.py", "license": "MIT License", "lines": 369, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/base/langflow/processing/expand_flow.py
"""Expand compact flow format to full flow format.

This module provides functionality to expand a minimal/compact flow format
(used by AI agents) into the full flow format expected by Langflow.
"""

from __future__ import annotations

from typing import Any

from pydantic import BaseModel, Field


class CompactNode(BaseModel):
    """A compact node representation for AI-generated flows."""

    id: str
    type: str
    values: dict[str, Any] = Field(default_factory=dict)
    # If edited is True, the node field must contain the full node data
    edited: bool = False
    node: dict[str, Any] | None = None


class CompactEdge(BaseModel):
    """A compact edge representation for AI-generated flows."""

    source: str
    source_output: str
    target: str
    target_input: str


class CompactFlowData(BaseModel):
    """The compact flow data structure."""

    nodes: list[CompactNode]
    edges: list[CompactEdge]


def _get_flat_components(all_types_dict: dict[str, Any]) -> dict[str, Any]:
    """Flatten the component types dict for easy lookup by component name."""
    return {
        comp_name: comp_data
        for components in all_types_dict.values()
        if isinstance(components, dict)
        for comp_name, comp_data in components.items()
    }


def _expand_node(
    compact_node: CompactNode,
    flat_components: dict[str, Any],
) -> dict[str, Any]:
    """Expand a compact node to full node format.

    Args:
        compact_node: The compact node to expand
        flat_components: Flattened component templates dict

    Returns:
        Full node data structure

    Raises:
        ValueError: If component type is not found and node is not edited
    """
    # If the node is edited, it should have full node data
    if compact_node.edited:
        if not compact_node.node:
            msg = f"Node {compact_node.id} is marked as edited but has no node data"
            raise ValueError(msg)
        return {
            "id": compact_node.id,
            "type": "genericNode",
            "data": {
                "type": compact_node.type,
                "node": compact_node.node,
                "id": compact_node.id,
            },
        }

    # Look up component template
    if compact_node.type not in flat_components:
        msg = f"Component type '{compact_node.type}' not found in component index"
        raise ValueError(msg)

    src_data = flat_components[compact_node.type]

    # Cheap copy instead of a full deepcopy of the (potentially large) cached
    # component data: shallow-copy the outer dict and the template mapping,
    # then copy individual field dicts lazily, only when they are mutated.
    # BUG FIX: the previous implementation mutated the shared field dicts in
    # place (`t_value["value"] = ...`), which corrupted the global component
    # index for every later expansion of the same component type.
    template_data = src_data.copy()
    template: dict[str, Any] = dict(src_data.get("template") or {})
    # Always attach the working template, so merged values are never silently
    # dropped when the source component had no "template" key at all.
    template_data["template"] = template

    # Merge user-supplied values into the template
    for field_name, field_value in compact_node.values.items():
        existing = template.get(field_name)
        if existing is None:
            # Unknown field: add as a new field dict
            template[field_name] = {"value": field_value}
        elif isinstance(existing, dict):
            # Copy-on-write: `existing` is still shared with the cached
            # component index, so never mutate it directly.
            updated = existing.copy()
            updated["value"] = field_value
            template[field_name] = updated
        else:
            # Non-dict template entry: replace with the raw value
            # (preserves historical behavior for scalar entries).
            template[field_name] = field_value

    return {
        "id": compact_node.id,
        "type": "genericNode",
        "data": {
            "type": compact_node.type,
            "node": template_data,
            "id": compact_node.id,
        },
    }


def _encode_handle(data: dict[str, Any]) -> str:
    """Encode a handle dict to the special string format used by ReactFlow.

    Uses œ instead of " for JSON encoding.
    """
    from lfx.utils.util import escape_json_dump

    return escape_json_dump(data)


def _build_source_handle_data(
    node_id: str,
    component_type: str,
    output_name: str,
    output_types: list[str],
) -> dict[str, Any]:
    """Build the sourceHandle data dict for an edge."""
    return {
        "dataType": component_type,
        "id": node_id,
        "name": output_name,
        "output_types": output_types,
    }


def _build_target_handle_data(
    node_id: str,
    field_name: str,
    input_types: list[str],
    field_type: str,
) -> dict[str, Any]:
    """Build the targetHandle data dict for an edge."""
    return {
        "fieldName": field_name,
        "id": node_id,
        "inputTypes": input_types,
        "type": field_type,
    }


def _expand_edge(
    compact_edge: CompactEdge,
    expanded_nodes: dict[str, dict[str, Any]],
) -> dict[str, Any]:
    """Expand a compact edge to full edge format.

    Args:
        compact_edge: The compact edge to expand
        expanded_nodes: Dict of node_id -> expanded node data

    Returns:
        Full edge data structure

    Raises:
        ValueError: If the source or target node is not found
    """
    source_node = expanded_nodes.get(compact_edge.source)
    target_node = expanded_nodes.get(compact_edge.target)

    if not source_node:
        msg = f"Source node '{compact_edge.source}' not found"
        raise ValueError(msg)
    if not target_node:
        msg = f"Target node '{compact_edge.target}' not found"
        raise ValueError(msg)

    source_node_data = source_node["data"]["node"]
    target_node_data = target_node["data"]["node"]

    # Find output types from source node
    source_outputs = source_node_data.get("outputs", [])
    source_output = next(
        (o for o in source_outputs if o.get("name") == compact_edge.source_output),
        None,
    )
    output_types = source_output.get("types", []) if source_output else []

    # If no outputs defined, fall back to the component's base classes
    if not output_types:
        output_types = source_node_data.get("base_classes", [])

    # Find input types and field type from target node template.
    # BUG FIX: guard BEFORE accessing the field — the previous code called
    # `target_field.get(...)` first and raised AttributeError when the
    # template entry was a non-dict scalar.
    target_template = target_node_data.get("template", {})
    target_field = target_template.get(compact_edge.target_input, {})
    if isinstance(target_field, dict):
        input_types = target_field.get("input_types", [])
        field_type = target_field.get("type", "str")
        if not input_types:
            # No explicit input_types: fall back to the field's own type
            input_types = [field_type]
    else:
        input_types = []
        field_type = "str"

    source_type = source_node["data"]["type"]

    # Build handle data objects
    source_handle_data = _build_source_handle_data(
        compact_edge.source,
        source_type,
        compact_edge.source_output,
        output_types,
    )
    target_handle_data = _build_target_handle_data(
        compact_edge.target,
        compact_edge.target_input,
        input_types,
        field_type,
    )

    # Encode handles to string format
    source_handle_str = _encode_handle(source_handle_data)
    target_handle_str = _encode_handle(target_handle_data)

    # The encoded handles embed node ids and types, making the id unique
    # per (source, output, target, input) combination.
    edge_id = f"reactflow__edge-{compact_edge.source}{source_handle_str}-{compact_edge.target}{target_handle_str}"

    return {
        "source": compact_edge.source,
        "sourceHandle": source_handle_str,
        "target": compact_edge.target,
        "targetHandle": target_handle_str,
        "id": edge_id,
        "data": {
            "sourceHandle": source_handle_data,
            "targetHandle": target_handle_data,
        },
        "className": "",
        "selected": False,
        "animated": False,
    }


def expand_compact_flow(
    compact_data: dict[str, Any],
    all_types_dict: dict[str, Any],
) -> dict[str, Any]:
    """Expand a compact flow format to full flow format.

    Args:
        compact_data: The compact flow data with nodes and edges
        all_types_dict: The component types dictionary from component_cache

    Returns:
        Full flow data structure ready for Langflow UI

    Example compact input:
        {
            "nodes": [
                {"id": "1", "type": "ChatInput"},
                {"id": "2", "type": "OpenAIModel", "values": {"model_name": "gpt-4"}}
            ],
            "edges": [
                {"source": "1", "source_output": "message", "target": "2", "target_input": "input_value"}
            ]
        }
    """
    # Parse and validate compact data
    flow_data = CompactFlowData(**compact_data)

    # Flatten components for lookup
    flat_components = _get_flat_components(all_types_dict)

    # Expand nodes, keyed by id so edge expansion can look them up
    expanded_nodes: dict[str, dict[str, Any]] = {
        compact_node.id: _expand_node(compact_node, flat_components) for compact_node in flow_data.nodes
    }

    # Expand edges
    expanded_edges = [_expand_edge(compact_edge, expanded_nodes) for compact_edge in flow_data.edges]

    return {
        "nodes": list(expanded_nodes.values()),
        "edges": expanded_edges,
    }
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/processing/expand_flow.py", "license": "MIT License", "lines": 238, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
langflow-ai/langflow:src/backend/tests/unit/test_expand_flow.py
"""Tests for expand_compact_flow functionality.""" import pytest from fastapi import status from httpx import AsyncClient from langflow.processing.expand_flow import ( CompactEdge, CompactNode, _expand_edge, _expand_node, _get_flat_components, expand_compact_flow, ) # Sample component data mimicking the component_index structure SAMPLE_COMPONENTS = { "inputs": { "ChatInput": { "display_name": "Chat Input", "description": "Receives text input from user", "template": { "_type": "ChatInput", "input_value": { "type": "str", "required": False, "value": "", "display_name": "Input", }, }, "base_classes": ["Message"], "outputs": [ { "name": "message", "display_name": "Message", "types": ["Message"], } ], }, }, "outputs": { "ChatOutput": { "display_name": "Chat Output", "description": "Displays text output to user", "template": { "_type": "ChatOutput", "input_value": { "type": "Message", "required": True, "value": "", "display_name": "Text", "input_types": ["Message"], }, }, "base_classes": ["Message"], "outputs": [ { "name": "message", "display_name": "Message", "types": ["Message"], } ], }, }, "models": { "OpenAIModel": { "display_name": "OpenAI", "description": "OpenAI language models", "template": { "_type": "OpenAIModel", "model_name": { "type": "str", "required": False, "value": "gpt-4o-mini", "display_name": "Model Name", }, "temperature": { "type": "float", "required": False, "value": 0.1, "display_name": "Temperature", }, "input_value": { "type": "Message", "required": True, "value": "", "display_name": "Input", "input_types": ["Message"], }, }, "base_classes": ["Message", "LanguageModel"], "outputs": [ { "name": "text_output", "display_name": "Text", "types": ["Message"], }, { "name": "model", "display_name": "Model", "types": ["LanguageModel"], }, ], }, }, } class TestGetFlatComponents: def test_flattens_component_dict(self): flat = _get_flat_components(SAMPLE_COMPONENTS) assert "ChatInput" in flat assert "ChatOutput" in flat assert "OpenAIModel" in flat assert 
len(flat) == 3 def test_empty_dict(self): flat = _get_flat_components({}) assert flat == {} class TestExpandNode: def test_expand_simple_node(self): compact = CompactNode(id="1", type="ChatInput") flat = _get_flat_components(SAMPLE_COMPONENTS) expanded = _expand_node(compact, flat) assert expanded["id"] == "1" assert expanded["type"] == "genericNode" assert expanded["data"]["type"] == "ChatInput" assert "template" in expanded["data"]["node"] def test_expand_node_with_values(self): compact = CompactNode( id="2", type="OpenAIModel", values={"model_name": "gpt-4", "temperature": 0.7}, ) flat = _get_flat_components(SAMPLE_COMPONENTS) expanded = _expand_node(compact, flat) assert expanded["data"]["type"] == "OpenAIModel" template = expanded["data"]["node"]["template"] assert template["model_name"]["value"] == "gpt-4" assert template["temperature"]["value"] == 0.7 def test_expand_node_unknown_type_raises(self): compact = CompactNode(id="1", type="UnknownComponent") flat = _get_flat_components(SAMPLE_COMPONENTS) with pytest.raises(ValueError, match="not found in component index"): _expand_node(compact, flat) def test_expand_edited_node(self): custom_node_data = { "template": {"custom_field": {"value": "custom"}}, "outputs": [], } compact = CompactNode( id="1", type="CustomComponent", edited=True, node=custom_node_data, ) flat = _get_flat_components(SAMPLE_COMPONENTS) expanded = _expand_node(compact, flat) assert expanded["data"]["node"] == custom_node_data def test_expand_edited_node_without_node_data_raises(self): compact = CompactNode(id="1", type="CustomComponent", edited=True) flat = _get_flat_components(SAMPLE_COMPONENTS) with pytest.raises(ValueError, match="marked as edited but has no node data"): _expand_node(compact, flat) class TestExpandEdge: def test_expand_edge(self): compact_edge = CompactEdge( source="1", source_output="message", target="2", target_input="input_value", ) expanded_nodes = { "1": { "id": "1", "type": "genericNode", "data": { "type": 
"ChatInput", "node": SAMPLE_COMPONENTS["inputs"]["ChatInput"], }, }, "2": { "id": "2", "type": "genericNode", "data": { "type": "OpenAIModel", "node": SAMPLE_COMPONENTS["models"]["OpenAIModel"], }, }, } expanded = _expand_edge(compact_edge, expanded_nodes) assert expanded["source"] == "1" assert expanded["target"] == "2" assert "sourceHandle" in expanded assert "targetHandle" in expanded assert "id" in expanded assert expanded["id"].startswith("reactflow__edge-") def test_expand_edge_source_handle_format(self): """Test that sourceHandle is a JSON-encoded dict with œ as quotes.""" compact_edge = CompactEdge( source="node1", source_output="message", target="node2", target_input="input_value", ) expanded_nodes = { "node1": { "id": "node1", "type": "genericNode", "data": { "type": "ChatInput", "node": SAMPLE_COMPONENTS["inputs"]["ChatInput"], }, }, "node2": { "id": "node2", "type": "genericNode", "data": { "type": "OpenAIModel", "node": SAMPLE_COMPONENTS["models"]["OpenAIModel"], }, }, } expanded = _expand_edge(compact_edge, expanded_nodes) # sourceHandle is JSON-encoded with œ as quotes source_handle = expanded["sourceHandle"] assert "œdataTypeœ" in source_handle assert "œChatInputœ" in source_handle assert "œnode1œ" in source_handle assert "œmessageœ" in source_handle assert "œMessageœ" in source_handle # data.sourceHandle is the actual dict source_data = expanded["data"]["sourceHandle"] assert source_data["dataType"] == "ChatInput" assert source_data["id"] == "node1" assert source_data["name"] == "message" assert source_data["output_types"] == ["Message"] def test_expand_edge_target_handle_format(self): """Test that targetHandle is a JSON-encoded dict with œ as quotes.""" compact_edge = CompactEdge( source="node1", source_output="message", target="node2", target_input="input_value", ) expanded_nodes = { "node1": { "id": "node1", "type": "genericNode", "data": { "type": "ChatInput", "node": SAMPLE_COMPONENTS["inputs"]["ChatInput"], }, }, "node2": { "id": "node2", 
"type": "genericNode", "data": { "type": "OpenAIModel", "node": SAMPLE_COMPONENTS["models"]["OpenAIModel"], }, }, } expanded = _expand_edge(compact_edge, expanded_nodes) # targetHandle is JSON-encoded with œ as quotes target_handle = expanded["targetHandle"] assert "œfieldNameœ" in target_handle assert "œinput_valueœ" in target_handle assert "œnode2œ" in target_handle assert "œMessageœ" in target_handle # data.targetHandle is the actual dict target_data = expanded["data"]["targetHandle"] assert target_data["fieldName"] == "input_value" assert target_data["id"] == "node2" assert target_data["inputTypes"] == ["Message"] assert target_data["type"] == "Message" def test_expand_edge_with_multiple_output_types(self): """Test edge from component with multiple output types (e.g., OpenAIModel).""" compact_edge = CompactEdge( source="model_node", source_output="model", # The LanguageModel output target="target_node", target_input="some_input", ) expanded_nodes = { "model_node": { "id": "model_node", "type": "genericNode", "data": { "type": "OpenAIModel", "node": SAMPLE_COMPONENTS["models"]["OpenAIModel"], }, }, "target_node": { "id": "target_node", "type": "genericNode", "data": { "type": "ChatOutput", "node": SAMPLE_COMPONENTS["outputs"]["ChatOutput"], }, }, } expanded = _expand_edge(compact_edge, expanded_nodes) # Should use LanguageModel type for "model" output source_handle = expanded["sourceHandle"] assert "LanguageModel" in source_handle def test_expand_edge_fallback_to_base_classes(self): """Test that edge falls back to base_classes when output not found.""" compact_edge = CompactEdge( source="node1", source_output="nonexistent_output", target="node2", target_input="input_value", ) # Component without matching output name expanded_nodes = { "node1": { "id": "node1", "type": "genericNode", "data": { "type": "ChatInput", "node": { "base_classes": ["Message", "Data"], "outputs": [], # No outputs defined "template": {}, }, }, }, "node2": { "id": "node2", "type": 
"genericNode", "data": { "type": "OpenAIModel", "node": SAMPLE_COMPONENTS["models"]["OpenAIModel"], }, }, } expanded = _expand_edge(compact_edge, expanded_nodes) # Should fall back to base_classes source_handle = expanded["sourceHandle"] assert "Message" in source_handle or "Data" in source_handle def test_expand_edge_target_type_fallback(self): """Test that target handle falls back to field type when input_types not present.""" compact_edge = CompactEdge( source="node1", source_output="message", target="node2", target_input="custom_field", ) expanded_nodes = { "node1": { "id": "node1", "type": "genericNode", "data": { "type": "ChatInput", "node": SAMPLE_COMPONENTS["inputs"]["ChatInput"], }, }, "node2": { "id": "node2", "type": "genericNode", "data": { "type": "CustomNode", "node": { "template": { "custom_field": { "type": "str", # No input_types, should use type "value": "", } }, "outputs": [], }, }, }, } expanded = _expand_edge(compact_edge, expanded_nodes) # data.targetHandle should have "str" as the type and inputTypes target_data = expanded["data"]["targetHandle"] assert target_data["type"] == "str" assert target_data["inputTypes"] == ["str"] def test_expand_edge_id_uniqueness(self): """Test that edge IDs are unique for different edges.""" expanded_nodes = { "1": { "id": "1", "type": "genericNode", "data": { "type": "ChatInput", "node": SAMPLE_COMPONENTS["inputs"]["ChatInput"], }, }, "2": { "id": "2", "type": "genericNode", "data": { "type": "OpenAIModel", "node": SAMPLE_COMPONENTS["models"]["OpenAIModel"], }, }, "3": { "id": "3", "type": "genericNode", "data": { "type": "ChatOutput", "node": SAMPLE_COMPONENTS["outputs"]["ChatOutput"], }, }, } edge1 = _expand_edge( CompactEdge(source="1", source_output="message", target="2", target_input="input_value"), expanded_nodes, ) edge2 = _expand_edge( CompactEdge(source="2", source_output="text_output", target="3", target_input="input_value"), expanded_nodes, ) assert edge1["id"] != edge2["id"] def 
test_expand_edge_missing_source_raises(self): compact_edge = CompactEdge( source="missing", source_output="message", target="2", target_input="input_value", ) expanded_nodes = {"2": {"id": "2", "data": {"type": "X", "node": {}}}} with pytest.raises(ValueError, match="Source node 'missing' not found"): _expand_edge(compact_edge, expanded_nodes) def test_expand_edge_missing_target_raises(self): compact_edge = CompactEdge( source="1", source_output="message", target="missing", target_input="input_value", ) expanded_nodes = {"1": {"id": "1", "data": {"type": "X", "node": {}}}} with pytest.raises(ValueError, match="Target node 'missing' not found"): _expand_edge(compact_edge, expanded_nodes) class TestExpandCompactFlow: def test_expand_simple_flow(self): compact_data = { "nodes": [ {"id": "1", "type": "ChatInput"}, {"id": "2", "type": "OpenAIModel", "values": {"model_name": "gpt-4"}}, {"id": "3", "type": "ChatOutput"}, ], "edges": [ { "source": "1", "source_output": "message", "target": "2", "target_input": "input_value", }, { "source": "2", "source_output": "text_output", "target": "3", "target_input": "input_value", }, ], } expanded = expand_compact_flow(compact_data, SAMPLE_COMPONENTS) assert len(expanded["nodes"]) == 3 assert len(expanded["edges"]) == 2 # Check nodes are properly expanded node_types = {n["data"]["type"] for n in expanded["nodes"]} assert node_types == {"ChatInput", "OpenAIModel", "ChatOutput"} # Check values were merged openai_node = next(n for n in expanded["nodes"] if n["data"]["type"] == "OpenAIModel") assert openai_node["data"]["node"]["template"]["model_name"]["value"] == "gpt-4" def test_expand_flow_no_edges(self): compact_data = { "nodes": [{"id": "1", "type": "ChatInput"}], "edges": [], } expanded = expand_compact_flow(compact_data, SAMPLE_COMPONENTS) assert len(expanded["nodes"]) == 1 assert len(expanded["edges"]) == 0 def test_expand_flow_unknown_component_raises(self): compact_data = { "nodes": [{"id": "1", "type": "UnknownComponent"}], 
"edges": [], } with pytest.raises(ValueError, match="not found in component index"): expand_compact_flow(compact_data, SAMPLE_COMPONENTS) class TestExpandFlowEndpoint: """Integration tests for the /flows/expand endpoint.""" async def test_expand_flow_endpoint_requires_auth(self, client: AsyncClient): """Test that endpoint requires authentication.""" compact_data = { "nodes": [{"id": "1", "type": "ChatInput"}], "edges": [], } response = await client.post("api/v1/flows/expand/", json=compact_data) # Should return 401 or 403 without auth assert response.status_code in [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN] async def test_expand_flow_endpoint_success(self, client: AsyncClient, logged_in_headers): compact_data = { "nodes": [ {"id": "1", "type": "ChatInput"}, ], "edges": [], } response = await client.post("api/v1/flows/expand/", json=compact_data, headers=logged_in_headers) # Component might not exist in test env, but endpoint should work assert response.status_code in [status.HTTP_200_OK, status.HTTP_400_BAD_REQUEST] async def test_expand_flow_endpoint_invalid_component(self, client: AsyncClient, logged_in_headers): compact_data = { "nodes": [{"id": "1", "type": "NonExistentComponent12345"}], "edges": [], } response = await client.post("api/v1/flows/expand/", json=compact_data, headers=logged_in_headers) assert response.status_code == status.HTTP_400_BAD_REQUEST assert "not found" in response.json()["detail"] async def test_expand_flow_endpoint_invalid_edge(self, client: AsyncClient, logged_in_headers): compact_data = { "nodes": [{"id": "1", "type": "ChatInput"}], "edges": [ { "source": "missing", "source_output": "message", "target": "1", "target_input": "input_value", } ], } response = await client.post("api/v1/flows/expand/", json=compact_data, headers=logged_in_headers) # Should fail due to missing source node assert response.status_code == status.HTTP_400_BAD_REQUEST
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/test_expand_flow.py", "license": "MIT License", "lines": 522, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/test_starter_projects_no_hash_history.py
"""Test that starter projects do not contain hash_history in their JSON files. This test ensures that internal component metadata (hash_history) used for tracking component evolution in the component index does not leak into saved flow templates. """ import json from pathlib import Path import pytest def find_hash_history_in_dict(data, path=""): """Recursively search for hash_history keys in nested dictionaries. Args: data: Dictionary or list to search path: Current path in the data structure (for error reporting) Returns: List of paths where hash_history was found """ found_paths = [] if isinstance(data, dict): for key, value in data.items(): current_path = f"{path}.{key}" if path else key if key == "hash_history": found_paths.append(current_path) # Recursively search nested structures found_paths.extend(find_hash_history_in_dict(value, current_path)) elif isinstance(data, list): for i, item in enumerate(data): current_path = f"{path}[{i}]" found_paths.extend(find_hash_history_in_dict(item, current_path)) return found_paths def get_starter_project_files(): """Get all starter project JSON files.""" starter_projects_dir = ( Path(__file__).parent.parent.parent / "base" / "langflow" / "initial_setup" / "starter_projects" ) if not starter_projects_dir.exists(): pytest.skip(f"Starter projects directory not found: {starter_projects_dir}") json_files = list(starter_projects_dir.glob("*.json")) if not json_files: pytest.skip(f"No JSON files found in {starter_projects_dir}") return json_files @pytest.mark.parametrize("project_file", get_starter_project_files()) def test_starter_project_has_no_hash_history(project_file): """Test that a starter project file does not contain hash_history. Hash_history is internal metadata for tracking component code evolution and should only exist in component_index.json, never in saved flows. 
""" with project_file.open(encoding="utf-8") as f: project_data = json.load(f) # Search for any hash_history keys in the entire project structure hash_history_paths = find_hash_history_in_dict(project_data) assert not hash_history_paths, ( f"Found hash_history in {project_file.name} at paths: {hash_history_paths}\n" "hash_history is internal component metadata and should not be in saved flows. " "It should only exist in component_index.json for tracking component evolution." ) def test_all_starter_projects_loaded(): """Sanity check that we're actually testing starter projects.""" project_files = get_starter_project_files() # We should have multiple starter projects assert len(project_files) > 0, "No starter project files found to test" # Print count for visibility
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/test_starter_projects_no_hash_history.py", "license": "MIT License", "lines": 60, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/components/processing/test_text_operations_component.py
"""Tests for TextOperations component. Includes regression tests for QA-reported bugs. """ import pytest from lfx.components.processing.text_operations import TextOperations from lfx.schema.data import Data from lfx.schema.dataframe import DataFrame from lfx.schema.message import Message from tests.base import ComponentTestBaseWithoutClient class TestTextOperationsComponent(ComponentTestBaseWithoutClient): @pytest.fixture def component_class(self): """Return the component class to test.""" return TextOperations @pytest.fixture def default_kwargs(self): """Return the default kwargs for the component.""" return { "text_input": "Hello world", "operation": [{"name": "Word Count"}], } @pytest.fixture def file_names_mapping(self): """Return an empty list since this component doesn't have version-specific files.""" return [] class TestTextOperationsWordCount: def test_word_count_basic(self): """Test basic word count operation.""" component = TextOperations() component.text_input = "Hello world this is a test" component.count_words = True component.count_characters = True component.count_lines = True result = component._word_count(component.text_input) assert result["word_count"] == 6 assert result["unique_words"] == 6 assert result["character_count"] == 26 assert result["character_count_no_spaces"] == 21 assert result["line_count"] == 1 assert result["non_empty_lines"] == 1 def test_word_count_multiline(self): """Test word count with multiple lines.""" component = TextOperations() component.count_words = True component.count_characters = True component.count_lines = True result = component._word_count("Line one\nLine two\n\nLine four") assert result["line_count"] == 4 assert result["non_empty_lines"] == 3 def test_word_count_disabled_options(self): """Test word count with disabled options.""" component = TextOperations() component.count_words = False component.count_characters = False component.count_lines = True result = component._word_count("Hello world") assert 
"word_count" not in result assert "character_count" not in result assert "line_count" in result class TestTextOperationsCaseConversion: def test_case_uppercase(self): """Test uppercase conversion.""" component = TextOperations() component.case_type = "uppercase" result = component._case_conversion("hello world") assert result == "HELLO WORLD" def test_case_lowercase(self): """Test lowercase conversion.""" component = TextOperations() component.case_type = "lowercase" result = component._case_conversion("HELLO WORLD") assert result == "hello world" def test_case_title(self): """Test title case conversion.""" component = TextOperations() component.case_type = "title" result = component._case_conversion("hello world") assert result == "Hello World" def test_case_capitalize(self): """Test capitalize conversion.""" component = TextOperations() component.case_type = "capitalize" result = component._case_conversion("hello world") assert result == "Hello world" def test_case_swapcase(self): """Test swapcase conversion.""" component = TextOperations() component.case_type = "swapcase" result = component._case_conversion("Hello World") assert result == "hELLO wORLD" class TestTextOperationsReplace: def test_replace_simple(self): """Test simple text replacement.""" component = TextOperations() component.search_pattern = "hello" component.replacement_text = "hi" component.use_regex = False result = component._text_replace("hello world") assert result == "hi world" def test_replace_multiple_occurrences(self): """Test replacement of multiple occurrences.""" component = TextOperations() component.search_pattern = "a" component.replacement_text = "X" component.use_regex = False result = component._text_replace("banana") assert result == "bXnXnX" def test_replace_regex(self): """Test regex replacement.""" component = TextOperations() component.search_pattern = r"\d+" component.replacement_text = "NUM" component.use_regex = True result = component._text_replace("abc123def456") assert 
result == "abcNUMdefNUM" def test_replace_empty_pattern(self): """Test replacement with empty pattern.""" component = TextOperations() component.search_pattern = "" component.replacement_text = "X" component.use_regex = False result = component._text_replace("hello") assert result == "hello" def test_replace_invalid_regex(self): """Test replacement with invalid regex pattern.""" component = TextOperations() component.search_pattern = "[invalid" component.replacement_text = "X" component.use_regex = True component.log = lambda _: None result = component._text_replace("hello") assert result == "hello" class TestTextOperationsExtract: def test_extract_numbers(self): """Test extracting numbers from text.""" component = TextOperations() component.extract_pattern = r"\d+" component.max_matches = 10 result = component._text_extract("abc123def456ghi789") assert result == ["123", "456", "789"] def test_extract_with_limit(self): """Test extraction with max matches limit.""" component = TextOperations() component.extract_pattern = r"\d+" component.max_matches = 2 result = component._text_extract("abc123def456ghi789") assert result == ["123", "456"] def test_extract_no_matches(self): """Test extraction with no matches.""" component = TextOperations() component.extract_pattern = r"\d+" component.max_matches = 10 result = component._text_extract("no numbers here") assert result == [] def test_extract_empty_pattern(self): """Test extraction with empty pattern.""" component = TextOperations() component.extract_pattern = "" component.max_matches = 10 result = component._text_extract("hello") assert result == [] def test_extract_invalid_regex(self): """Test extraction with invalid regex raises ValueError (Bug #3 fix).""" component = TextOperations() component.extract_pattern = "[invalid" component.max_matches = 10 with pytest.raises(ValueError, match="Invalid regex pattern"): component._text_extract("hello") class TestTextOperationsHead: def test_head_basic(self): """Test extracting 
head of text.""" component = TextOperations() component.head_characters = 5 result = component._text_head("Hello World") assert result == "Hello" def test_head_longer_than_text(self): """Test head with length longer than text.""" component = TextOperations() component.head_characters = 100 result = component._text_head("Hello") assert result == "Hello" def test_head_zero_characters(self): """Test head with zero characters.""" component = TextOperations() component.head_characters = 0 result = component._text_head("Hello") assert result == "" def test_head_negative_characters(self): """Test head with negative characters raises ValueError (Bug #4 fix).""" component = TextOperations() component.head_characters = -5 with pytest.raises(ValueError, match="non-negative"): component._text_head("Hello") class TestTextOperationsTail: def test_tail_basic(self): """Test extracting tail of text.""" component = TextOperations() component.tail_characters = 5 result = component._text_tail("Hello World") assert result == "World" def test_tail_longer_than_text(self): """Test tail with length longer than text.""" component = TextOperations() component.tail_characters = 100 result = component._text_tail("Hello") assert result == "Hello" def test_tail_zero_characters(self): """Test tail with zero characters.""" component = TextOperations() component.tail_characters = 0 result = component._text_tail("Hello") assert result == "" def test_tail_negative_characters(self): """Test tail with negative characters raises ValueError (Bug #7 fix).""" component = TextOperations() component.tail_characters = -5 with pytest.raises(ValueError, match="non-negative"): component._text_tail("Hello") class TestTextOperationsStrip: def test_strip_both(self): """Test stripping from both sides.""" component = TextOperations() component.strip_mode = "both" component.strip_characters = "" result = component._text_strip(" hello ") assert result == "hello" def test_strip_left(self): """Test stripping from left 
side only.""" component = TextOperations() component.strip_mode = "left" component.strip_characters = "" result = component._text_strip(" hello ") assert result == "hello " def test_strip_right(self): """Test stripping from right side only.""" component = TextOperations() component.strip_mode = "right" component.strip_characters = "" result = component._text_strip(" hello ") assert result == " hello" def test_strip_specific_characters(self): """Test stripping specific characters.""" component = TextOperations() component.strip_mode = "both" component.strip_characters = "xy" result = component._text_strip("xyhelloyx") assert result == "hello" class TestTextOperationsJoin: def test_join_two_texts(self): """Test joining two texts.""" component = TextOperations() component.text_input_2 = "world" result = component._text_join("hello") assert result == "hello\nworld" def test_join_empty_first(self): """Test joining with empty first text.""" component = TextOperations() component.text_input_2 = "world" result = component._text_join("") assert result == "world" def test_join_empty_second(self): """Test joining with empty second text.""" component = TextOperations() component.text_input_2 = "" result = component._text_join("hello") assert result == "hello" def test_join_both_empty(self): """Test joining with both texts empty.""" component = TextOperations() component.text_input_2 = "" result = component._text_join("") assert result == "" class TestTextOperationsClean: def test_clean_extra_spaces(self): """Test removing extra spaces.""" component = TextOperations() component.remove_extra_spaces = True component.remove_special_chars = False component.remove_empty_lines = False result = component._text_clean("hello world") assert result == "hello world" def test_clean_special_chars(self): """Test removing ALL special characters (Bug #10 fix).""" component = TextOperations() component.remove_extra_spaces = False component.remove_special_chars = True component.remove_empty_lines 
= False result = component._text_clean("hello@world#test!") # All special characters are removed including @ # and ! assert result == "helloworldtest" assert "@" not in result assert "#" not in result assert "!" not in result def test_clean_empty_lines(self): """Test removing empty lines.""" component = TextOperations() component.remove_extra_spaces = False component.remove_special_chars = False component.remove_empty_lines = True result = component._text_clean("line1\n\nline2\n\n\nline3") assert result == "line1\nline2\nline3" def test_clean_all_options(self): """Test all cleaning options together.""" component = TextOperations() component.remove_extra_spaces = True component.remove_special_chars = True component.remove_empty_lines = True result = component._text_clean("hello @world\n\ntest!") assert " " not in result assert "@" not in result class TestTextOperationsToDataFrame: def test_dataframe_basic(self): """Test basic table to DataFrame conversion.""" component = TextOperations() component.table_separator = "|" component.has_header = True component.log = lambda _: None table = "| Name | Age |\n| John | 25 |\n| Jane | 30 |" result = component._text_to_dataframe(table) assert isinstance(result, DataFrame) assert len(result) == 2 assert list(result.columns) == ["Name", "Age"] def test_dataframe_no_header(self): """Test DataFrame conversion without header.""" component = TextOperations() component.table_separator = "|" component.has_header = False component.log = lambda _: None table = "| John | 25 |\n| Jane | 30 |" result = component._text_to_dataframe(table) assert isinstance(result, DataFrame) assert len(result) == 2 assert "col_0" in result.columns def test_dataframe_empty_input(self): """Test DataFrame conversion with empty input.""" component = TextOperations() component.table_separator = "|" component.has_header = True component.log = lambda _: None result = component._text_to_dataframe("") assert isinstance(result, DataFrame) assert len(result) == 0 def 
test_dataframe_custom_separator(self): """Test DataFrame conversion with custom separator.""" component = TextOperations() component.table_separator = "," component.has_header = True component.log = lambda _: None table = "Name,Age\nJohn,25\nJane,30" result = component._text_to_dataframe(table) assert isinstance(result, DataFrame) assert len(result) == 2 class TestTextOperationsUpdateBuildConfig: def test_update_build_config_word_count(self): """Test build config update for Word Count operation.""" component = TextOperations() build_config = { field: {"show": True} for field in ["count_words", "count_characters", "count_lines", "case_type", "search_pattern"] } result = component.update_build_config(build_config, [{"name": "Word Count"}], "operation") assert result["count_words"]["show"] is True assert result["count_characters"]["show"] is True assert result["count_lines"]["show"] is True assert result["case_type"]["show"] is False assert result["search_pattern"]["show"] is False def test_update_build_config_case_conversion(self): """Test build config update for Case Conversion operation.""" component = TextOperations() build_config = {field: {"show": True} for field in ["count_words", "case_type", "search_pattern"]} result = component.update_build_config(build_config, [{"name": "Case Conversion"}], "operation") assert result["case_type"]["show"] is True assert result["count_words"]["show"] is False def test_update_build_config_text_replace(self): """Test build config update for Text Replace operation.""" component = TextOperations() build_config = { field: {"show": True} for field in ["search_pattern", "replacement_text", "use_regex", "case_type"] } result = component.update_build_config(build_config, [{"name": "Text Replace"}], "operation") assert result["search_pattern"]["show"] is True assert result["replacement_text"]["show"] is True assert result["use_regex"]["show"] is True assert result["case_type"]["show"] is False class TestTextOperationsUpdateOutputs: def 
test_update_outputs_word_count(self): """Test output update for Word Count operation.""" component = TextOperations() frontend_node = {"outputs": []} result = component.update_outputs(frontend_node, "operation", [{"name": "Word Count"}]) assert len(result["outputs"]) == 1 assert result["outputs"][0].name == "data" def test_update_outputs_dataframe(self): """Test output update for Text to DataFrame operation.""" component = TextOperations() frontend_node = {"outputs": []} result = component.update_outputs(frontend_node, "operation", [{"name": "Text to DataFrame"}]) assert len(result["outputs"]) == 1 assert result["outputs"][0].name == "dataframe" def test_update_outputs_text_join(self): """Test output update for Text Join operation.""" component = TextOperations() frontend_node = {"outputs": []} result = component.update_outputs(frontend_node, "operation", [{"name": "Text Join"}]) assert len(result["outputs"]) == 2 assert result["outputs"][0].name == "text" assert result["outputs"][1].name == "message" def test_update_outputs_message_operations(self): """Test output update for message-returning operations.""" component = TextOperations() for operation in [ "Case Conversion", "Text Replace", "Text Extract", "Text Head", "Text Tail", "Text Strip", "Text Clean", ]: frontend_node = {"outputs": []} result = component.update_outputs(frontend_node, "operation", [{"name": operation}]) assert len(result["outputs"]) == 1 assert result["outputs"][0].name == "message" class TestTextOperationsOutputMethods: def test_get_data_word_count(self): """Test get_data method for Word Count.""" component = TextOperations() component.operation = [{"name": "Word Count"}] component.text_input = "hello world" component.count_words = True component.count_characters = True component.count_lines = True result = component.get_data() assert isinstance(result, Data) assert "word_count" in result.data def test_get_data_non_word_count(self): """Test get_data method for non-Word Count operation.""" 
component = TextOperations() component.operation = [{"name": "Case Conversion"}] component.text_input = "hello" result = component.get_data() assert isinstance(result, Data) assert result.data == {} def test_get_message(self): """Test get_message method.""" component = TextOperations() component.operation = [{"name": "Case Conversion"}] component.text_input = "hello" component.case_type = "uppercase" result = component.get_message() assert isinstance(result, Message) assert result.text == "HELLO" def test_get_dataframe(self): """Test get_dataframe method.""" component = TextOperations() component.operation = [{"name": "Text to DataFrame"}] component.text_input = "| A | B |\n| 1 | 2 |" component.table_separator = "|" component.has_header = True component.log = lambda _: None result = component.get_dataframe() assert isinstance(result, DataFrame) def test_get_text(self): """Test get_text method.""" component = TextOperations() component.operation = [{"name": "Text Join"}] component.text_input = "hello" component.text_input_2 = "world" result = component.get_text() assert isinstance(result, Message) assert result.text == "hello\nworld" # ============================================================================ # Bug Regression Tests # These tests ensure reported bugs remain fixed and don't regress. 
# ============================================================================ class TestBugFixWordCountEmptyText: """Bug #2: Word Count should return zeros for empty text.""" def test_word_count_empty_string_returns_zeros(self): """Empty text should return all zeros, not non-zero values.""" component = TextOperations() component.count_words = True component.count_characters = True component.count_lines = True result = component._word_count("") assert result["word_count"] == 0 assert result["unique_words"] == 0 assert result["character_count"] == 0 assert result["character_count_no_spaces"] == 0 assert result["line_count"] == 0 assert result["non_empty_lines"] == 0 def test_word_count_whitespace_only_returns_zeros(self): """Whitespace-only text should return zeros.""" component = TextOperations() component.count_words = True component.count_characters = True component.count_lines = True result = component._word_count(" \n\t\n ") assert result["word_count"] == 0 assert result["unique_words"] == 0 assert result["character_count"] == 0 assert result["character_count_no_spaces"] == 0 assert result["line_count"] == 0 assert result["non_empty_lines"] == 0 def test_process_text_allows_empty_for_word_count(self): """process_text should allow empty text for Word Count operation.""" component = TextOperations() component.text_input = "" component.operation = [{"name": "Word Count"}] component.count_words = True component.count_characters = True component.count_lines = True result = component.process_text() assert result is not None assert result["word_count"] == 0 class TestBugFixTextJoinEmptyFirst: """Bug #9: Text Join should return second text when first is empty.""" def test_process_text_allows_empty_for_text_join(self): """process_text should allow empty first text for Text Join.""" component = TextOperations() component.text_input = "" component.operation = [{"name": "Text Join"}] component.text_input_2 = "world" result = component.process_text() assert result == 
"world" class TestBugFixTextStripTabs: """Bug #8: Text Strip should remove tab characters.""" def test_strip_removes_tabs(self): """Strip should remove tabs when using default whitespace stripping.""" component = TextOperations() component.strip_mode = "both" component.strip_characters = "" result = component._text_strip("\t\thello world\t\t") assert result == "hello world" def test_strip_removes_mixed_whitespace(self): """Strip should remove all whitespace types including tabs and newlines.""" component = TextOperations() component.strip_mode = "both" component.strip_characters = "" result = component._text_strip("\n\t hello world \t\n") assert result == "hello world" class TestBugFixDataFrameHeaderValidation: """Bug #11: DataFrame should validate header column count matches data.""" def test_header_column_mismatch_raises_error(self): """Mismatched header/data columns should raise clear error.""" component = TextOperations() rows = [ ["Name Age City"], # 1 column (malformed header) ["John", "30", "NYC"], # 3 columns ] with pytest.raises(ValueError, match="Header mismatch"): component._create_dataframe(rows, has_header=True) def test_error_message_includes_column_counts(self): """Error message should include both column counts.""" component = TextOperations() rows = [ ["Name"], # 1 column ["John", "30"], # 2 columns ] with pytest.raises(ValueError, match=r"1 column\(s\) in header.*2 column\(s\) in data"): component._create_dataframe(rows, has_header=True) class TestBugFixInputValidation: """Tests for input validation improvements.""" def test_head_characters_has_range_spec(self): """head_characters should have range_spec with min=0.""" component = TextOperations() head_input = next( (inp for inp in component.inputs if inp.name == "head_characters"), None, ) assert head_input is not None assert head_input.range_spec is not None assert head_input.range_spec.min == 0 def test_tail_characters_has_range_spec(self): """tail_characters should have range_spec with 
min=0.""" component = TextOperations() tail_input = next( (inp for inp in component.inputs if inp.name == "tail_characters"), None, ) assert tail_input is not None assert tail_input.range_spec is not None assert tail_input.range_spec.min == 0 def test_text_input_uses_message_text_input(self): """Bug #1: text_input should use MessageTextInput type.""" component = TextOperations() text_input = next( (inp for inp in component.inputs if inp.name == "text_input"), None, ) assert text_input is not None # MessageTextInput is the correct type for variable input support assert text_input.name == "text_input"
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/components/processing/test_text_operations_component.py", "license": "MIT License", "lines": 592, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/src/lfx/components/processing/text_operations.py
import contextlib
import re
from typing import Any

import pandas as pd

from lfx.custom import Component
from lfx.field_typing import RangeSpec
from lfx.inputs import (
    BoolInput,
    DropdownInput,
    IntInput,
    SortableListInput,
    StrInput,
)
from lfx.inputs.inputs import MultilineInput
from lfx.io import Output
from lfx.schema.data import Data
from lfx.schema.dataframe import DataFrame
from lfx.schema.message import Message


class TextOperations(Component):
    """Multi-operation text processing component.

    A single dropdown (``operation``) selects one of ten text operations; the
    component then shows only the inputs relevant to that operation (via
    ``update_build_config``) and exposes only the matching output type (via
    ``update_outputs``): a ``DataFrame`` for "Text to DataFrame", a ``Data``
    dict for "Word Count", and a ``Message`` for the remaining string
    operations.
    """

    display_name = "Text Operations"
    description = "Perform various text processing operations including text-to-DataFrame conversion."
    icon = "type"
    name = "TextOperations"

    # Configuration for operation-specific input fields:
    # maps each operation name to the dynamic input names it should reveal.
    OPERATION_FIELDS: dict[str, list[str]] = {
        "Text to DataFrame": ["table_separator", "has_header"],
        "Word Count": ["count_words", "count_characters", "count_lines"],
        "Case Conversion": ["case_type"],
        "Text Replace": ["search_pattern", "replacement_text", "use_regex"],
        "Text Extract": ["extract_pattern", "max_matches"],
        "Text Head": ["head_characters"],
        "Text Tail": ["tail_characters"],
        "Text Strip": ["strip_mode", "strip_characters"],
        "Text Join": ["text_input_2"],
        "Text Clean": ["remove_extra_spaces", "remove_special_chars", "remove_empty_lines"],
    }

    # Flat list of every dynamic field above; used to hide all of them
    # before revealing the subset for the currently selected operation.
    ALL_DYNAMIC_FIELDS: list[str] = [
        "table_separator",
        "has_header",
        "count_words",
        "count_characters",
        "count_lines",
        "case_type",
        "search_pattern",
        "replacement_text",
        "use_regex",
        "extract_pattern",
        "max_matches",
        "head_characters",
        "tail_characters",
        "strip_mode",
        "strip_characters",
        "text_input_2",
        "remove_extra_spaces",
        "remove_special_chars",
        "remove_empty_lines",
    ]

    # Dispatch table for "Case Conversion": case_type value -> str method.
    CASE_CONVERTERS: dict[str, Any] = {
        "uppercase": str.upper,
        "lowercase": str.lower,
        "title": str.title,
        "capitalize": str.capitalize,
        "swapcase": str.swapcase,
    }

    # All dynamic inputs start hidden (show=False) and are revealed per
    # operation by update_build_config.
    inputs = [
        MultilineInput(
            name="text_input",
            display_name="Text Input",
            info="The input text to process.",
            required=True,
        ),
        SortableListInput(
            name="operation",
            display_name="Operation",
            placeholder="Select Operation",
            info="Select the text operation to perform.",
            options=[
                {"name": "Word Count", "icon": "hash"},
                {"name": "Case Conversion", "icon": "type"},
                {"name": "Text Replace", "icon": "replace"},
                {"name": "Text Extract", "icon": "search"},
                {"name": "Text Head", "icon": "chevron-left"},
                {"name": "Text Tail", "icon": "chevron-right"},
                {"name": "Text Strip", "icon": "minus"},
                {"name": "Text Join", "icon": "link"},
                {"name": "Text Clean", "icon": "sparkles"},
                {"name": "Text to DataFrame", "icon": "table"},
            ],
            real_time_refresh=True,
            limit=1,
        ),
        StrInput(
            name="table_separator",
            display_name="Table Separator",
            info="Separator used in the table (default: '|').",
            value="|",
            dynamic=True,
            show=False,
        ),
        BoolInput(
            name="has_header",
            display_name="Has Header",
            info="Whether the table has a header row.",
            value=True,
            dynamic=True,
            advanced=True,
            show=False,
        ),
        BoolInput(
            name="count_words",
            display_name="Count Words",
            info="Include word count in analysis.",
            value=True,
            dynamic=True,
            advanced=True,
            show=False,
        ),
        BoolInput(
            name="count_characters",
            display_name="Count Characters",
            info="Include character count in analysis.",
            value=True,
            dynamic=True,
            advanced=True,
            show=False,
        ),
        BoolInput(
            name="count_lines",
            display_name="Count Lines",
            info="Include line count in analysis.",
            value=True,
            dynamic=True,
            advanced=True,
            show=False,
        ),
        DropdownInput(
            name="case_type",
            display_name="Case Type",
            options=["uppercase", "lowercase", "title", "capitalize", "swapcase"],
            value="lowercase",
            info="Type of case conversion to apply.",
            dynamic=True,
            show=False,
        ),
        BoolInput(
            name="use_regex",
            display_name="Use Regex",
            info="Whether to treat search pattern as regex.",
            value=False,
            dynamic=True,
            show=False,
        ),
        StrInput(
            name="search_pattern",
            display_name="Search Pattern",
            info="Text pattern to search for (supports regex).",
            dynamic=True,
            show=False,
        ),
        StrInput(
            name="replacement_text",
            display_name="Replacement Text",
            info="Text to replace the search pattern with.",
            dynamic=True,
            show=False,
        ),
        StrInput(
            name="extract_pattern",
            display_name="Extract Pattern",
            info="Regex pattern to extract from text.",
            dynamic=True,
            show=False,
        ),
        IntInput(
            name="max_matches",
            display_name="Max Matches",
            info="Maximum number of matches to extract.",
            value=10,
            dynamic=True,
            show=False,
        ),
        IntInput(
            name="head_characters",
            display_name="Characters from Start",
            info="Number of characters to extract from the beginning of text. Must be non-negative.",
            value=100,
            dynamic=True,
            show=False,
            range_spec=RangeSpec(min=0, max=1000000, step=1, step_type="int"),
        ),
        IntInput(
            name="tail_characters",
            display_name="Characters from End",
            info="Number of characters to extract from the end of text. Must be non-negative.",
            value=100,
            dynamic=True,
            show=False,
            range_spec=RangeSpec(min=0, max=1000000, step=1, step_type="int"),
        ),
        DropdownInput(
            name="strip_mode",
            display_name="Strip Mode",
            options=["both", "left", "right"],
            value="both",
            info="Which sides to strip whitespace from.",
            dynamic=True,
            show=False,
        ),
        StrInput(
            name="strip_characters",
            display_name="Characters to Strip",
            info="Specific characters to remove (leave empty for whitespace).",
            value="",
            dynamic=True,
            show=False,
        ),
        MultilineInput(
            name="text_input_2",
            display_name="Second Text Input",
            info="Second text to join with the first text.",
            dynamic=True,
            show=False,
        ),
        BoolInput(
            name="remove_extra_spaces",
            display_name="Remove Extra Spaces",
            info="Remove multiple consecutive spaces.",
            value=True,
            dynamic=True,
            show=False,
        ),
        BoolInput(
            name="remove_special_chars",
            display_name="Remove Special Characters",
            info="Remove special characters except alphanumeric and spaces.",
            value=False,
            dynamic=True,
            show=False,
        ),
        BoolInput(
            name="remove_empty_lines",
            display_name="Remove Empty Lines",
            info="Remove empty lines from text.",
            value=False,
            dynamic=True,
            show=False,
        ),
    ]

    # Outputs are created dynamically in update_outputs; none are static.
    outputs = []

    def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:
        """Update build configuration to show/hide relevant inputs based on operation.

        Hides every dynamic field first, then re-shows only the fields mapped
        to the selected operation in OPERATION_FIELDS. Fields are hidden even
        when field_name is not "operation" so stale inputs never linger.
        """
        for field in self.ALL_DYNAMIC_FIELDS:
            if field in build_config:
                build_config[field]["show"] = False

        if field_name != "operation":
            return build_config

        operation_name = self._extract_operation_name(field_value)
        if not operation_name:
            # No operation selected: leave everything hidden.
            return build_config

        fields_to_show = self.OPERATION_FIELDS.get(operation_name, [])
        for field in fields_to_show:
            if field in build_config:
                build_config[field]["show"] = True

        return build_config

    def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any) -> dict:
        """Create dynamic outputs based on selected operation.

        Word Count -> Data output; Text to DataFrame -> DataFrame output;
        Text Join -> both Text and Message outputs; every other operation ->
        a single Message output.
        """
        if field_name != "operation":
            return frontend_node

        # Rebuild the output list from scratch for the new operation.
        frontend_node["outputs"] = []
        operation_name = self._extract_operation_name(field_value)

        if operation_name == "Word Count":
            frontend_node["outputs"].append(Output(display_name="Data", name="data", method="get_data"))
        elif operation_name == "Text to DataFrame":
            frontend_node["outputs"].append(Output(display_name="DataFrame", name="dataframe", method="get_dataframe"))
        elif operation_name == "Text Join":
            frontend_node["outputs"].append(Output(display_name="Text", name="text", method="get_text"))
            frontend_node["outputs"].append(Output(display_name="Message", name="message", method="get_message"))
        elif operation_name:
            frontend_node["outputs"].append(Output(display_name="Message", name="message", method="get_message"))

        return frontend_node

    def _extract_operation_name(self, field_value: Any) -> str:
        """Extract operation name from SortableListInput value.

        The widget supplies a list of {"name": ..., "icon": ...} dicts;
        returns the first entry's name or "" when nothing is selected.
        """
        if isinstance(field_value, list) and len(field_value) > 0:
            return field_value[0].get("name", "")
        return ""

    def get_operation_name(self) -> str:
        """Get the selected operation name."""
        operation_input = getattr(self, "operation", [])
        return self._extract_operation_name(operation_input)

    def process_text(self) -> Any:
        """Process text based on selected operation.

        Dispatches to the handler for the selected operation; returns None for
        empty input (except the operations noted below) and the raw text when
        no operation is selected.
        """
        text = getattr(self, "text_input", "")
        operation = self.get_operation_name()

        # Allow empty text for Text Join (second input might have content)
        # and Word Count (should return zeros for empty text)
        if not text and operation not in ("Text Join", "Word Count"):
            return None

        operation_handlers = {
            "Text to DataFrame": self._text_to_dataframe,
            "Word Count": self._word_count,
            "Case Conversion": self._case_conversion,
            "Text Replace": self._text_replace,
            "Text Extract": self._text_extract,
            "Text Head": self._text_head,
            "Text Tail": self._text_tail,
            "Text Strip": self._text_strip,
            "Text Join": self._text_join,
            "Text Clean": self._text_clean,
        }

        handler = operation_handlers.get(operation)
        if handler:
            return handler(text)
        return text

    def _text_to_dataframe(self, text: str) -> DataFrame:
        """Convert markdown-style table text to DataFrame."""
        # Drop blank lines up front; an all-blank input yields an empty frame.
        lines = [line.strip() for line in text.strip().split("\n") if line.strip()]
        if not lines:
            return DataFrame(pd.DataFrame())

        separator = getattr(self, "table_separator", "|")
        has_header = getattr(self, "has_header", True)

        rows = self._parse_table_rows(lines, separator)
        if not rows:
            return DataFrame(pd.DataFrame())

        df = self._create_dataframe(rows, has_header=has_header)
        self._convert_numeric_columns(df)

        self.log(f"Converted text to DataFrame: {len(df)} rows, {len(df.columns)} columns")
        return DataFrame(df)

    def _parse_table_rows(self, lines: list[str], separator: str) -> list[list[str]]:
        """Parse table lines into rows of cells."""
        rows = []
        for line in lines:
            # Trim leading/trailing separators (markdown-style "|a|b|") before splitting.
            cleaned_line = line.strip(separator)
            cells = [cell.strip() for cell in cleaned_line.split(separator)]
            rows.append(cells)
        return rows

    def _create_dataframe(self, rows: list[list[str]], *, has_header: bool) -> pd.DataFrame:
        """Create DataFrame from parsed rows.

        Raises:
            ValueError: if has_header is True and any data row's column count
                differs from the header's.
        """
        if has_header and len(rows) > 1:
            header = rows[0]
            data_rows = rows[1:]
            header_col_count = len(header)

            # Validate that all data rows have the same number of columns as header
            for i, row in enumerate(data_rows):
                row_col_count = len(row)
                if row_col_count != header_col_count:
                    msg = (
                        f"Header mismatch: {header_col_count} column(s) in header vs "
                        f"{row_col_count} column(s) in data row {i + 1}. "
                        "Please ensure the header has the same number of columns as your data."
                    )
                    raise ValueError(msg)

            return pd.DataFrame(data_rows, columns=header)

        # No usable header: synthesize col_0..col_N names wide enough for the
        # widest row (shorter rows are padded by pandas).
        max_cols = max(len(row) for row in rows) if rows else 0
        columns = [f"col_{i}" for i in range(max_cols)]
        return pd.DataFrame(rows, columns=columns)

    def _convert_numeric_columns(self, df: pd.DataFrame) -> None:
        """Attempt to convert string columns to numeric where possible."""
        for col in df.columns:
            # Non-numeric columns simply keep their string dtype.
            with contextlib.suppress(ValueError, TypeError):
                df[col] = pd.to_numeric(df[col])

    def _word_count(self, text: str) -> dict[str, Any]:
        """Count words, characters, and lines in text."""
        result: dict[str, Any] = {}

        # Handle empty or whitespace-only text - return zeros
        text_str = str(text) if text else ""
        is_empty = not text_str or not text_str.strip()

        if getattr(self, "count_words", True):
            if is_empty:
                result["word_count"] = 0
                result["unique_words"] = 0
            else:
                words = text_str.split()
                result["word_count"] = len(words)
                result["unique_words"] = len(set(words))

        if getattr(self, "count_characters", True):
            if is_empty:
                result["character_count"] = 0
                result["character_count_no_spaces"] = 0
            else:
                result["character_count"] = len(text_str)
                result["character_count_no_spaces"] = len(text_str.replace(" ", ""))

        if getattr(self, "count_lines", True):
            if is_empty:
                result["line_count"] = 0
                result["non_empty_lines"] = 0
            else:
                lines = text_str.split("\n")
                result["line_count"] = len(lines)
                result["non_empty_lines"] = len([line for line in lines if line.strip()])

        return result

    def _case_conversion(self, text: str) -> str:
        """Convert text case."""
        case_type = getattr(self, "case_type", "lowercase")
        converter = self.CASE_CONVERTERS.get(case_type)
        # Unknown case_type falls through to the original text unchanged.
        return converter(text) if converter else text

    def _text_replace(self, text: str) -> str:
        """Replace text patterns."""
        search_pattern = getattr(self, "search_pattern", "")
        if not search_pattern:
            return text

        replacement_text = getattr(self, "replacement_text", "")
        use_regex = getattr(self, "use_regex", False)

        if use_regex:
            try:
                return re.sub(search_pattern, replacement_text, text)
            except re.error as e:
                # Invalid regex is logged, not raised; original text is returned.
                self.log(f"Invalid regex pattern: {e}")
                return text

        return text.replace(search_pattern, replacement_text)

    def _text_extract(self, text: str) -> list[str]:
        """Extract text matching patterns.

        Raises:
            ValueError: if the configured extract_pattern is not valid regex.
        """
        extract_pattern = getattr(self, "extract_pattern", "")
        if not extract_pattern:
            return []

        max_matches = getattr(self, "max_matches", 10)
        try:
            matches = re.findall(extract_pattern, text)
        except re.error as e:
            msg = f"Invalid regex pattern '{extract_pattern}': {e}"
            raise ValueError(msg) from e
        # max_matches <= 0 means "return all matches".
        return matches[:max_matches] if max_matches > 0 else matches

    def _text_head(self, text: str) -> str:
        """Extract characters from the beginning of text.

        Raises:
            ValueError: if head_characters is negative.
        """
        head_characters = getattr(self, "head_characters", 100)
        if head_characters < 0:
            msg = f"Characters from Start must be a non-negative integer, got {head_characters}"
            raise ValueError(msg)
        if head_characters == 0:
            return ""
        return text[:head_characters]

    def _text_tail(self, text: str) -> str:
        """Extract characters from the end of text.

        Raises:
            ValueError: if tail_characters is negative.
        """
        tail_characters = getattr(self, "tail_characters", 100)
        if tail_characters < 0:
            msg = f"Characters from End must be a non-negative integer, got {tail_characters}"
            raise ValueError(msg)
        # Explicit 0 check: text[-0:] would return the whole string.
        if tail_characters == 0:
            return ""
        return text[-tail_characters:]

    def _text_strip(self, text: str) -> str:
        """Remove whitespace or specific characters from text edges."""
        strip_mode = getattr(self, "strip_mode", "both")
        strip_characters = getattr(self, "strip_characters", "")

        # Convert to string to ensure proper handling
        text_str = str(text) if text else ""

        # None means strip all whitespace (spaces, tabs, newlines, etc.)
        chars_to_strip = strip_characters if strip_characters else None

        if strip_mode == "left":
            return text_str.lstrip(chars_to_strip)
        if strip_mode == "right":
            return text_str.rstrip(chars_to_strip)
        # Default: "both"
        return text_str.strip(chars_to_strip)

    def _text_join(self, text: str) -> str:
        """Join two texts with line break separator."""
        text_input_2 = getattr(self, "text_input_2", "")
        text1 = str(text) if text else ""
        text2 = str(text_input_2) if text_input_2 else ""
        if text1 and text2:
            return f"{text1}\n{text2}"
        # When one side is empty, return whichever has content (or "").
        return text1 or text2

    def _text_clean(self, text: str) -> str:
        """Clean text by removing extra spaces, special chars, etc."""
        result = text

        if getattr(self, "remove_extra_spaces", True):
            result = re.sub(r"\s+", " ", result)

        if getattr(self, "remove_special_chars", False):
            # Remove ALL special characters except alphanumeric and spaces
            result = re.sub(r"[^\w\s]", "", result)

        if getattr(self, "remove_empty_lines", False):
            lines = [line for line in result.split("\n") if line.strip()]
            result = "\n".join(lines)

        return result

    def _format_result_as_text(self, result: Any) -> str:
        """Format result as text string."""
        if result is None:
            return ""
        if isinstance(result, list):
            return "\n".join(str(item) for item in result)
        return str(result)

    def get_dataframe(self) -> DataFrame:
        """Return result as DataFrame - only for Text to DataFrame operation."""
        if self.get_operation_name() != "Text to DataFrame":
            return DataFrame(pd.DataFrame())

        text = getattr(self, "text_input", "")
        if not text:
            return DataFrame(pd.DataFrame())

        return self._text_to_dataframe(text)

    def get_text(self) -> Message:
        """Return result as Message - for text operations only."""
        result = self.process_text()
        return Message(text=self._format_result_as_text(result))

    def get_data(self) -> Data:
        """Return result as Data object - only for Word Count operation."""
        if self.get_operation_name() != "Word Count":
            return Data(data={})

        result = self.process_text()
        if result is None:
            return Data(data={})
        if isinstance(result, dict):
            return Data(data=result)
        if isinstance(result, list):
            return Data(data={"items": result})
        return Data(data={"result": str(result)})

    def get_message(self) -> Message:
        """Return result as simple message with the processed text."""
        result = self.process_text()
        return Message(text=self._format_result_as_text(result))
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/src/lfx/components/processing/text_operations.py", "license": "MIT License", "lines": 508, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
langflow-ai/langflow:src/lfx/src/lfx/events/observability/lifecycle_events.py
import functools
from collections.abc import Awaitable, Callable
from typing import Any

from ag_ui.encoder.encoder import EventEncoder

from lfx.log.logger import logger

# Any async method: the only shape @observable can wrap.
AsyncMethod = Callable[..., Awaitable[Any]]

# Module-level AGUI event encoder shared by all observable methods.
encoder: EventEncoder = EventEncoder()


def observable(observed_method: AsyncMethod) -> AsyncMethod:
    """Decorator to make an async method observable by emitting lifecycle events.

    Decorated classes are expected to implement specific methods to emit AGUI events:

    - `before_callback_event(*args, **kwargs)`: Called before the decorated method executes.
      It should return a dictionary representing the event payload.
    - `after_callback_event(result, *args, **kwargs)`: Called after the decorated method
      successfully completes. It should return a dictionary representing the event payload.
      The `result` of the decorated method is passed as the first argument.
    - `error_callback_event(exception, *args, **kwargs)`: (Optional) Called if the decorated
      method raises an exception. It should return a dictionary representing the error
      event payload. The `exception` is passed as the first argument.

    If these methods are implemented, the decorator will call them to generate event payloads.
    If an implementation is missing, the corresponding event publishing will be skipped
    without error. Payloads returned by these methods can include custom metrics by placing
    them under the 'langflow' key within the 'raw_events' dictionary.

    Example:
        class MyClass:
            display_name = "My Observable Class"

            def before_callback_event(self, *args, **kwargs):
                return {"event_name": "my_method_started", "data": {"input_args": args}}

            async def my_method(self, event_manager: EventManager, data: str):
                # ... method logic ...
                return "processed_data"

            def after_callback_event(self, result, *args, **kwargs):
                return {"event_name": "my_method_completed", "data": {"output": result}}

            def error_callback_event(self, exception, *args, **kwargs):
                return {"event_name": "my_method_failed", "error": str(exception)}

            @observable
            async def my_observable_method(self, event_manager: EventManager, data: str):
                # ... method logic ...
                pass
    """

    async def check_event_manager(self, **kwargs):
        # Gate for all lifecycle publishing: the wrapped call must receive a
        # non-None `event_manager` keyword argument; otherwise we warn once
        # per callback and skip event emission entirely.
        if "event_manager" not in kwargs or kwargs["event_manager"] is None:
            await logger.awarning(
                f"EventManager not available/provided, skipping observable event publishing "
                f"from {self.__class__.__name__}"
            )
            return False
        return True

    async def before_callback(self, *args, **kwargs):
        # Emit the "started" lifecycle event, if the class provides a payload builder.
        if not await check_event_manager(self, **kwargs):
            return
        if hasattr(self, "before_callback_event"):
            event_payload = self.before_callback_event(*args, **kwargs)
            # Encoded but not yet published anywhere.
            event_payload = encoder.encode(event_payload)
            # TODO: Publish event per request, would require context based queues
        else:
            await logger.awarning(
                f"before_callback_event not implemented for {self.__class__.__name__}. Skipping event publishing."
            )

    async def after_callback(self, res: Any | None = None, *args, **kwargs):
        # Emit the "finished" lifecycle event with the method's result.
        if not await check_event_manager(self, **kwargs):
            return
        if hasattr(self, "after_callback_event"):
            event_payload = self.after_callback_event(res, *args, **kwargs)
            event_payload = encoder.encode(event_payload)
            # TODO: Publish event per request, would require context based queues
        else:
            await logger.awarning(
                f"after_callback_event not implemented for {self.__class__.__name__}. Skipping event publishing."
            )

    @functools.wraps(observed_method)
    async def wrapper(self, *args, **kwargs):
        await before_callback(self, *args, **kwargs)
        result = None
        try:
            result = await observed_method(self, *args, **kwargs)
            # after_callback only runs on the success path.
            await after_callback(self, result, *args, **kwargs)
        except Exception as e:
            await logger.aerror(f"Exception in {self.__class__.__name__}: {e}")
            if hasattr(self, "error_callback_event"):
                try:
                    event_payload = self.error_callback_event(e, *args, **kwargs)
                    event_payload = encoder.encode(event_payload)
                    # TODO: Publish event per request, would require context based queues
                except Exception as callback_e:  # noqa: BLE001
                    # A failing error callback must not mask the original exception.
                    await logger.aerror(
                        f"Exception during error_callback_event for {self.__class__.__name__}: {callback_e}"
                    )
            # Re-raise the original exception to the caller.
            raise
        return result

    return wrapper
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/src/lfx/events/observability/lifecycle_events.py", "license": "MIT License", "lines": 90, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
langflow-ai/langflow:src/lfx/tests/unit/events/observability/test_lifecycle_events.py
import asyncio
from typing import Any
from unittest.mock import AsyncMock, MagicMock, patch

import pytest
from ag_ui.core import CustomEvent, StepFinishedEvent, StepStartedEvent

# Import the actual decorator we want to test
from lfx.events.observability.lifecycle_events import observable


# Mock classes for dependencies
class MockEventManager:
    """Mock for lfx.events.event_manager.EventManager."""

    def __init__(self):
        # We'll use AsyncMock for publish
        self.publish = AsyncMock()


class MockLogger:
    """Mock for lfx.log.logger.logger."""

    def __init__(self):
        self.awarning = AsyncMock()
        self.aerror = AsyncMock()


# --- Pytest Fixtures ---


@pytest.fixture
def mock_dependencies():
    """Provides mocked instances of external dependencies and patches them."""
    # 1. Logger Mock
    mock_logger_instance = MockLogger()

    # 2. EventManager Mock
    mock_event_manager = MockEventManager()

    # 3. Encoder Mock - create a mock instance with a mocked encode method
    mock_encoder_instance = MagicMock()
    # The encode method should return a string (SSE format)
    mock_encoder_instance.encode = MagicMock(side_effect=lambda payload: f"data: {payload}\n\n")

    # Patch the actual imports in the lifecycle_events module
    with (
        patch("lfx.events.observability.lifecycle_events.logger", mock_logger_instance),
        patch("lfx.events.observability.lifecycle_events.encoder", mock_encoder_instance),
    ):
        yield {
            "event_manager": mock_event_manager,
            "logger": mock_logger_instance,
            "encoder": mock_encoder_instance,
        }


@pytest.fixture(autouse=True)
def reset_mocks(mock_dependencies):
    """Resets the state of the mocks before each test."""
    # Ensure all mocks are reset before test execution
    mock_dependencies["logger"].awarning.reset_mock()
    mock_dependencies["logger"].aerror.reset_mock()
    mock_dependencies["encoder"].encode.reset_mock()


# --- Test Classes (remain largely the same, but now used by pytest functions) ---


class TestClassWithCallbacks:
    # Class implementing all three lifecycle callbacks expected by @observable.
    display_name = "ObservableTest"

    def before_callback_event(self, *args, **kwargs):
        return StepStartedEvent(
            step_name=self.display_name,
            raw_event={"lifecycle": "start", "args_len": len(args), "kw_keys": list(kwargs.keys())},
        )

    def after_callback_event(self, result: Any, *args, **kwargs):  # noqa: ARG002
        return StepFinishedEvent(
            step_name=self.display_name,
            raw_event={"lifecycle": "end", "result": result, "kw_keys": list(kwargs.keys())},
        )

    def error_callback_event(self, exception: Exception, *args, **kwargs):  # noqa: ARG002
        return CustomEvent(
            name="error",
            value={
                "error": str(exception),
                "error_type": type(exception).__name__,
            },
            raw_event={"lifecycle": "error", "kw_keys": list(kwargs.keys())},
        )

    # Mock observable method
    @observable
    async def run_success(self, event_manager: MockEventManager, data: str) -> str:  # noqa: ARG002
        await asyncio.sleep(0.001)
        return f"Processed:{data}"

    @observable
    async def run_exception(self, event_manager: MockEventManager, data: str) -> str:  # noqa: ARG002
        await asyncio.sleep(0.001)
        raise ValueError


class TestClassWithoutCallbacks:
    # Class with NO lifecycle callbacks: @observable should warn and skip publishing.
    display_name = "NonObservableTest"

    @observable
    async def run_success(self, event_manager: MockEventManager, data: str) -> str:  # noqa: ARG002
        await asyncio.sleep(0.001)
        return f"Processed:{data}"


# --- Pytest Test Functions ---


# Use pytest.mark.asyncio for running async functions
@pytest.mark.asyncio
async def test_successful_run_with_callbacks(mock_dependencies):
    instance = TestClassWithCallbacks()
    data = "test_data"
    event_manager = mock_dependencies["event_manager"]

    result = await instance.run_success(event_manager=event_manager, data=data)

    # 1. Assert result
    assert result == f"Processed:{data}"

    # 2. Assert encoder was called twice (once for BEFORE, once for AFTER)
    assert mock_dependencies["encoder"].encode.call_count == 2

    # 3. Verify the encoder was called with the correct payloads
    encoder_instance = mock_dependencies["encoder"]
    assert encoder_instance.encode.call_count == 2

    # Get the actual calls to encode
    encode_calls = encoder_instance.encode.call_args_list

    # First call should be the BEFORE event (StepStartedEvent)
    before_event = encode_calls[0][0][0]
    assert isinstance(before_event, StepStartedEvent)
    assert before_event.step_name == "ObservableTest"
    assert before_event.raw_event["lifecycle"] == "start"
    assert before_event.raw_event["args_len"] == 0
    assert "event_manager" in before_event.raw_event["kw_keys"]
    assert "data" in before_event.raw_event["kw_keys"]

    # Second call should be the AFTER event (StepFinishedEvent)
    after_event = encode_calls[1][0][0]
    assert isinstance(after_event, StepFinishedEvent)
    assert after_event.step_name == "ObservableTest"
    assert after_event.raw_event["lifecycle"] == "end"
    assert after_event.raw_event["result"] == f"Processed:{data}"
    assert "event_manager" in after_event.raw_event["kw_keys"]
    assert "data" in after_event.raw_event["kw_keys"]

    # 4. Assert no warnings or errors were logged
    mock_dependencies["logger"].awarning.assert_not_called()
    mock_dependencies["logger"].aerror.assert_not_called()


@pytest.mark.asyncio
async def test_exception_run_with_callbacks(mock_dependencies):
    instance = TestClassWithCallbacks()
    event_manager = mock_dependencies["event_manager"]

    # The decorator now re-raises the exception after logging and encoding the error event
    with pytest.raises(ValueError):  # noqa: PT011
        await instance.run_exception(event_manager=event_manager, data="fail_data")

    # 1. Assert error was logged
    mock_dependencies["logger"].aerror.assert_called_once()
    # ValueError is raised bare, so str(exception) is "".
    mock_dependencies["logger"].aerror.assert_called_with("Exception in TestClassWithCallbacks: ")

    # 2. Assert encoder was called twice (once for BEFORE event, once for ERROR event)
    assert mock_dependencies["encoder"].encode.call_count == 2

    # 3. Verify the encoder was called with the correct payloads
    encoder_instance = mock_dependencies["encoder"]
    assert encoder_instance.encode.call_count == 2

    # Get the actual calls to encode
    encode_calls = encoder_instance.encode.call_args_list

    # First call should be the BEFORE event (StepStartedEvent)
    before_event = encode_calls[0][0][0]
    assert isinstance(before_event, StepStartedEvent)
    assert before_event.raw_event["lifecycle"] == "start"

    # Second call should be the ERROR event (CustomEvent)
    error_event = encode_calls[1][0][0]
    assert isinstance(error_event, CustomEvent)
    assert error_event.name == "error"
    assert error_event.value["error"] == ""
    assert error_event.value["error_type"] == "ValueError"
    assert error_event.raw_event["lifecycle"] == "error"

    # 4. Assert no warnings were logged
    mock_dependencies["logger"].awarning.assert_not_called()


@pytest.mark.asyncio
async def test_run_without_event_manager(mock_dependencies):
    instance = TestClassWithCallbacks()
    data = "no_manager"

    # No event_manager passed (or explicitly passed as None)
    result = await instance.run_success(event_manager=None, data=data)

    # 1. Assert result is correct
    assert result == f"Processed:{data}"

    # 2. Assert warning for missing EventManager was logged twice (once for before, once for after)
    assert mock_dependencies["logger"].awarning.call_count == 2
    mock_dependencies["logger"].awarning.assert_any_call(
        "EventManager not available/provided, skipping observable event publishing from TestClassWithCallbacks"
    )


@pytest.mark.asyncio
async def test_run_without_callbacks(mock_dependencies):
    instance = TestClassWithoutCallbacks()
    data = "no_callbacks"
    event_manager = mock_dependencies["event_manager"]

    # Run the method with a manager
    result = await instance.run_success(event_manager=event_manager, data=data)

    # 1. Assert result is correct
    assert result == f"Processed:{data}"

    # 2. Assert warnings for missing callbacks were logged
    assert mock_dependencies["logger"].awarning.call_count == 2
    mock_dependencies["logger"].awarning.assert_any_call(
        "before_callback_event not implemented for TestClassWithoutCallbacks. Skipping event publishing."
    )
    mock_dependencies["logger"].awarning.assert_any_call(
        "after_callback_event not implemented for TestClassWithoutCallbacks. Skipping event publishing."
    )

    # 3. Assert no errors were logged
    mock_dependencies["logger"].aerror.assert_not_called()
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/tests/unit/events/observability/test_lifecycle_events.py", "license": "MIT License", "lines": 182, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/base/langflow/api/v2/workflow.py
"""V2 Workflow execution endpoints. This module implements the V2 Workflow API endpoints for executing flows with enhanced error handling, timeout protection, and structured responses. Endpoints: POST /workflow: Execute a workflow (sync, stream, or background modes) GET /workflow: Get workflow job status by job_id POST /workflow/stop: Stop a running workflow execution Features: - Developer API protection (requires developer_api_enabled setting) - Comprehensive error handling with structured error responses - Timeout protection for long-running executions - Support for multiple execution modes (sync, stream, background) - API key authentication required for all endpoints Configuration: EXECUTION_TIMEOUT: Maximum execution time for synchronous workflows (300 seconds) """ from __future__ import annotations import asyncio from copy import deepcopy from typing import Annotated from uuid import UUID, uuid4 from fastapi import APIRouter, BackgroundTasks, Depends, HTTPException, Query, Request, status from fastapi.responses import StreamingResponse from lfx.graph.graph.base import Graph from lfx.schema.workflow import ( WORKFLOW_EXECUTION_RESPONSES, WORKFLOW_STATUS_RESPONSES, JobId, JobStatus, WorkflowExecutionRequest, WorkflowExecutionResponse, WorkflowJobResponse, WorkflowStopRequest, WorkflowStopResponse, ) from lfx.services.deps import get_settings_service, injectable_session_scope_readonly from pydantic_core import ValidationError as PydanticValidationError from sqlalchemy.exc import OperationalError from langflow.api.utils import extract_global_variables_from_headers from langflow.api.v1.schemas import RunResponse from langflow.api.v2.converters import ( create_error_response, parse_flat_inputs, run_response_to_workflow_response, ) from langflow.api.v2.workflow_reconstruction import reconstruct_workflow_response_from_job_id from langflow.exceptions.api import ( WorkflowQueueFullError, WorkflowResourceError, WorkflowServiceUnavailableError, WorkflowTimeoutError, 
WorkflowValidationError, ) from langflow.helpers.flow import get_flow_by_id_or_endpoint_name from langflow.processing.process import process_tweaks, run_graph_internal from langflow.services.auth.utils import api_key_security from langflow.services.database.models.flow.model import FlowRead from langflow.services.database.models.jobs.model import JobType from langflow.services.database.models.user.model import UserRead from langflow.services.deps import get_job_service, get_task_service # Configuration constants EXECUTION_TIMEOUT = 300 # 5 minutes default timeout for sync execution def check_developer_api_enabled() -> None: """Check if developer API is enabled. This dependency function protects all workflow endpoints by verifying that the developer API feature is enabled in the application settings. Raises: HTTPException: 403 Forbidden if developer_api_enabled setting is False Note: This is used as a router-level dependency to protect all workflow endpoints. """ settings = get_settings_service().settings if not settings.developer_api_enabled: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail={ "error": "Developer API disabled", "code": "DEVELOPER_API_DISABLED", "message": "Developer API is not enabled. Contact administrator to enable this feature.", }, ) router = APIRouter(prefix="/workflows", tags=["Workflow"], dependencies=[Depends(check_developer_api_enabled)]) @router.post( "", response_model=None, response_model_exclude_none=True, responses=WORKFLOW_EXECUTION_RESPONSES, summary="Execute Workflow", description="Execute a workflow with support for sync, stream, and background modes", ) async def execute_workflow( workflow_request: WorkflowExecutionRequest, background_tasks: BackgroundTasks, http_request: Request, api_key_user: Annotated[UserRead, Depends(api_key_security)], ) -> WorkflowExecutionResponse | WorkflowJobResponse | StreamingResponse: """Execute a workflow with support for multiple execution modes. 
**background** and **stream** can't be true at the same time. This endpoint supports three execution modes: - **Synchronous** (background=False, stream=False): Returns complete results immediately - **Streaming** (stream=True): Returns server-sent events in real-time (not yet implemented) - **Background** (background=True): Starts job and returns job ID (not yet implemented) Error Handling Strategy: - System errors (404, 500, 503, 504): Returned as HTTP error responses - Component execution errors: Returned as HTTP 200 with errors in response body Args: workflow_request: The workflow execution request containing flow_id, inputs, and mode flags background_tasks: FastAPI background tasks for async operations http_request: The HTTP request object for extracting headers api_key_user: Authenticated user from API key Returns: - WorkflowExecutionResponse: For synchronous execution (HTTP 200) - WorkflowJobResponse: For background execution (HTTP 202, not yet implemented) - StreamingResponse: For streaming execution (not yet implemented) Raises: HTTPException: - 403: Developer API disabled - 404: Flow not found or user lacks access - 500: Invalid flow data or validation error - 501: Streaming or background mode not yet implemented - 503: Database unavailable - 504: Execution timeout exceeded """ job_id = uuid4() try: # Validate flow exists and user has permission flow = await get_flow_by_id_or_endpoint_name(workflow_request.flow_id, api_key_user.id) # Background mode execution if workflow_request.background: return await execute_workflow_background( workflow_request=workflow_request, flow=flow, job_id=job_id, api_key_user=api_key_user, http_request=http_request, ) # Streaming mode (to be implemented) if workflow_request.stream: raise HTTPException( status_code=status.HTTP_501_NOT_IMPLEMENTED, detail={ "error": "Not implemented", "code": "NOT_IMPLEMENTED", "message": "Streaming execution not yet implemented", }, ) # Synchronous execution (default) return await 
execute_sync_workflow_with_timeout( workflow_request=workflow_request, flow=flow, job_id=job_id, api_key_user=api_key_user, background_tasks=background_tasks, http_request=http_request, ) except HTTPException as e: # Reformat 404 from get_flow_by_id_or_endpoint_name to structured format if e.status_code == status.HTTP_404_NOT_FOUND: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail={ "error": "Flow not found", "code": "FLOW_NOT_FOUND", "message": f"Flow '{workflow_request.flow_id}' does not exist. Verify the flow_id and try again.", "flow_id": workflow_request.flow_id, }, ) from e raise except OperationalError as e: raise HTTPException( status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail={ "error": "Service unavailable, Please try again.", "code": "DATABASE_ERROR", "message": f"Failed to fetch flow: {e!s}", "flow_id": workflow_request.flow_id, }, ) from e except WorkflowTimeoutError: raise HTTPException( status_code=status.HTTP_408_REQUEST_TIMEOUT, detail={ "error": "Execution timeout", "code": "EXECUTION_TIMEOUT", "message": f"Workflow execution exceeded {EXECUTION_TIMEOUT} seconds", "job_id": str(job_id), "flow_id": str(workflow_request.flow_id), "timeout_seconds": EXECUTION_TIMEOUT, }, ) from None except (PydanticValidationError, WorkflowValidationError) as e: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail={ "error": "Workflow validation error", "code": "INVALID_FLOW_DATA", "message": str(e), "flow_id": workflow_request.flow_id, }, ) from e except WorkflowServiceUnavailableError as err: raise HTTPException( status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail={ "error": "Service unavailable", "code": "QUEUE_SERVICE_UNAVAILABLE", "message": str(err), "flow_id": workflow_request.flow_id, }, ) from err except (WorkflowResourceError, WorkflowQueueFullError, MemoryError) as err: raise HTTPException( status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail={ "error": "Service busy", "code": "SERVICE_BUSY", "message": "The 
service is currently unable to handle the request due to resource limits.", "flow_id": workflow_request.flow_id, }, ) from err except Exception as err: raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail={ "error": "Internal server error", "code": "INTERNAL_SERVER_ERROR", "message": f"An unexpected error occurred: {err!s}", "flow_id": workflow_request.flow_id, }, ) from err async def execute_sync_workflow_with_timeout( workflow_request: WorkflowExecutionRequest, flow: FlowRead, job_id: UUID, api_key_user: UserRead, background_tasks: BackgroundTasks, http_request: Request, ) -> WorkflowExecutionResponse: """Execute workflow with timeout protection. Args: workflow_request: The workflow execution request flow: The flow to execute job_id: Generated job ID for tracking api_key_user: Authenticated user background_tasks: FastAPI background tasks http_request: The HTTP request object for extracting headers Returns: WorkflowExecutionResponse with complete results Raises: WorkflowTimeoutError: If execution exceeds timeout WorkflowValidationError: If flow validation fails """ try: return await asyncio.wait_for( execute_sync_workflow( workflow_request=workflow_request, flow=flow, job_id=job_id, api_key_user=api_key_user, background_tasks=background_tasks, http_request=http_request, ), timeout=EXECUTION_TIMEOUT, ) except asyncio.TimeoutError as e: raise WorkflowTimeoutError from e async def execute_sync_workflow( workflow_request: WorkflowExecutionRequest, flow: FlowRead, job_id: UUID, api_key_user: UserRead, background_tasks: BackgroundTasks, # noqa: ARG001 http_request: Request, ) -> WorkflowExecutionResponse: """Execute workflow synchronously and return complete results. This function implements a two-tier error handling strategy: 1. System-level errors (validation, graph build): Raised as exceptions 2. 
Component execution errors: Returned in response body with HTTP 200 This approach allows clients to receive partial results even when some components fail, which is useful for debugging and incremental processing. Execution Flow: 1. Parse flat inputs into tweaks and session_id 2. Validate flow data exists 3. Extract context from HTTP headers 4. Build graph from flow data with tweaks applied 5. Identify terminal nodes for execution 6. Execute graph and collect results 7. Convert V1 RunResponse to V2 WorkflowExecutionResponse Args: workflow_request: The workflow execution request with inputs and configuration flow: The flow model from database job_id: Generated job ID for tracking this execution api_key_user: Authenticated user for permission checks background_tasks: FastAPI background tasks (unused in sync mode) http_request: The HTTP request object for extracting headers Returns: WorkflowExecutionResponse: Complete execution results with outputs and metadata Raises: WorkflowValidationError: If flow data is None or graph build fails """ # Parse flat inputs structure tweaks, session_id = parse_flat_inputs(workflow_request.inputs or {}) # Validate flow data - this is a system error, not execution error if flow.data is None: msg = f"Flow {flow.id} has no data. The flow may be corrupted." 
raise WorkflowValidationError(msg) # Extract request-level variables from headers (similar to V1) # Headers with prefix X-LANGFLOW-GLOBAL-VAR-* are extracted and made available to components request_variables = extract_global_variables_from_headers(http_request.headers) # Build context from request variables (similar to V1's _run_flow_internal) context = {"request_variables": request_variables} if request_variables else None # Build graph - system error if this fails try: flow_id_str = str(flow.id) user_id = str(api_key_user.id) # Use deepcopy to prevent mutation of the original flow.data # process_tweaks modifies nested dictionaries in-place graph_data = deepcopy(flow.data) graph_data = process_tweaks(graph_data, tweaks, stream=False) # Pass context to graph (similar to V1's simple_run_flow) # This allows components to access request metadata via graph.context graph = Graph.from_payload( graph_data, flow_id=flow_id_str, user_id=user_id, flow_name=flow.name, context=context ) # Set run_id for tracing/logging (similar to V1's simple_run_flow) graph.set_run_id(job_id) except Exception as e: msg = f"Failed to build graph from flow data: {e!s}" raise WorkflowValidationError(msg) from e # Get terminal nodes - these are the outputs we want terminal_node_ids = graph.get_terminal_nodes() # Execute graph - component errors are caught and returned in response body job_service = get_job_service() await job_service.create_job(job_id=job_id, flow_id=flow_id_str) try: task_result, execution_session_id = await job_service.execute_with_status( job_id=job_id, run_coro_func=run_graph_internal, graph=graph, flow_id=flow_id_str, session_id=session_id, inputs=None, outputs=terminal_node_ids, stream=False, ) # Build RunResponse run_response = RunResponse(outputs=task_result, session_id=execution_session_id) # Convert to WorkflowExecutionResponse return run_response_to_workflow_response( run_response=run_response, flow_id=workflow_request.flow_id, job_id=str(job_id), 
workflow_request=workflow_request, graph=graph, ) except asyncio.CancelledError: # Re-raise CancelledError to allow timeout mechanism to work properly # This ensures asyncio.wait_for() can properly cancel and raise TimeoutError raise except asyncio.TimeoutError as e: # Re-raise TimeoutError to allow timeout mechanism to work properly # This ensures asyncio.wait_for() can properly cancel and raise TimeoutError raise WorkflowTimeoutError from e except Exception as exc: # noqa: BLE001 # Component execution errors - return in response body with HTTP 200 # This allows partial results and detailed error information per component return create_error_response( flow_id=workflow_request.flow_id, job_id=job_id, workflow_request=workflow_request, error=exc, ) async def execute_workflow_background( workflow_request: WorkflowExecutionRequest, flow: FlowRead, job_id: JobId, api_key_user: UserRead, http_request: Request, ) -> WorkflowJobResponse: """Execute workflow in the background and return job ID for the user to track the execution status.""" try: # Parse flat inputs structure tweaks, session_id = parse_flat_inputs(workflow_request.inputs or {}) # Validate flow data if flow.data is None: msg = f"Flow {flow.id} has no data" raise ValueError(msg) # Extract request-level variables from headers (similar to V1) # Headers with prefix X-LANGFLOW-GLOBAL-VAR-* are extracted and made available to components request_variables = extract_global_variables_from_headers(http_request.headers) # Build context from request variables (similar to V1's _run_flow_internal) context = {"request_variables": request_variables} if request_variables else None # Build the graph once flow_id_str = str(flow.id) user_id = str(api_key_user.id) graph_data = deepcopy(flow.data) graph_data = process_tweaks(graph_data, tweaks, stream=False) graph = Graph.from_payload( graph_data, flow_id=flow_id_str, user_id=user_id, flow_name=flow.name, context=context ) graph.set_run_id(job_id) # Get terminal nodes 
terminal_node_ids = graph.get_terminal_nodes() # Launch background task task_service = get_task_service() job_service = get_job_service() # Create job synchronously to ensure it exists before background task starts # and so we can return a valid job status immediately await job_service.create_job( job_id=job_id, flow_id=flow_id_str, ) await task_service.fire_and_forget_task( job_service.execute_with_status, job_id=job_id, run_coro_func=run_graph_internal, graph=graph, flow_id=flow_id_str, session_id=session_id, inputs=None, outputs=terminal_node_ids, stream=False, ) status = JobStatus.QUEUED return WorkflowJobResponse(job_id=str(job_id), flow_id=workflow_request.flow_id, status=status) except (WorkflowResourceError, WorkflowServiceUnavailableError, WorkflowQueueFullError): # Re-raise infrastructure/resource errors to be handled by the endpoint raise except MemoryError as exc: raise WorkflowResourceError from exc @router.get( "", response_model=None, response_model_exclude_none=True, responses=WORKFLOW_STATUS_RESPONSES, summary="Get Workflow Status", description="Get status of workflow job by job ID", ) async def get_workflow_status( api_key_user: Annotated[UserRead, Depends(api_key_security)], job_id: Annotated[JobId | None, Query(description="Job ID to query")] = None, session: Annotated[object, Depends(injectable_session_scope_readonly)] = None, ) -> WorkflowExecutionResponse | WorkflowJobResponse: """Get workflow job status and results. 
Args: api_key_user: Authenticated user from API key job_id: Optional job ID to query specific job session: Database session for querying vertex builds Returns: WorkflowExecutionResponse or reconstructed results Raises: HTTPException: - 400: Job ID not provided - 403: Developer API disabled or unauthorized - 404: Job not found - 408: Execution timeout - 500: Internal server error or Job failure """ if not job_id: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail={ "error": "Missing required parameter", "code": "MISSING_PARAMETER", "message": "Job ID must be provided", }, ) job_service = get_job_service() try: job = await job_service.get_job_by_job_id(job_id=job_id) except Exception as exc: raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail={ "error": "Internal server error", "code": "INTERNAL_SERVER_ERROR", "message": f"Failed to retrieve job from database: {exc!s}", }, ) from exc if not job: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail={ "error": "Workflow job not found", "code": "JOB_NOT_FOUND", "message": f"Workflow job {job_id} not found", "job_id": str(job_id), }, ) # Verify this is a workflow job if job.type != JobType.WORKFLOW: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail={ "error": "Workflow job not found", "code": "JOB_NOT_FOUND", "message": f"Job {job_id} is not a workflow job (type: {job.type})", "job_id": str(job_id), }, ) # Store context for exception handling scope flow_id_str = str(job.flow_id) job_id_str = str(job_id) try: # If job is completed, reconstruct full workflow response from vertex_builds if job.status == JobStatus.COMPLETED: # Get the flow flow = await get_flow_by_id_or_endpoint_name(flow_id_str, api_key_user.id) # Reconstruct response from vertex_build table return await reconstruct_workflow_response_from_job_id( session=session, flow=flow, job_id=job_id_str, user_id=str(api_key_user.id), ) if job.status == JobStatus.FAILED: raise HTTPException( 
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail={ "error": "Job failed", "code": "JOB_FAILED", "message": f"Job {job_id_str} has failed execution.", "job_id": job_id_str, }, ) if job.status == JobStatus.TIMED_OUT: raise HTTPException( status_code=status.HTTP_408_REQUEST_TIMEOUT, detail={ "error": "Execution timeout", "code": "EXECUTION_TIMEOUT", "message": "Workflow execution timed out", "job_id": job_id_str, "flow_id": flow_id_str, }, ) # Default response for active statuses (QUEUED, IN_PROGRESS, etc.) return WorkflowJobResponse( flow_id=flow_id_str, job_id=job_id_str, status=job.status, ) except HTTPException: raise except WorkflowTimeoutError as err: raise HTTPException( status_code=status.HTTP_408_REQUEST_TIMEOUT, detail={ "error": "Execution timeout", "code": "EXECUTION_TIMEOUT", "message": f"Workflow execution exceeded {EXECUTION_TIMEOUT} seconds", "job_id": job_id_str, "flow_id": flow_id_str, "timeout_seconds": EXECUTION_TIMEOUT, }, ) from err except Exception as exc: raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail={ "error": "Internal server error", "code": "INTERNAL_SERVER_ERROR", "message": f"Failed to process job status: {exc!s}", }, ) from exc @router.post( "/stop", summary="Stop Workflow", description="Stop a running workflow execution", ) async def stop_workflow( request: WorkflowStopRequest, api_key_user: Annotated[UserRead, Depends(api_key_security)], # noqa: ARG001 ) -> WorkflowStopResponse: """Stop a running workflow execution by job_id. This endpoint allows clients to gracefully or forcefully stop a running workflow. 
Args: request: Stop request containing job_id and optional force flag api_key_user: Authenticated user from API key Returns: WorkflowStopResponse: Confirmation of stop request with final job status Raises: HTTPException: - 403: Developer API disabled or unauthorized - 404: Job ID not found - 500: Internal server error """ job_id = request.job_id job_service = get_job_service() task_service = get_task_service() try: # 1. Fetch Job job = await job_service.get_job_by_job_id(job_id) except Exception as exc: raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail={ "error": "Internal server error", "code": "INTERNAL_SERVER_ERROR", "message": f"Failed to retrieve job status: {exc!s}", }, ) from exc if not job: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail={ "error": "Job not found", "code": "JOB_NOT_FOUND", "message": f"Job {job_id} not found", "job_id": str(job_id), }, ) if job.status == JobStatus.CANCELLED: return WorkflowStopResponse(job_id=str(job_id), message=f"Job {job_id} is already cancelled.") try: revoked = await task_service.revoke_task(job_id) await job_service.update_job_status(job_id, JobStatus.CANCELLED) message = f"Job {job_id} cancelled successfully." if revoked else f"Job {job_id} is already cancelled." 
return WorkflowStopResponse(job_id=str(job_id), message=message) except asyncio.CancelledError as exc: # Handle system-initiated cancellations that were re-raised # The job status has already been updated to FAILED in jobs/service.py message_code = exc.args[0] if exc.args else "UNKNOWN" raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail={ "error": "Task cancellation error", "code": message_code, "message": f"Job {job_id} was cancelled unexpectedly by the system", "job_id": str(job_id), }, ) from exc except Exception as exc: raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail={ "error": "Internal server error", "code": "INTERNAL_SERVER_ERROR", "message": f"Failed to stop job: {job_id} - {exc!s}", }, ) from exc
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/api/v2/workflow.py", "license": "MIT License", "lines": 641, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
langflow-ai/langflow:src/backend/tests/unit/api/v2/test_workflow.py
"""Comprehensive unit tests for V2 Workflow API endpoints. This test module provides extensive coverage of the workflow execution endpoints, including authentication, authorization, error handling, and execution modes. Test Coverage: - Developer API protection (enabled/disabled scenarios) - API key authentication requirements - Flow validation and error handling - Database error handling - Execution timeout protection - Synchronous execution with various component types - Error response structure validation - Multiple execution modes (sync, stream, background) Test Organization: - TestWorkflowDeveloperAPIProtection: Tests developer API feature flag - TestWorkflowErrorHandling: Tests comprehensive error scenarios - TestWorkflowSyncExecution: Tests successful execution flows Test Strategy: - Uses real database with proper cleanup - Mocks external dependencies (LLM APIs, file operations) - Tests both success and failure paths - Validates response structure and status codes """ import asyncio from datetime import datetime, timezone from unittest.mock import AsyncMock, MagicMock, patch from uuid import UUID, uuid4 import pytest from httpx import AsyncClient from langflow.exceptions.api import WorkflowValidationError from langflow.services.database.models.flow.model import Flow from langflow.services.database.models.jobs.model import JobType from lfx.schema.workflow import JobStatus from lfx.services.deps import session_scope from sqlalchemy.exc import OperationalError class TestWorkflowDeveloperAPIProtection: """Test developer API protection for workflow endpoints.""" @pytest.fixture def mock_settings_dev_api_disabled(self): """Mock settings with developer API disabled.""" with patch("langflow.api.v2.workflow.get_settings_service") as mock_get_settings_service: mock_service = MagicMock() mock_settings = MagicMock() mock_settings.developer_api_enabled = False mock_service.settings = mock_settings mock_get_settings_service.return_value = mock_service yield mock_settings 
async def test_execute_workflow_blocked_when_dev_api_disabled( self, client: AsyncClient, created_api_key, mock_settings_dev_api_disabled, # noqa: ARG002 ): """Test workflow execution is blocked when developer API is disabled.""" request_data = { "flow_id": "550e8400-e29b-41d4-a716-446655440000", "background": False, "stream": False, "inputs": None, } headers = {"x-api-key": created_api_key.api_key} response = await client.post( "api/v2/workflows", json=request_data, headers=headers, ) assert response.status_code == 403 result = response.json() assert result["detail"]["code"] == "DEVELOPER_API_DISABLED" assert "Developer API" in result["detail"]["message"] async def test_stop_workflow_blocked_when_dev_api_disabled( self, client: AsyncClient, created_api_key, mock_settings_dev_api_disabled, # noqa: ARG002 ): """Test POST workflows/stop endpoint is blocked when developer API is disabled.""" request_data = {"job_id": "550e8400-e29b-41d4-a716-446655440001"} headers = {"x-api-key": created_api_key.api_key} response = await client.post( "api/v2/workflows/stop", json=request_data, headers=headers, ) assert response.status_code == 403 result = response.json() assert result["detail"]["code"] == "DEVELOPER_API_DISABLED" @pytest.fixture def mock_settings_dev_api_enabled(self): """Mock settings with developer API enabled.""" with patch("langflow.api.v2.workflow.get_settings_service") as mock_get_settings_service: mock_service = MagicMock() mock_settings = MagicMock() mock_settings.developer_api_enabled = True mock_service.settings = mock_settings mock_get_settings_service.return_value = mock_service yield mock_settings async def test_execute_workflow_allowed_when_dev_api_enabled_flow_not_found( self, client: AsyncClient, created_api_key, mock_settings_dev_api_enabled, # noqa: ARG002 ): """Test POST workflow execution is allowed when developer API is enabled - flow not found.""" request_data = { "flow_id": "550e8400-e29b-41d4-a716-446655440000", # Non-existent flow ID 
"background": False, "stream": False, "inputs": None, } headers = {"x-api-key": created_api_key.api_key} response = await client.post( "api/v2/workflows", json=request_data, headers=headers, ) # Should return 404 because flow doesn't exist, NOT because endpoint is disabled assert response.status_code == 404 result = response.json() assert result["detail"]["code"] == "FLOW_NOT_FOUND" assert "550e8400-e29b-41d4-a716-446655440000" in result["detail"]["flow_id"] async def test_get_workflow_allowed_when_dev_api_enabled_job_not_found( self, client: AsyncClient, created_api_key, mock_settings_dev_api_enabled, # noqa: ARG002 ): """Test GET workflow endpoint is allowed when developer API is enabled - job not found.""" headers = {"x-api-key": created_api_key.api_key} response = await client.get( "api/v2/workflows?job_id=550e8400-e29b-41d4-a716-446655440001", # Non-existent job ID headers=headers, ) assert response.status_code == 404 result = response.json() assert result["detail"]["code"] == "JOB_NOT_FOUND" assert "550e8400-e29b-41d4-a716-446655440001" in result["detail"]["job_id"] assert "This endpoint is not available" not in response.text async def test_stop_workflow_allowed_when_dev_api_enabled_job_not_found( self, client: AsyncClient, created_api_key, mock_settings_dev_api_enabled, # noqa: ARG002 ): """Test POST workflow/stop endpoint is allowed when developer API is enabled - job not found.""" request_data = { "job_id": "550e8400-e29b-41d4-a716-446655440001" # Non-existent job ID } headers = {"x-api-key": created_api_key.api_key} response = await client.post( "api/v2/workflows/stop", json=request_data, headers=headers, ) # Should return 404 because job doesn't exist, NOT because endpoint is disabled assert response.status_code == 404 result = response.json() assert result["detail"]["code"] == "JOB_NOT_FOUND" assert "550e8400-e29b-41d4-a716-446655440001" in result["detail"]["job_id"] assert "This endpoint is not available" not in response.text async def 
test_get_workflow_blocked_when_dev_api_disabled( self, client: AsyncClient, created_api_key, mock_settings_dev_api_disabled, # noqa: ARG002 ): """Test GET workflow endpoint is blocked when developer API is disabled.""" headers = {"x-api-key": created_api_key.api_key} response = await client.get( "api/v2/workflows?job_id=550e8400-e29b-41d4-a716-446655440001", headers=headers, ) assert response.status_code == 403 result = response.json() assert result["detail"]["code"] == "DEVELOPER_API_DISABLED" async def test_execute_workflow_allowed_when_dev_api_enabled_flow_exists( self, client: AsyncClient, created_api_key, mock_settings_dev_api_enabled, # noqa: ARG002 ): """Test POST /workflow allowed when dev API enabled - flow exists and executes.""" flow_id = uuid4() # Create a flow in the database using the established pattern async with session_scope() as session: flow = Flow( id=flow_id, name="Test Flow", description="Test flow for API testing", data={"nodes": [], "edges": []}, user_id=created_api_key.user_id, ) session.add(flow) await session.flush() await session.refresh(flow) try: request_data = {"flow_id": str(flow.id), "background": False, "stream": False, "inputs": None} headers = {"x-api-key": created_api_key.api_key} response = await client.post( "api/v2/workflows", json=request_data, headers=headers, ) # Should return 200 because flow is valid (empty nodes/edges is valid) # The execution will complete successfully with no outputs assert response.status_code == 200 result = response.json() # Verify response contains expected fields with proper structure assert "outputs" in result or "errors" in result if "outputs" in result: assert isinstance(result["outputs"], dict) if "errors" in result: assert isinstance(result["errors"], list) finally: # Clean up the flow following established pattern async with session_scope() as session: flow = await session.get(Flow, flow_id) if flow: await session.delete(flow) async def 
test_get_workflow_allowed_when_dev_api_enabled_job_exists( self, client: AsyncClient, created_api_key, mock_settings_dev_api_enabled, # noqa: ARG002 ): """Test GET /workflow allowed when dev API enabled - job exists (501 not implemented).""" # Since job management isn't implemented, we'll test with any job_id # The endpoint should return 501 regardless of whether the job exists headers = {"x-api-key": created_api_key.api_key} response = await client.get( "api/v2/workflows?job_id=550e8400-e29b-41d4-a716-446655440002", headers=headers, ) assert response.status_code == 404 result = response.json() assert result["detail"]["code"] == "JOB_NOT_FOUND" assert "This endpoint is not available" not in response.text async def test_stop_workflow_allowed_when_dev_api_enabled_job_exists( self, client: AsyncClient, created_api_key, mock_settings_dev_api_enabled, # noqa: ARG002 ): """Test POST /workflow/stop allowed when dev API enabled - job exists (501 not implemented).""" # Since job management isn't implemented, we'll test with any job_id # The endpoint should return 501 regardless of whether the job exists request_data = {"job_id": "550e8400-e29b-41d4-a716-446655440002"} headers = {"x-api-key": created_api_key.api_key} response = await client.post( "api/v2/workflows/stop", json=request_data, headers=headers, ) assert response.status_code == 404 result = response.json() assert result["detail"]["code"] == "JOB_NOT_FOUND" assert "This endpoint is not available" not in response.text async def test_all_endpoints_require_api_key_authentication( self, client: AsyncClient, mock_settings_dev_api_enabled, # noqa: ARG002 ): """Test that all workflow endpoints require API key authentication.""" # Test POST /workflow without API key request_data = { "flow_id": "550e8400-e29b-41d4-a716-446655440000", "background": False, "stream": False, "inputs": None, } response = await client.post( "api/v2/workflows", json=request_data, ) # The API returns 403 Forbidden for missing API keys (not 401 
        # …Unauthorized)
        # NOTE(review): the line above is the tail of a comment cut off at the chunk
        # boundary; the enclosing test method begins before this excerpt.
        # This is the correct behavior according to the api_key_security implementation
        assert response.status_code == 403
        assert "API key must be passed" in response.json()["detail"]


class TestWorkflowErrorHandling:
    """Test comprehensive error handling for workflow endpoints."""

    @pytest.fixture
    def mock_settings_dev_api_enabled(self):
        """Mock settings with developer API enabled."""
        with patch("langflow.api.v2.workflow.get_settings_service") as mock_get_settings_service:
            mock_service = MagicMock()
            mock_settings = MagicMock()
            mock_settings.developer_api_enabled = True
            mock_service.settings = mock_settings
            mock_get_settings_service.return_value = mock_service
            yield mock_settings

    async def test_flow_not_found_returns_404_with_error_code(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test that non-existent flow returns 404 with FLOW_NOT_FOUND error code."""
        flow_id = str(uuid4())
        request_data = {"flow_id": flow_id, "background": False, "stream": False, "inputs": None}
        headers = {"x-api-key": created_api_key.api_key}
        response = await client.post("api/v2/workflows", json=request_data, headers=headers)
        assert response.status_code == 404
        result = response.json()
        assert result["detail"]["code"] == "FLOW_NOT_FOUND"
        assert result["detail"]["flow_id"] == flow_id
        assert "Verify the flow_id" in result["detail"]["message"]

    async def test_database_error_returns_503(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test that database errors return 503 with DATABASE_ERROR code."""
        flow_id = str(uuid4())
        request_data = {"flow_id": flow_id, "background": False, "stream": False, "inputs": None}
        # Mock get_flow_by_id_or_endpoint_name to raise OperationalError
        with patch("langflow.api.v2.workflow.get_flow_by_id_or_endpoint_name") as mock_get_flow:
            mock_get_flow.side_effect = OperationalError("statement", "params", "orig")
            headers = {"x-api-key": created_api_key.api_key}
            response = await client.post("api/v2/workflows", json=request_data, headers=headers)
            assert response.status_code == 503
            result = response.json()
            assert result["detail"]["code"] == "DATABASE_ERROR"
            assert "Failed to fetch flow" in result["detail"]["message"]
            assert result["detail"]["flow_id"] == flow_id

    async def test_flow_with_no_data_returns_500(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test that flow with no data returns 500 with INVALID_FLOW_DATA code."""
        flow_id = uuid4()
        # Create a flow with no data
        async with session_scope() as session:
            flow = Flow(
                id=flow_id,
                name="Flow with no data",
                description="Test flow with no data",
                data=None,  # No data
                user_id=created_api_key.user_id,
            )
            session.add(flow)
            await session.flush()
            await session.refresh(flow)
        try:
            request_data = {"flow_id": str(flow_id), "background": False, "stream": False, "inputs": None}
            headers = {"x-api-key": created_api_key.api_key}
            response = await client.post("api/v2/workflows", json=request_data, headers=headers)
            assert response.status_code == 400
            result = response.json()
            assert result["detail"]["code"] == "INVALID_FLOW_DATA"
            assert "has no data" in result["detail"]["message"]
            assert result["detail"]["flow_id"] == str(flow_id)
        finally:
            # Clean up
            async with session_scope() as session:
                flow = await session.get(Flow, flow_id)
                if flow:
                    await session.delete(flow)

    async def test_graph_build_failure_returns_500(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test that graph build failure returns 500 with INVALID_FLOW_DATA code."""
        flow_id = uuid4()
        # Create a flow with invalid data that will fail validation/graph building
        async with session_scope() as session:
            flow = Flow(
                id=flow_id,
                name="Flow with invalid data",
                description="Test flow with invalid graph data",
                data={"invalid": "data"},  # Invalid graph data (missing 'nodes' field)
                user_id=created_api_key.user_id,
            )
            session.add(flow)
            await session.flush()
            await session.refresh(flow)
        try:
            request_data = {"flow_id": str(flow_id), "background": False, "stream": False, "inputs": None}
            headers = {"x-api-key": created_api_key.api_key}
            response = await client.post("api/v2/workflows", json=request_data, headers=headers)
            assert response.status_code == 400
            result = response.json()
            assert result["detail"]["code"] == "INVALID_FLOW_DATA"
            # The error message should indicate invalid flow data structure
            error_msg = result["detail"]["message"].lower()
            assert "invalid data structure" in error_msg or "must have nodes" in error_msg
            assert result["detail"]["flow_id"] == str(flow_id)
        finally:
            # Clean up
            async with session_scope() as session:
                flow = await session.get(Flow, flow_id)
                if flow:
                    await session.delete(flow)

    async def test_execution_timeout_with_real_delay(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test that execution timeout works with real async delay."""
        flow_id = uuid4()
        # Create a valid flow
        async with session_scope() as session:
            flow = Flow(
                id=flow_id,
                name="Test Flow",
                description="Test flow for timeout",
                data={"nodes": [], "edges": []},
                user_id=created_api_key.user_id,
            )
            session.add(flow)
            await session.flush()
            await session.refresh(flow)
        try:
            request_data = {"flow_id": str(flow_id), "background": False, "stream": False, "inputs": None}

            # Mock execute_sync_workflow to sleep longer than timeout
            async def slow_execution(*args, **kwargs):  # noqa: ARG001
                await asyncio.sleep(2)  # Sleep for 2 seconds
                return MagicMock()

            # Temporarily reduce timeout for testing
            with (
                patch("langflow.api.v2.workflow.execute_sync_workflow", side_effect=slow_execution),
                patch("langflow.api.v2.workflow.EXECUTION_TIMEOUT", 0.5),  # 0.5 second timeout
            ):
                headers = {"x-api-key": created_api_key.api_key}
                response = await client.post("api/v2/workflows", json=request_data, headers=headers)
                assert response.status_code == 408
                result = response.json()
                assert result["detail"]["code"] == "EXECUTION_TIMEOUT"
                assert "exceeded" in result["detail"]["message"]
                assert result["detail"]["flow_id"] == str(flow_id)
                assert "job_id" in result["detail"]
        finally:
            # Clean up
            async with session_scope() as session:
                flow = await session.get(Flow, flow_id)
                if flow:
                    await session.delete(flow)

    async def test_background_mode_returns_501(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test that background mode returns 501 with NOT_IMPLEMENTED code."""
        flow_id = uuid4()
        # Create a valid flow
        async with session_scope() as session:
            flow = Flow(
                id=flow_id,
                name="Test Flow",
                description="Test flow",
                data={"nodes": [], "edges": []},
                user_id=created_api_key.user_id,
            )
            session.add(flow)
            await session.flush()
            await session.refresh(flow)
        try:
            request_data = {
                "flow_id": str(flow_id),
                "background": True,  # Background mode
                "stream": False,
                "inputs": None,
            }
            headers = {"x-api-key": created_api_key.api_key}
            response = await client.post("api/v2/workflows", json=request_data, headers=headers)
            # Now background mode is partially implemented and should NOT return 501
            # It should return a WorkflowJobResponse (wrapped in WorkflowExecutionResponse or similar)
            assert response.status_code == 200
            result = response.json()
            assert result["object"] == "job"
            assert result["status"] == "queued"
            assert result["flow_id"] == str(flow_id)
            assert "links" in result
            assert "status" in result["links"]
        finally:
            # Clean up
            async with session_scope() as session:
                flow = await session.get(Flow, flow_id)
                if flow:
                    await session.delete(flow)

    async def test_streaming_mode_returns_501(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test that streaming mode returns 501 with NOT_IMPLEMENTED code."""
        flow_id = uuid4()
        # Create a valid flow
        async with session_scope() as session:
            flow = Flow(
                id=flow_id,
                name="Test Flow",
                description="Test flow",
                data={"nodes": [], "edges": []},
                user_id=created_api_key.user_id,
            )
            session.add(flow)
            await session.flush()
            await session.refresh(flow)
        try:
            request_data = {
                "flow_id": str(flow_id),
                "background": False,
                "stream": True,  # Streaming mode
                "inputs": None,
            }
            headers = {"x-api-key": created_api_key.api_key}
            response = await client.post("api/v2/workflows", json=request_data, headers=headers)
            assert response.status_code == 501
            result = response.json()
            assert result["detail"]["code"] == "NOT_IMPLEMENTED"
            assert "Streaming execution not yet implemented" in result["detail"]["message"]
        finally:
            # Clean up
            async with session_scope() as session:
                flow = await session.get(Flow, flow_id)
                if flow:
                    await session.delete(flow)

    async def test_error_response_structure(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test that all error responses have consistent structure."""
        flow_id = str(uuid4())
        request_data = {"flow_id": flow_id, "background": False, "stream": False, "inputs": None}
        headers = {"x-api-key": created_api_key.api_key}
        response = await client.post("api/v2/workflows", json=request_data, headers=headers)
        assert response.status_code == 404
        result = response.json()
        # Verify error structure
        assert "detail" in result
        assert "error" in result["detail"]
        assert "code" in result["detail"]
        assert "message" in result["detail"]
        assert "flow_id" in result["detail"]
        # Verify types
        assert isinstance(result["detail"]["error"], str)
        assert isinstance(result["detail"]["code"], str)
        assert isinstance(result["detail"]["message"], str)
        assert isinstance(result["detail"]["flow_id"], str)

    async def test_workflow_validation_error_propagation(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test that WorkflowValidationError is properly caught and converted to 500."""
        flow_id = uuid4()
        # Create a flow
        async with session_scope() as session:
            flow = Flow(
                id=flow_id,
                name="Test Flow",
                description="Test flow",
                data={"nodes": [], "edges": []},
                user_id=created_api_key.user_id,
            )
            session.add(flow)
            await session.flush()
            await session.refresh(flow)
        try:
            request_data = {"flow_id": str(flow_id), "background": False, "stream": False, "inputs": None}
            # Mock execute_sync_workflow to raise WorkflowValidationError
            with patch("langflow.api.v2.workflow.execute_sync_workflow") as mock_execute:
                mock_execute.side_effect = WorkflowValidationError("Test validation error")
                headers = {"x-api-key": created_api_key.api_key}
                response = await client.post("api/v2/workflows", json=request_data, headers=headers)
                assert response.status_code == 400
                result = response.json()
                assert result["detail"]["code"] == "INVALID_FLOW_DATA"
                assert "Test validation error" in result["detail"]["message"]
        finally:
            # Clean up
            async with session_scope() as session:
                flow = await session.get(Flow, flow_id)
                if flow:
                    await session.delete(flow)


# NOTE(review): the statements below were detached from their enclosing test by
# whitespace collapse; their original `async def` line is not visible in this excerpt.
# Test GET /workflow without API key
response = await client.get("api/v2/workflows?job_id=550e8400-e29b-41d4-a716-446655440001")
assert response.status_code == 403
assert "API key must be passed" in response.json()["detail"]


class TestWorkflowSyncExecution:
    """Test synchronous workflow execution with realistic component mocking."""

    @pytest.fixture
    def mock_settings_dev_api_enabled(self):
        """Mock settings with developer API enabled."""
        with patch("langflow.api.v2.workflow.get_settings_service") as mock_get_settings_service:
            mock_service = MagicMock()
            mock_settings = MagicMock()
            mock_settings.developer_api_enabled = True
            mock_service.settings = mock_settings
            mock_get_settings_service.return_value = mock_service
            yield mock_settings

    async def test_sync_execution_with_empty_flow_returns_200(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test sync execution with empty flow returns 200 with empty outputs."""
        flow_id = uuid4()
        async with session_scope() as session:
            flow = Flow(
                id=flow_id,
                name="Empty Flow",
                description="Flow with no nodes",
                data={"nodes": [], "edges": []},
                user_id=created_api_key.user_id,
            )
            session.add(flow)
            await session.flush()
            await session.refresh(flow)
        try:
            request_data = {"flow_id": str(flow_id), "background": False, "stream": False, "inputs": None}
            headers = {"x-api-key": created_api_key.api_key}
            response = await client.post("api/v2/workflows", json=request_data, headers=headers)
            assert response.status_code == 200
            result = response.json()
            # Verify response structure
            assert "flow_id" in result
            assert result["flow_id"] == str(flow_id)
            assert "job_id" in result
            # Verify outputs or errors are present with actual content
            assert "outputs" in result or "errors" in result
            if "outputs" in result:
                assert isinstance(result["outputs"], dict)
            if "errors" in result:
                assert isinstance(result["errors"], list)
            # session_id is only present if provided in inputs
        finally:
            async with session_scope() as session:
                flow = await session.get(Flow, flow_id)
                if flow:
                    await session.delete(flow)

    async def test_sync_execution_component_error_returns_200_with_error_in_body(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test that component execution errors return 200 with error in response body."""
        flow_id = uuid4()
        async with session_scope() as session:
            flow = Flow(
                id=flow_id,
                name="Test Flow",
                description="Flow for testing component errors",
                data={"nodes": [], "edges": []},
                user_id=created_api_key.user_id,
            )
            session.add(flow)
            await session.flush()
            await session.refresh(flow)
        try:
            request_data = {"flow_id": str(flow_id), "background": False, "stream": False, "inputs": None}
            # Mock run_graph_internal to raise a component execution error
            with patch("langflow.api.v2.workflow.run_graph_internal") as mock_run:
                mock_run.side_effect = Exception("Component execution failed: LLM API key not configured")
                headers = {"x-api-key": created_api_key.api_key}
                response = await client.post("api/v2/workflows", json=request_data, headers=headers)
                # Component errors should return 200 with error in body
                assert response.status_code == 200
                result = response.json()
                # Verify error is in response body (via create_error_response)
                assert "errors" in result
                assert len(result["errors"]) > 0
                assert "Component execution failed" in str(result["errors"][0])
                assert result["status"] == "failed"
                assert result["flow_id"] == str(flow_id)
                assert "job_id" in result
        finally:
            async with session_scope() as session:
                flow = await session.get(Flow, flow_id)
                if flow:
                    await session.delete(flow)

    async def test_sync_execution_with_chat_input_output(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test sync execution with ChatInput and ChatOutput components."""
        flow_id = uuid4()
        async with session_scope() as session:
            flow = Flow(
                id=flow_id,
                name="Chat Flow",
                description="Flow with chat input/output",
                data={"nodes": [], "edges": []},
                user_id=created_api_key.user_id,
            )
            session.add(flow)
            await session.flush()
            await session.refresh(flow)
        try:
            # Input format: component_id.param = value
            request_data = {
                "flow_id": str(flow_id),
                "background": False,
                "stream": False,
                "inputs": {
                    "ChatInput-abc123.input_value": "Hello, how are you?",
                    "ChatInput-abc123.session_id": "session-456",
                },
            }
            # Mock successful execution with ChatOutput
            mock_result_data = MagicMock()
            mock_result_data.component_id = "ChatOutput-xyz789"
            mock_result_data.outputs = {"message": {"message": "I'm doing well, thank you for asking!", "type": "text"}}
            mock_result_data.metadata = {}
            # Wrap ResultData in RunOutputs
            mock_run_output = MagicMock()
            mock_run_output.outputs = [mock_result_data]
            with patch("langflow.api.v2.workflow.run_graph_internal") as mock_run:
                # run_graph_internal returns tuple[list[RunOutputs], str]
                mock_run.return_value = ([mock_run_output], "session-456")
                headers = {"x-api-key": created_api_key.api_key}
                response = await client.post("api/v2/workflows", json=request_data, headers=headers)
                assert response.status_code == 200
                result = response.json()
                # Verify response structure
                assert result["flow_id"] == str(flow_id)
                assert "job_id" in result
                assert "outputs" in result
                # Note: Detailed content validation requires proper graph/vertex mocking
                # which is beyond the scope of unit tests. Integration tests should validate content.
                # Verify inputs were echoed back
                assert "inputs" in result
                assert result["inputs"] == request_data["inputs"]
                # Verify session_id is present when provided in inputs
                if "session_id" in result:
                    assert result["session_id"] == "session-456"
        finally:
            async with session_scope() as session:
                flow = await session.get(Flow, flow_id)
                if flow:
                    await session.delete(flow)

    async def test_sync_execution_with_llm_output(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test sync execution with LLM component output including model metadata."""
        flow_id = uuid4()
        async with session_scope() as session:
            flow = Flow(
                id=flow_id,
                name="LLM Flow",
                description="Flow with LLM component",
                data={"nodes": [], "edges": []},
                user_id=created_api_key.user_id,
            )
            session.add(flow)
            await session.flush()
            await session.refresh(flow)
        try:
            request_data = {
                "flow_id": str(flow_id),
                "background": False,
                "stream": False,
                "inputs": {
                    "ChatInput-abc.input_value": "Explain quantum computing",
                    "OpenAIModel-def.temperature": 0.7,
                },
            }
            # Mock LLM execution with model metadata
            mock_result_data = MagicMock()
            mock_result_data.component_id = "OpenAIModel-def"
            mock_result_data.outputs = {
                "model_output": {
                    "message": {
                        "message": "Quantum computing uses quantum mechanics...",
                        "model_name": "gpt-4",
                        "type": "text",
                    }
                }
            }
            mock_result_data.metadata = {"tokens_used": 150}
            # Wrap ResultData in RunOutputs
            mock_run_output = MagicMock()
            mock_run_output.outputs = [mock_result_data]
            with patch("langflow.api.v2.workflow.run_graph_internal") as mock_run:
                # run_graph_internal returns tuple[list[RunOutputs], str]
                mock_run.return_value = ([mock_run_output], "session-789")
                headers = {"x-api-key": created_api_key.api_key}
                response = await client.post("api/v2/workflows", json=request_data, headers=headers)
                assert response.status_code == 200
                result = response.json()
                assert result["flow_id"] == str(flow_id)
                assert "job_id" in result
                assert "outputs" in result
                # Note: Detailed content validation requires proper graph/vertex mocking
        finally:
            async with session_scope() as session:
                flow = await session.get(Flow, flow_id)
                if flow:
                    await session.delete(flow)

    async def test_sync_execution_with_file_save_output(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test sync execution with SaveToFile component."""
        flow_id = uuid4()
        async with session_scope() as session:
            flow = Flow(
                id=flow_id,
                name="File Save Flow",
                description="Flow with file save component",
                data={"nodes": [], "edges": []},
                user_id=created_api_key.user_id,
            )
            session.add(flow)
            await session.flush()
            await session.refresh(flow)
        try:
            request_data = {
                "flow_id": str(flow_id),
                "background": False,
                "stream": False,
                "inputs": {
                    "TextInput-abc.text": "Content to save",
                    "SaveToFile-xyz.file_path": "/tmp/output.txt",  # noqa: S108
                },
            }
            # Mock SaveToFile execution
            mock_result_data = MagicMock()
            mock_result_data.component_id = "SaveToFile-xyz"
            mock_result_data.outputs = {
                "message": {"message": "File saved successfully to /tmp/output.txt", "type": "text"}
            }
            mock_result_data.metadata = {"bytes_written": 1024}
            # Wrap ResultData in RunOutputs
            mock_run_output = MagicMock()
            mock_run_output.outputs = [mock_result_data]
            with patch("langflow.api.v2.workflow.run_graph_internal") as mock_run:
                # run_graph_internal returns tuple[list[RunOutputs], str]
                mock_run.return_value = ([mock_run_output], "session-101")
                headers = {"x-api-key": created_api_key.api_key}
                response = await client.post("api/v2/workflows", json=request_data, headers=headers)
                assert response.status_code == 200
                result = response.json()
                assert result["flow_id"] == str(flow_id)
                assert "job_id" in result
                assert "outputs" in result
                # Note: Detailed content validation requires proper graph/vertex mocking
        finally:
            async with session_scope() as session:
                flow = await session.get(Flow, flow_id)
                if flow:
                    await session.delete(flow)

    async def test_sync_execution_with_multiple_terminal_nodes(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test sync execution with multiple terminal nodes (outputs)."""
        flow_id = uuid4()
        async with session_scope() as session:
            flow = Flow(
                id=flow_id,
                name="Multi-Output Flow",
                description="Flow with multiple terminal nodes",
                data={"nodes": [], "edges": []},
                user_id=created_api_key.user_id,
            )
            session.add(flow)
            await session.flush()
            await session.refresh(flow)
        try:
            request_data = {
                "flow_id": str(flow_id),
                "background": False,
                "stream": False,
                "inputs": {"ChatInput-abc.input_value": "Process this"},
            }
            # Mock execution with multiple outputs
            mock_chat_output = MagicMock()
            mock_chat_output.component_id = "ChatOutput-aaa"
            mock_chat_output.outputs = {"message": {"message": "Chat response", "type": "text"}}
            mock_file_output = MagicMock()
            mock_file_output.component_id = "SaveToFile-bbb"
            mock_file_output.outputs = {"message": {"message": "File saved successfully", "type": "text"}}
            with patch("langflow.api.v2.workflow.run_graph_internal") as mock_run:
                # run_graph_internal returns tuple[list[RunOutputs], str]
                mock_run.return_value = ([mock_chat_output, mock_file_output], "session-202")
                headers = {"x-api-key": created_api_key.api_key}
                response = await client.post("api/v2/workflows", json=request_data, headers=headers)
                assert response.status_code == 200
                result = response.json()
                assert result["flow_id"] == str(flow_id)
                assert "job_id" in result
                assert "outputs" in result
                # Note: Detailed content validation requires proper graph/vertex mocking
        finally:
            async with session_scope() as session:
                flow = await session.get(Flow, flow_id)
                if flow:
                    await session.delete(flow)

    async def test_sync_execution_response_structure_validation(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test that sync execution response has correct WorkflowExecutionResponse structure."""
        flow_id = uuid4()
        async with session_scope() as session:
            flow = Flow(
                id=flow_id,
                name="Test Flow",
                description="Flow for response validation",
                data={"nodes": [], "edges": []},
                user_id=created_api_key.user_id,
            )
            session.add(flow)
            await session.flush()
            await session.refresh(flow)
        try:
            request_data = {"flow_id": str(flow_id), "background": False, "stream": False, "inputs": None}
            headers = {"x-api-key": created_api_key.api_key}
            response = await client.post("api/v2/workflows", json=request_data, headers=headers)
            assert response.status_code == 200
            result = response.json()
            # Verify WorkflowExecutionResponse structure
            assert "flow_id" in result
            assert isinstance(result["flow_id"], str)
            assert result["flow_id"] == str(flow_id)
            assert "job_id" in result
            assert isinstance(result["job_id"], str)
            # session_id is optional - only present if provided in inputs
            if "session_id" in result:
                assert isinstance(result["session_id"], str)
            assert "object" in result
            assert result["object"] == "response"
            assert "created_timestamp" in result
            assert isinstance(result["created_timestamp"], str)
            assert "status" in result
            assert result["status"] in ["completed", "failed", "running", "queued"]
            assert "errors" in result
            assert isinstance(result["errors"], list)
            assert "inputs" in result
            assert isinstance(result["inputs"], dict)
            assert "outputs" in result
            assert isinstance(result["outputs"], dict)
        finally:
            async with session_scope() as session:
                flow = await session.get(Flow, flow_id)
                if flow:
                    await session.delete(flow)


# NOTE(review): the statements below were detached from their enclosing test by
# whitespace collapse; their original `async def` line is not visible in this excerpt.
# Test POST /workflow/stop without API key
response = await client.post(
    "api/v2/workflows/stop",
    json={"job_id": "550e8400-e29b-41d4-a716-446655440001"},
)
assert response.status_code == 403
assert "API key must be passed" in response.json()["detail"]


class TestWorkflowBackgroundQueueing:
    """Test background workflow execution and queueing behavior."""

    @pytest.fixture
    def mock_settings_dev_api_enabled(self):
        """Mock settings with developer API enabled."""
        with patch("langflow.api.v2.workflow.get_settings_service") as mock_get_settings_service:
            mock_service = MagicMock()
            mock_settings = MagicMock()
            mock_settings.developer_api_enabled = True
            mock_service.settings = mock_settings
            mock_get_settings_service.return_value = mock_service
            yield mock_settings

    async def test_background_execution_flow(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test the full background job submission flow."""
        flow_id = uuid4()
        async with session_scope() as session:
            flow = Flow(
                id=flow_id,
                name="Background Flow",
                description="Flow for background testing",
                data={"nodes": [], "edges": []},
                user_id=created_api_key.user_id,
            )
            session.add(flow)
            await session.flush()
            await session.refresh(flow)
        try:
            request_data = {
                "flow_id": str(flow_id),
                "background": True,
                "inputs": {"test.input": "data"},
            }
            headers = {"x-api-key": created_api_key.api_key}
            # Mock uuid4 to return a predictable job_id
            mock_job_id = "550e8400-e29b-41d4-a716-446655440001"
            with (
                patch("langflow.api.v2.workflow.get_task_service") as mock_get_task_service,
                patch("langflow.api.v2.workflow.uuid4", return_value=UUID(mock_job_id)),
            ):
                mock_task_service = MagicMock()
                # fire_and_forget_task is now awaited but its return value is not used for the job_id in response
                # as it uses graph.run_id. However, we still need it to be an awaitable if it's awaited.
                mock_task_service.fire_and_forget_task.return_value = asyncio.Future()
                mock_task_service.fire_and_forget_task.return_value.set_result(mock_job_id)
                mock_get_task_service.return_value = mock_task_service
                response = await client.post("api/v2/workflows", json=request_data, headers=headers)
                assert response.status_code == 200
                result = response.json()
                assert result["job_id"] == mock_job_id
                assert result["flow_id"] == str(flow_id)
                assert result["object"] == "job"
                assert result["status"] == "queued"
                assert "links" in result
                assert "status" in result["links"]
                assert mock_job_id in result["links"]["status"]
        finally:
            async with session_scope() as session:
                flow = await session.get(Flow, flow_id)
                if flow:
                    await session.delete(flow)

    async def test_background_execution_invalid_flow(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test background execution with a non-existent flow ID."""
        request_data = {
            "flow_id": str(uuid4()),
            "background": True,
        }
        headers = {"x-api-key": created_api_key.api_key}
        response = await client.post("api/v2/workflows", json=request_data, headers=headers)
        assert response.status_code == 404
        detail = response.json()["detail"]
        message = detail["message"] if isinstance(detail, dict) else detail
        assert "does not exist" in message.lower()

    async def test_background_execution_queue_exception(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test handling of exceptions during task queueing."""
        flow_id = uuid4()
        async with session_scope() as session:
            flow = Flow(
                id=flow_id,
                name="Fail Flow",
                data={"nodes": [], "edges": []},
                user_id=created_api_key.user_id,
            )
            session.add(flow)
            await session.flush()
        try:
            request_data = {"flow_id": str(flow_id), "background": True}
            headers = {"x-api-key": created_api_key.api_key}
            with patch("langflow.api.v2.workflow.get_task_service") as mock_get_task_service:
                mock_task_service = MagicMock()
                mock_task_service.fire_and_forget_task.side_effect = Exception("Queueing failed")
                mock_get_task_service.return_value = mock_task_service
                response = await client.post("api/v2/workflows", json=request_data, headers=headers)
                assert response.status_code == 500
                detail = response.json()["detail"]
                message = detail["message"] if isinstance(detail, dict) else detail
                assert "Queueing failed" in message
        finally:
            async with session_scope() as session:
                flow = await session.get(Flow, flow_id)
                if flow:
                    await session.delete(flow)

    async def test_sync_execution_error_handling(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test error handling during synchronous execution."""
        flow_id = uuid4()
        async with session_scope() as session:
            flow = Flow(
                id=flow_id,
                name="Error Flow",
                data={"nodes": [], "edges": []},
                user_id=created_api_key.user_id,
            )
            session.add(flow)
            await session.flush()
        try:
            request_data = {"flow_id": str(flow_id), "background": False}
            headers = {"x-api-key": created_api_key.api_key}
            with patch("langflow.api.v2.workflow.run_graph_internal") as mock_run:
                mock_run.side_effect = Exception("Internal execution engine failure")
                response = await client.post("api/v2/workflows", json=request_data, headers=headers)
                assert response.status_code == 200
                result = response.json()
                assert "errors" in result
                assert "Internal execution engine failure" in str(result["errors"])
        finally:
            async with session_scope() as session:
                flow = await session.get(Flow, flow_id)
                if flow:
                    await session.delete(flow)


class TestWorkflowStatus:
    """Test workflow status retrieval endpoints."""

    @pytest.fixture
    def mock_settings_dev_api_enabled(self):
        """Mock settings with developer API enabled."""
        with patch("langflow.api.v2.workflow.get_settings_service") as mock_get_settings_service:
            mock_service = MagicMock()
            mock_settings = MagicMock()
            mock_settings.developer_api_enabled = True
            mock_service.settings = mock_settings
            mock_get_settings_service.return_value = mock_service
            yield mock_settings

    async def test_get_status_queued(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test GET /workflow returns 200 for a queued job."""
        job_id = uuid4()
        flow_id = uuid4()
        mock_job = MagicMock()
        mock_job.job_id = job_id
        mock_job.flow_id = flow_id
        mock_job.status = JobStatus.QUEUED
        mock_job.type = JobType.WORKFLOW
        mock_job.created_timestamp = datetime.now(timezone.utc)
        with patch("langflow.api.v2.workflow.get_job_service") as mock_get_job_service:
            mock_service = MagicMock()
            mock_service.get_job_by_job_id = AsyncMock(return_value=mock_job)
            mock_get_job_service.return_value = mock_service
            headers = {"x-api-key": created_api_key.api_key}
            response = await client.get(f"api/v2/workflows?job_id={job_id}", headers=headers)
            assert response.status_code == 200
            result = response.json()
            assert result["job_id"] == str(job_id)
            assert result["status"] == "queued"
            assert result["flow_id"] == str(flow_id)

    async def test_get_status_not_found(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test GET /workflow returns 404 for a non-existent job."""
        job_id = uuid4()
        with patch("langflow.api.v2.workflow.get_job_service") as mock_get_job_service:
            mock_service = MagicMock()
            mock_service.get_job_by_job_id = AsyncMock(return_value=None)
            mock_get_job_service.return_value = mock_service
            headers = {"x-api-key": created_api_key.api_key}
            response = await client.get(f"api/v2/workflows?job_id={job_id}", headers=headers)
            assert response.status_code == 404
            result = response.json()
            assert result["detail"]["code"] == "JOB_NOT_FOUND"

    async def test_get_status_failed(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test GET /workflow returns 500 for a failed job."""
        job_id = uuid4()
        mock_job = MagicMock()
        mock_job.job_id = job_id
        mock_job.status = JobStatus.FAILED
        mock_job.type = JobType.WORKFLOW
        with patch("langflow.api.v2.workflow.get_job_service") as mock_get_job_service:
            mock_service = MagicMock()
            mock_service.get_job_by_job_id = AsyncMock(return_value=mock_job)
            mock_get_job_service.return_value = mock_service
            headers = {"x-api-key": created_api_key.api_key}
            response = await client.get(f"api/v2/workflows?job_id={job_id}", headers=headers)
            assert response.status_code == 500
            result = response.json()
            assert result["detail"]["code"] == "JOB_FAILED"
            assert result["detail"]["job_id"] == str(job_id)

    async def test_get_status_completed_reconstruction(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test GET /workflow returns reconstructed response for a completed job."""
        job_id = uuid4()
        flow_id = uuid4()
        mock_job = MagicMock()
        mock_job.job_id = job_id
        mock_job.flow_id = flow_id
        mock_job.status = JobStatus.COMPLETED
        mock_job.type = JobType.WORKFLOW
        with (
            patch("langflow.api.v2.workflow.get_job_service") as mock_get_job_service,
            patch("langflow.api.v2.workflow.get_flow_by_id_or_endpoint_name") as mock_get_flow,
            patch("langflow.api.v2.workflow.reconstruct_workflow_response_from_job_id") as mock_reconstruct,
        ):
            mock_service = MagicMock()
            mock_service.get_job_by_job_id = AsyncMock(return_value=mock_job)
            mock_get_job_service.return_value = mock_service
            mock_flow = MagicMock()
            mock_flow.id = flow_id
            mock_get_flow.return_value = mock_flow
            mock_reconstruct.return_value = {"flow_id": str(flow_id), "status": "completed", "outputs": {}}
            headers = {"x-api-key": created_api_key.api_key}
            response = await client.get(f"api/v2/workflows?job_id={job_id}", headers=headers)
            assert response.status_code == 200
            result = response.json()
            assert result["status"] == "completed"
            mock_reconstruct.assert_called_once()

    async def test_get_status_timed_out(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test GET /workflow returns 408 for a timed out job."""
        job_id = uuid4()
        flow_id = uuid4()
        mock_job = MagicMock()
        mock_job.job_id = job_id
        mock_job.flow_id = flow_id
        mock_job.status = JobStatus.TIMED_OUT
        mock_job.type = JobType.WORKFLOW
        with patch("langflow.api.v2.workflow.get_job_service") as mock_get_job_service:
            mock_service = MagicMock()
            mock_service.get_job_by_job_id = AsyncMock(return_value=mock_job)
            mock_get_job_service.return_value = mock_service
            headers = {"x-api-key": created_api_key.api_key}
            # Add timeout to client.get to avoid hanging if something goes wrong
            response = await client.get(f"api/v2/workflows?job_id={job_id}", headers=headers)
            assert response.status_code == 408
            result = response.json()
            assert result["detail"]["code"] == "EXECUTION_TIMEOUT"
            assert result["detail"]["job_id"] == str(job_id)
            assert result["detail"]["flow_id"] == str(flow_id)


class TestWorkflowStop:
    """Test workflow stop endpoints."""

    @pytest.fixture
    def mock_settings_dev_api_enabled(self):
        """Mock settings with developer API enabled."""
        with patch("langflow.api.v2.workflow.get_settings_service") as mock_get_settings_service:
            mock_service = MagicMock()
            mock_settings = MagicMock()
            mock_settings.developer_api_enabled = True
            mock_service.settings = mock_settings
            mock_get_settings_service.return_value = mock_service
            yield mock_settings

    async def test_stop_workflow_success(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test POST /workflow/stop cancels a running job."""
        job_id = str(uuid4())
        mock_job = MagicMock()
        mock_job.job_id = job_id
        mock_job.status = JobStatus.IN_PROGRESS
        with (
            patch("langflow.api.v2.workflow.get_job_service") as mock_get_job_service,
            patch("langflow.api.v2.workflow.get_task_service") as mock_get_task_service,
        ):
            mock_job_service = MagicMock()
            mock_job_service.get_job_by_job_id = AsyncMock(return_value=mock_job)
            mock_job_service.update_job_status = AsyncMock()
            mock_get_job_service.return_value = mock_job_service
            mock_task_service = MagicMock()
            mock_task_service.revoke_task = AsyncMock(return_value=True)
            mock_get_task_service.return_value = mock_task_service
            headers = {"x-api-key": created_api_key.api_key}
            response = await client.post("api/v2/workflows/stop", json={"job_id": job_id}, headers=headers)
            assert response.status_code == 200
            result = response.json()
            assert result["job_id"] == job_id
            assert "cancelled successfully" in result["message"]
            mock_task_service.revoke_task.assert_called_once()
            mock_job_service.update_job_status.assert_called_once_with(UUID(job_id), JobStatus.CANCELLED)

    async def test_stop_workflow_not_found(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test POST /workflow/stop returns 404 for non-existent job."""
        job_id = str(uuid4())
        with patch("langflow.api.v2.workflow.get_job_service") as mock_get_job_service:
            mock_service = MagicMock()
            mock_service.get_job_by_job_id = AsyncMock(return_value=None)
            mock_get_job_service.return_value = mock_service
            headers = {"x-api-key": created_api_key.api_key}
            response = await client.post("api/v2/workflows/stop", json={"job_id": job_id}, headers=headers)
            assert response.status_code == 404
            result = response.json()
            assert result["detail"]["code"] == "JOB_NOT_FOUND"

    async def test_stop_workflow_already_cancelled(
        self,
        client: AsyncClient,
        created_api_key,
        mock_settings_dev_api_enabled,  # noqa: ARG002
    ):
        """Test POST /workflow/stop handles already cancelled jobs."""
        job_id = str(uuid4())
        mock_job = MagicMock()
        mock_job.job_id = job_id
        mock_job.status = JobStatus.CANCELLED
        with patch("langflow.api.v2.workflow.get_job_service") as mock_get_job_service:
            mock_service = MagicMock()
            mock_service.get_job_by_job_id = AsyncMock(return_value=mock_job)
            mock_get_job_service.return_value = mock_service
            headers = {"x-api-key": created_api_key.api_key}
            response = await client.post("api/v2/workflows/stop", json={"job_id": job_id}, headers=headers)
            assert response.status_code == 200
            result = response.json()
            assert result["job_id"] == job_id
            assert "already cancelled" in result["message"]
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/api/v2/test_workflow.py", "license": "MIT License", "lines": 1331, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/src/lfx/schema/workflow.py
"""Workflow execution schemas for V2 API.""" from __future__ import annotations from datetime import datetime, timezone from enum import Enum from typing import Annotated, Any, Literal from uuid import UUID from pydantic import BaseModel, BeforeValidator, ConfigDict, Field, model_validator from lfx.schema.validators import null_check_validator, uuid_validator class JobStatus(str, Enum): """Job execution status.""" QUEUED = "queued" IN_PROGRESS = "in_progress" COMPLETED = "completed" FAILED = "failed" CANCELLED = "cancelled" TIMED_OUT = "timed_out" JobId = Annotated[ str | UUID, BeforeValidator(lambda v: null_check_validator(v, message="job_id is required")), BeforeValidator(lambda v: uuid_validator(v, message="Invalid job_id, must be a UUID")), ] class ErrorDetail(BaseModel): """Error detail schema.""" error: str code: str | None = None details: dict[str, Any] | None = None class ComponentOutput(BaseModel): """Component output schema.""" type: str = Field(..., description="Type of the component output (e.g., 'message', 'data', 'tool', 'text')") status: JobStatus content: Any | None = None metadata: dict[str, Any] | None = None class WorkflowExecutionRequest(BaseModel): """Request schema for workflow execution.""" background: bool = False stream: bool = False flow_id: str inputs: dict[str, Any] | None = Field( None, description="Component-specific inputs in flat format: 'component_id.param_name': value" ) @model_validator(mode="after") def validate_execution_mode(self) -> WorkflowExecutionRequest: if self.background and self.stream: err_msg = "Both 'background' and 'stream' cannot be True" raise ValueError(err_msg) return self model_config = ConfigDict( json_schema_extra={ "examples": [ { "background": False, "stream": False, "flow_id": "flow_67ccd2be17f0819081ff3bb2cf6508e60bb6a6b452d3795b", "inputs": { "ChatInput-abc.input_value": "Hello, how can you help me today?", "ChatInput-abc.session_id": "session-123", "LLM-xyz.temperature": 0.7, "LLM-xyz.max_tokens": 100, 
"OpenSearch-def.opensearch_url": "https://opensearch:9200", }, }, { "background": True, "stream": False, "flow_id": "flow_67ccd2be17f0819081ff3bb2cf6508e60bb6a6b452d3795b", "inputs": { "ChatInput-abc.input_value": "Process this in the background", }, }, { "background": False, "stream": True, "flow_id": "flow_67ccd2be17f0819081ff3bb2cf6508e60bb6a6b452d3795b", "inputs": { "ChatInput-abc.input_value": "Stream this conversation", }, }, ] }, extra="forbid", ) class WorkflowExecutionResponse(BaseModel): """Synchronous workflow execution response.""" flow_id: str job_id: JobId | None = None object: Literal["response"] = Field(default="response") created_timestamp: str = Field(default_factory=lambda: datetime.now(timezone.utc).isoformat()) status: JobStatus errors: list[ErrorDetail] = [] inputs: dict[str, Any] = {} outputs: dict[str, ComponentOutput] = {} class WorkflowJobResponse(BaseModel): """Background job response.""" job_id: JobId flow_id: str object: Literal["job"] = Field(default="job") created_timestamp: str = Field(default_factory=lambda: datetime.now(timezone.utc).isoformat()) status: JobStatus links: dict[str, str] = Field(default_factory=dict) errors: list[ErrorDetail] = [] @model_validator(mode="after") def build_links(self) -> WorkflowJobResponse: """Automatically populate links for the client.""" if not self.links: self.links = { "status": f"/api/v2/workflows?job_id={self.job_id!s}", "stop": "/api/v2/workflows/stop", } return self class WorkflowStreamEvent(BaseModel): """Streaming event response.""" type: str run_id: str timestamp: int raw_event: dict[str, Any] class WorkflowStopRequest(BaseModel): """Request schema for stopping workflow.""" job_id: JobId class WorkflowStopResponse(BaseModel): """Response schema for stopping workflow.""" job_id: JobId message: str | None = None # OpenAPI response definitions WORKFLOW_EXECUTION_RESPONSES = { 200: { "description": "Workflow execution response", "content": { "application/json": { "schema": { "oneOf": [ 
WorkflowExecutionResponse.model_json_schema(), WorkflowJobResponse.model_json_schema(), ], "discriminator": { "propertyName": "object", "mapping": { "response": "#/components/schemas/WorkflowExecutionResponse", "job": "#/components/schemas/WorkflowJobResponse", }, }, } }, "text/event-stream": { "schema": WorkflowStreamEvent.model_json_schema(), "description": "Server-sent events for streaming execution", }, }, } } WORKFLOW_STATUS_RESPONSES = { 200: { "description": "Workflow status response", "content": { "application/json": {"schema": WorkflowExecutionResponse.model_json_schema()}, "text/event-stream": { "schema": WorkflowStreamEvent.model_json_schema(), "description": "Server-sent events for streaming status", }, }, } }
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/src/lfx/schema/workflow.py", "license": "MIT License", "lines": 161, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
langflow-ai/langflow:src/backend/tests/unit/utils/test_mustache_security.py
"""Tests for mustache security utilities."""

import pytest

from langflow.utils.mustache_security import safe_mustache_render, validate_mustache_template


class TestMustacheSecurity:
    """Test mustache security functions."""

    def test_validate_accepts_simple_variables(self):
        """Templates limited to simple {{variable}} substitution pass validation."""
        ok_templates = (
            "Hello {{name}}!",
            "{{user_name}} - {{user_email}}",
            "Price: {{price_100}}",
            "",
            "No variables here",
        )
        for template in ok_templates:
            # Must not raise for any of these.
            validate_mustache_template(template)

    def test_validate_rejects_complex_syntax(self):
        """Any non-trivial mustache construct is rejected."""
        rejected = (
            "{{#if}}content{{/if}}",          # conditionals / sections
            "{{^empty}}not empty{{/empty}}",  # inverted sections
            "{{&html}}",                      # unescaped variables
            "{{>header}}",                    # partials
            "{{!comment}}",                   # comments
            "{{.}}",                          # current-context reference
        )
        for template in rejected:
            with pytest.raises(ValueError, match="Complex mustache syntax is not allowed"):
                validate_mustache_template(template)

    def test_validate_rejects_invalid_variable_names(self):
        """Variable names outside the simple-identifier form are rejected."""
        bad_templates = (
            "{{ name with spaces }}",  # embedded spaces
            "{{123invalid}}",          # leading digit
            "{{price-$100}}",          # special characters
            "{{}}",                    # empty name
            "{{user.name}}",           # dot notation is unsupported
        )
        for template in bad_templates:
            with pytest.raises(ValueError, match="Invalid mustache variable"):
                validate_mustache_template(template)

    def test_safe_render_simple_variables(self):
        """Each placeholder is substituted by variable name."""
        rendered = safe_mustache_render(
            "Hello {{name}}! You are {{age}} years old.",
            {"name": "Alice", "age": 25},
        )
        assert rendered == "Hello Alice! You are 25 years old."

    def test_safe_render_missing_variables(self):
        """Unknown variables render as the empty string."""
        rendered = safe_mustache_render(
            "Hello {{name}}! Your score is {{score}}.",
            {"name": "Charlie"},
        )
        assert rendered == "Hello Charlie! Your score is ."

    def test_safe_render_none_values(self):
        """None values render as the empty string."""
        rendered = safe_mustache_render(
            "Name: {{name}}, Age: {{age}}",
            {"name": None, "age": None},
        )
        assert rendered == "Name: , Age: "

    def test_safe_render_numeric_values(self):
        """Numeric values are stringified with str()."""
        rendered = safe_mustache_render(
            "Price: ${{price}}, Quantity: {{qty}}",
            {"price": 19.99, "qty": 3},
        )
        assert rendered == "Price: $19.99, Quantity: 3"

    def test_safe_render_rejects_complex_syntax(self):
        """Rendering validates the template first and refuses complex syntax."""
        with pytest.raises(ValueError, match="Complex mustache syntax is not allowed"):
            safe_mustache_render("{{#if}}test{{/if}}", {"if": True})

    def test_safe_render_multiple_variables(self):
        """Every placeholder in the template is replaced."""
        rendered = safe_mustache_render(
            "{{greeting}} {{name}}, welcome to {{place}}!",
            {"greeting": "Hello", "name": "Alice", "place": "Langflow"},
        )
        assert rendered == "Hello Alice, welcome to Langflow!"

    def test_safe_render_underscore_variables(self):
        """Leading underscores are valid in variable names."""
        rendered = safe_mustache_render(
            "Private: {{_private_var}}, Public: {{public_var}}",
            {"_private_var": "secret", "public_var": "visible"},
        )
        assert rendered == "Private: secret, Public: visible"

    def test_safe_render_empty_string_values(self):
        """Empty-string values collapse the placeholder entirely."""
        rendered = safe_mustache_render("Start{{middle}}End", {"middle": ""})
        assert rendered == "StartEnd"
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/utils/test_mustache_security.py", "license": "MIT License", "lines": 96, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/src/lfx/utils/mustache_security.py
"""Security utilities for mustache template processing."""

import re
from typing import Any

# Regex pattern for simple variables only - same rule as the frontend.
SIMPLE_VARIABLE_PATTERN = re.compile(r"\{\{([a-zA-Z_][a-zA-Z0-9_]*)\}\}")

# Any {{...}} token; used to enumerate candidates that must then match the
# simple-variable form. Hoisted to module level so the pattern is compiled
# once instead of being re-resolved on every validation call.
MUSTACHE_TOKEN_PATTERN = re.compile(r"\{\{[^}]*\}\}")

# Patterns for complex mustache syntax that we want to block
DANGEROUS_PATTERNS = [
    re.compile(r"\{\{\{"),  # Triple braces (unescaped HTML in Mustache)
    re.compile(r"\{\{#"),  # Conditionals/sections start
    re.compile(r"\{\{/"),  # Conditionals/sections end
    re.compile(r"\{\{\^"),  # Inverted sections
    re.compile(r"\{\{&"),  # Unescaped variables
    re.compile(r"\{\{>"),  # Partials
    re.compile(r"\{\{!"),  # Comments
    re.compile(r"\{\{\."),  # Current context
]


def validate_mustache_template(template: str) -> None:
    """Validate that a mustache template only contains simple variable substitutions.

    Args:
        template: The mustache template string. Empty/falsy templates pass.

    Raises:
        ValueError: If complex mustache syntax or an invalid variable name is
            detected.
    """
    if not template:
        return

    # Check known-dangerous constructs first for the more specific message.
    for pattern in DANGEROUS_PATTERNS:
        if pattern.search(template):
            msg = (
                "Complex mustache syntax is not allowed. Only simple variable substitution "
                "like {{variable}} is permitted."
            )
            raise ValueError(msg)

    # Every remaining {{...}} token must be exactly a simple variable.
    # fullmatch makes the whole-token requirement explicit (equivalent to the
    # previous match() here, since each token ends with its closing braces).
    for token in MUSTACHE_TOKEN_PATTERN.findall(template):
        if not SIMPLE_VARIABLE_PATTERN.fullmatch(token):
            msg = f"Invalid mustache variable: {token}. Only simple variable names like {{{{variable}}}} are allowed."
            raise ValueError(msg)


def safe_mustache_render(template: str, variables: dict[str, Any]) -> str:
    """Safely render a mustache template with only simple variable substitution.

    This function performs a single-pass replacement of all {{variable}} patterns.
    Variable values that themselves contain mustache-like patterns (e.g., "{{other}}")
    will NOT be processed - they are treated as literal strings. This prevents
    injection attacks where user-controlled values could introduce new template
    variables.

    Args:
        template: The mustache template string
        variables: Dictionary of variables to substitute

    Returns:
        The rendered template

    Raises:
        ValueError: If template contains complex mustache syntax
    """
    # Validate first so complex syntax never reaches substitution.
    validate_mustache_template(template)

    def replace_variable(match: re.Match) -> str:
        # Missing keys and None values both render as the empty string;
        # everything else is stringified (no dot-notation lookup).
        value = variables.get(match.group(1), "")
        return str(value) if value is not None else ""

    # Single pass: substituted values are never re-scanned for variables.
    return SIMPLE_VARIABLE_PATTERN.sub(replace_variable, template)
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/src/lfx/utils/mustache_security.py", "license": "MIT License", "lines": 61, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
langflow-ai/langflow:src/lfx/tests/unit/components/test_prompt_component.py
"""Tests for PromptComponent with f-string and double brackets syntax.

Covers two template modes: the default f-string mode ({var}) and the
mustache-style double-bracket mode ({{var}}) enabled via `use_double_brackets`.
"""

from lfx.components.models_and_agents.prompt import PromptComponent


class TestPromptComponent:
    """Test the PromptComponent."""

    def test_update_template_single_variable(self):
        """Test template update with a single variable."""
        component = PromptComponent()
        frontend_node = {
            "template": {
                "template": {"value": "Hello {name}!"},
            },
            "custom_fields": {},
        }
        result = component._update_template(frontend_node)
        # Extracted variables are registered both in custom_fields and as
        # fields on the node's template.
        assert "name" in result["custom_fields"]["template"]
        assert "name" in result["template"]

    def test_update_template_multiple_variables(self):
        """Test template with multiple variables."""
        component = PromptComponent()
        frontend_node = {
            "template": {
                "template": {"value": "{greeting} {name}!"},
            },
            "custom_fields": {},
        }
        result = component._update_template(frontend_node)
        assert "greeting" in result["custom_fields"]["template"]
        assert "name" in result["custom_fields"]["template"]

    def test_update_template_duplicate_variables(self):
        """Test template with duplicate variables only creates one field."""
        component = PromptComponent()
        frontend_node = {
            "template": {
                "template": {"value": "Hello {name}! How are you {name}?"},
            },
            "custom_fields": {},
        }
        result = component._update_template(frontend_node)
        # Deduplication: the variable appears once even though used twice.
        assert result["custom_fields"]["template"].count("name") == 1
        assert "name" in result["template"]

    def test_update_template_no_variables(self):
        """Test template with no variables."""
        component = PromptComponent()
        frontend_node = {
            "template": {
                "template": {"value": "Hello World!"},
            },
            "custom_fields": {},
        }
        result = component._update_template(frontend_node)
        assert len(result["custom_fields"].get("template", [])) == 0

    def test_update_template_escaped_braces(self):
        """Test template with escaped braces doesn't create variables."""
        component = PromptComponent()
        frontend_node = {
            "template": {
                "template": {"value": "Result: {{not_a_var}} but {real_var} works"},
            },
            "custom_fields": {},
        }
        result = component._update_template(frontend_node)
        # Only {real_var} should be extracted
        assert "real_var" in result["custom_fields"]["template"]
        # {{not_a_var}} should NOT be extracted in f-string mode
        assert "not_a_var" not in result["custom_fields"]["template"]

    async def test_build_prompt_basic(self):
        """Test building a basic prompt."""
        component = PromptComponent()
        # NOTE(review): _attributes is assigned directly to supply resolved
        # inputs without full graph wiring — confirm against component internals.
        component._attributes = {
            "template": "Hello {name}!",
            "name": "World",
        }
        result = await component.build_prompt()
        assert result.text == "Hello World!"
        assert result.template == "Hello {name}!"

    async def test_build_prompt_multiple_variables(self):
        """Test building prompt with multiple variables."""
        component = PromptComponent()
        component._attributes = {
            "template": "{greeting} {name}! You are {age} years old.",
            "greeting": "Hello",
            "name": "Alice",
            "age": "25",
        }
        result = await component.build_prompt()
        assert result.text == "Hello Alice! You are 25 years old."

    async def test_update_frontend_node(self):
        """Test update_frontend_node processes template correctly."""
        component = PromptComponent()
        new_node = {
            "template": {
                "template": {"value": "Hello {name}!"},
            },
            "custom_fields": {},
        }
        current_node = {
            "template": {"template": {"value": ""}},
        }
        result = await component.update_frontend_node(new_node, current_node)
        assert "name" in result["custom_fields"]["template"]
        assert "name" in result["template"]

    async def test_update_frontend_node_creates_variable_fields(self):
        """Test that update_frontend_node creates fields for template variables."""
        component = PromptComponent()
        new_node = {
            "template": {
                "template": {"value": "Hello {name} and {greeting}!"},
            },
            "custom_fields": {},
        }
        current_node = {
            "template": {"template": {"value": ""}},
        }
        result = await component.update_frontend_node(new_node, current_node)
        # Both variables should be in custom_fields
        assert "name" in result["custom_fields"]["template"]
        assert "greeting" in result["custom_fields"]["template"]


class TestPromptComponentDoubleBrackets:
    """Test the PromptComponent with double brackets (mustache) syntax."""

    def test_update_template_double_brackets_single_variable(self):
        """Test template update with a single double-bracket variable."""
        component = PromptComponent()
        frontend_node = {
            "template": {
                "template": {"value": "Hello {{name}}!"},
                "use_double_brackets": {"value": True},
            },
            "custom_fields": {},
        }
        result = component._update_template(frontend_node)
        assert "name" in result["custom_fields"]["template"]
        assert "name" in result["template"]

    def test_update_template_double_brackets_multiple_variables(self):
        """Test template with multiple double-bracket variables."""
        component = PromptComponent()
        frontend_node = {
            "template": {
                "template": {"value": "{{greeting}} {{name}}!"},
                "use_double_brackets": {"value": True},
            },
            "custom_fields": {},
        }
        result = component._update_template(frontend_node)
        assert "greeting" in result["custom_fields"]["template"]
        assert "name" in result["custom_fields"]["template"]

    def test_update_template_double_brackets_no_variables(self):
        """Test double-bracket template with no variables."""
        component = PromptComponent()
        frontend_node = {
            "template": {
                "template": {"value": "Hello World!"},
                "use_double_brackets": {"value": True},
            },
            "custom_fields": {},
        }
        result = component._update_template(frontend_node)
        assert len(result["custom_fields"].get("template", [])) == 0

    def test_update_template_double_brackets_ignores_single_braces(self):
        """Test that double-bracket mode ignores single-brace variables."""
        component = PromptComponent()
        frontend_node = {
            "template": {
                "template": {"value": "Hello {single} and {{double}}!"},
                "use_double_brackets": {"value": True},
            },
            "custom_fields": {},
        }
        result = component._update_template(frontend_node)
        # Only {{double}} should be extracted in double-bracket mode
        assert "double" in result["custom_fields"]["template"]
        # {single} should NOT be extracted
        assert "single" not in result["custom_fields"].get("template", [])

    async def test_build_prompt_double_brackets_basic(self):
        """Test building a basic prompt with double brackets."""
        component = PromptComponent()
        # Flag is set both as an attribute and inside _attributes — presumably
        # both paths are consulted; verify against component implementation.
        component.use_double_brackets = True
        component._attributes = {
            "template": "Hello {{name}}!",
            "name": "World",
            "use_double_brackets": True,
        }
        result = await component.build_prompt()
        assert result.text == "Hello World!"

    async def test_build_prompt_double_brackets_multiple_variables(self):
        """Test building prompt with multiple double-bracket variables."""
        component = PromptComponent()
        component.use_double_brackets = True
        component._attributes = {
            "template": "{{greeting}} {{name}}! You are {{age}} years old.",
            "greeting": "Hello",
            "name": "Alice",
            "age": "25",
            "use_double_brackets": True,
        }
        result = await component.build_prompt()
        assert result.text == "Hello Alice! You are 25 years old."

    async def test_build_prompt_default_is_single_brackets(self):
        """Test that default mode uses single brackets (f-string)."""
        component = PromptComponent()
        # Don't set use_double_brackets - should default to False
        component._attributes = {
            "template": "Hello {name}!",
            "name": "World",
        }
        result = await component.build_prompt()
        assert result.text == "Hello World!"

    async def test_update_frontend_node_double_brackets(self):
        """Test update_frontend_node processes double-bracket template correctly."""
        component = PromptComponent()
        new_node = {
            "template": {
                "template": {"value": "Hello {{name}}!"},
                "use_double_brackets": {"value": True},
            },
            "custom_fields": {},
        }
        current_node = {
            "template": {"template": {"value": ""}},
        }
        result = await component.update_frontend_node(new_node, current_node)
        assert "name" in result["custom_fields"]["template"]
        assert "name" in result["template"]

    def test_update_build_config_switches_to_mustache(self):
        """Test that update_build_config switches field type when enabling double brackets."""
        component = PromptComponent()
        build_config = {
            "template": {"type": "prompt", "value": "Hello {{name}}!"},
            "custom_fields": {"template": []},
        }
        result = component.update_build_config(build_config, field_value=True, field_name="use_double_brackets")
        assert result["template"]["type"] == "mustache"

    def test_update_build_config_switches_to_fstring(self):
        """Test that update_build_config switches field type when disabling double brackets."""
        component = PromptComponent()
        # Starting type "MustachePrompt" still normalizes back to "prompt".
        build_config = {
            "template": {"type": "MustachePrompt", "value": "Hello {name}!"},
            "custom_fields": {"template": []},
        }
        result = component.update_build_config(build_config, field_value=False, field_name="use_double_brackets")
        assert result["template"]["type"] == "prompt"
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/tests/unit/components/test_prompt_component.py", "license": "MIT License", "lines": 240, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/tests/unit/schema/test_mustache_template_processing.py
"""Tests for mustache template processing in the Message class.

Note: Our mustache implementation only supports simple variable substitution
for security reasons. Complex features like conditionals, loops, and sections
are not supported.
"""

import pytest

from lfx.schema.message import Message
from lfx.utils.mustache_security import validate_mustache_template


class TestMustacheTemplateProcessing:
    """Test mustache template processing in the Message class."""

    def test_format_text_mustache_basic(self):
        """Test basic mustache template formatting."""
        message = Message(template="Hello {{name}}!", variables={"name": "World"})
        result = message.format_text(template_format="mustache")
        # format_text both returns the rendered text and stores it on .text.
        assert result == "Hello World!"
        assert message.text == "Hello World!"

    def test_format_text_mustache_multiple_variables(self):
        """Test mustache template with multiple variables."""
        message = Message(
            template="Hello {{name}}! You are {{age}} years old.", variables={"name": "Alice", "age": "25"}
        )
        result = message.format_text(template_format="mustache")
        assert result == "Hello Alice! You are 25 years old."

    def test_format_text_mustache_missing_variable(self):
        """Test mustache template with missing variable."""
        message = Message(template="Hello {{name}}! You are {{age}} years old.", variables={"name": "Bob"})
        result = message.format_text(template_format="mustache")
        # Missing variables should render as empty strings
        assert result == "Hello Bob! You are  years old."

    def test_format_text_mustache_no_variables(self):
        """Test mustache template with no variables."""
        message = Message(template="Hello World!", variables={})
        result = message.format_text(template_format="mustache")
        assert result == "Hello World!"

    def test_format_text_mustache_empty_template(self):
        """Test mustache template with empty template."""
        message = Message(template="", variables={"name": "Test"})
        result = message.format_text(template_format="mustache")
        assert result == ""

    def test_format_text_mustache_with_numeric_values(self):
        """Test mustache template with numeric values."""
        message = Message(
            template="Price: ${{price}}, Quantity: {{quantity}}", variables={"price": 19.99, "quantity": 3}
        )
        result = message.format_text(template_format="mustache")
        assert result == "Price: $19.99, Quantity: 3"

    def test_format_text_mustache_with_newlines(self):
        """Test mustache template with newlines."""
        message = Message(
            template="Line 1: {{line1}}\nLine 2: {{line2}}", variables={"line1": "First", "line2": "Second"}
        )
        result = message.format_text(template_format="mustache")
        assert result == "Line 1: First\nLine 2: Second"

    def test_format_text_mustache_with_empty_string_variable(self):
        """Test mustache template with empty string variable."""
        message = Message(template="Hello {{name}}!", variables={"name": ""})
        result = message.format_text(template_format="mustache")
        assert result == "Hello !"

    def test_format_text_mustache_with_none_variable(self):
        """Test mustache template with None variable."""
        message = Message(template="Hello {{name}}!", variables={"name": None})
        result = message.format_text(template_format="mustache")
        # None should render as empty string
        assert result == "Hello !"

    async def test_from_template_and_variables_mustache(self):
        """Test from_template_and_variables with mustache format."""
        # Variables are passed as keyword arguments to the factory.
        message = await Message.from_template_and_variables(
            template="Hello {{name}}!", template_format="mustache", name="World"
        )
        assert isinstance(message, Message)
        assert message.text == "Hello World!"
        assert message.template == "Hello {{name}}!"
        assert message.variables == {"name": "World"}

    async def test_from_template_and_variables_mustache_no_variables(self):
        """Test from_template_and_variables with no variables."""
        message = await Message.from_template_and_variables(template="Static message", template_format="mustache")
        assert isinstance(message, Message)
        assert message.text == "Static message"
        assert message.variables == {}

    def test_format_text_mustache_preserves_original_variables(self):
        """Test that format_text doesn't modify the original variables."""
        original_variables = {"name": "Test", "age": 25}
        message = Message(template="Hello {{name}}, age {{age}}!", variables=original_variables.copy())
        result = message.format_text(template_format="mustache")
        assert result == "Hello Test, age 25!"
        assert message.variables == original_variables

    def test_format_text_mustache_with_zero_values(self):
        """Test mustache template with zero values."""
        # Zero is falsy but must still render literally, not as empty string.
        message = Message(template="Count: {{count}}, Price: {{price}}", variables={"count": 0, "price": 0.0})
        result = message.format_text(template_format="mustache")
        assert result == "Count: 0, Price: 0.0"

    def test_mustache_security_rejects_conditionals(self):
        """Test that conditional syntax is rejected for security."""
        with pytest.raises(ValueError, match="Complex mustache syntax is not allowed"):
            validate_mustache_template("{{#show}}Hello{{/show}}")

    def test_mustache_security_rejects_inverted_sections(self):
        """Test that inverted section syntax is rejected for security."""
        with pytest.raises(ValueError, match="Complex mustache syntax is not allowed"):
            validate_mustache_template("{{^items}}No items{{/items}}")

    def test_mustache_security_rejects_loops(self):
        """Test that loop syntax is rejected for security."""
        with pytest.raises(ValueError, match="Complex mustache syntax is not allowed"):
            validate_mustache_template("{{#items}}{{.}}{{/items}}")

    def test_mustache_security_rejects_unescaped_variables(self):
        """Test that unescaped variable syntax is rejected for security."""
        with pytest.raises(ValueError, match="Complex mustache syntax is not allowed"):
            validate_mustache_template("{{&variable}}")

    def test_mustache_security_rejects_partials(self):
        """Test that partial syntax is rejected for security."""
        with pytest.raises(ValueError, match="Complex mustache syntax is not allowed"):
            validate_mustache_template("{{>partial}}")

    def test_mustache_security_rejects_comments(self):
        """Test that comment syntax is rejected for security."""
        with pytest.raises(ValueError, match="Complex mustache syntax is not allowed"):
            validate_mustache_template("{{!comment}}")

    def test_mustache_security_allows_simple_variables(self):
        """Test that simple variables are allowed."""
        # Should not raise
        validate_mustache_template("Hello {{name}}!")
        validate_mustache_template("{{var1}} and {{var2}}")

    def test_mustache_security_rejects_dot_notation(self):
        """Test that dot notation is NOT allowed."""
        with pytest.raises(ValueError, match="Invalid mustache variable"):
            validate_mustache_template("{{user.name}}")
        with pytest.raises(ValueError, match="Invalid mustache variable"):
            validate_mustache_template("{{company.ceo.name}}")

    def test_format_text_defaults_to_f_string(self):
        """Test that format_text defaults to f-string format."""
        message = Message(template="Hello {name}!", variables={"name": "World"})
        result = message.format_text()  # No template_format specified
        assert result == "Hello World!"
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/tests/unit/schema/test_mustache_template_processing.py", "license": "MIT License", "lines": 131, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/components/models_and_agents/test_ibm_granite_handler.py
"""Tests for IBM Granite handler functions. This module tests the specialized handling for IBM Granite models which have different tool calling behavior compared to other LLMs. """ import contextlib from unittest.mock import Mock, patch import pytest from langchain_core.messages import AIMessage from lfx.components.langchain_utilities.ibm_granite_handler import ( PLACEHOLDER_PATTERN, create_granite_agent, detect_placeholder_in_args, get_enhanced_system_prompt, is_granite_model, is_watsonx_model, ) # ============================================================================= # Tests for is_watsonx_model function # ============================================================================= def create_mock_tool(tool_name: str) -> Mock: """Create a mock tool with proper name attribute.""" mock = Mock() mock.name = tool_name return mock class TestIsWatsonxModel: """Test suite for is_watsonx_model function.""" def test_detects_chatwatsonx_class(self): """Test detection of ChatWatsonx class by name.""" mock_llm = Mock() mock_llm.__class__.__name__ = "ChatWatsonx" result = is_watsonx_model(mock_llm) assert result is True def test_detects_watsonx_in_class_name(self): """Test detection when class name contains 'watsonx'.""" mock_llm = Mock() mock_llm.__class__.__name__ = "WatsonxLLM" result = is_watsonx_model(mock_llm) assert result is True def test_detects_by_module_langchain_ibm(self): """Test detection by langchain_ibm module.""" mock_llm = Mock() mock_llm.__class__.__name__ = "SomeModel" mock_llm.__class__.__module__ = "langchain_ibm.chat" result = is_watsonx_model(mock_llm) assert result is True def test_detects_by_module_watsonx(self): """Test detection by watsonx in module name.""" mock_llm = Mock() mock_llm.__class__.__name__ = "SomeModel" mock_llm.__class__.__module__ = "some.watsonx.module" result = is_watsonx_model(mock_llm) assert result is True def test_returns_false_for_openai(self): """Test returns False for OpenAI models.""" mock_llm = Mock() 
mock_llm.__class__.__name__ = "ChatOpenAI" mock_llm.__class__.__module__ = "langchain_openai.chat_models" result = is_watsonx_model(mock_llm) assert result is False def test_returns_false_for_anthropic(self): """Test returns False for Anthropic models.""" mock_llm = Mock() mock_llm.__class__.__name__ = "ChatAnthropic" mock_llm.__class__.__module__ = "langchain_anthropic" result = is_watsonx_model(mock_llm) assert result is False def test_case_insensitive_class_name(self): """Test case insensitive detection for class name.""" mock_llm = Mock() mock_llm.__class__.__name__ = "CHATWATSONX" result = is_watsonx_model(mock_llm) assert result is True def test_case_insensitive_module_name(self): """Test case insensitive detection for module name.""" mock_llm = Mock() mock_llm.__class__.__name__ = "SomeModel" mock_llm.__class__.__module__ = "LANGCHAIN_IBM.chat" result = is_watsonx_model(mock_llm) assert result is True def test_works_with_real_mock_structure(self): """Test with a more realistic mock structure.""" # Simulate what a real ChatWatsonx instance would look like class FakeChatWatsonx: pass mock_llm = FakeChatWatsonx() result = is_watsonx_model(mock_llm) assert result is True def test_detects_llama_on_watsonx(self): """Test detection of Llama model running on WatsonX.""" mock_llm = Mock() mock_llm.__class__.__name__ = "ChatWatsonx" mock_llm.model_id = "meta-llama/llama-3-2-11b-vision" result = is_watsonx_model(mock_llm) assert result is True def test_detects_mistral_on_watsonx(self): """Test detection of Mistral model running on WatsonX.""" mock_llm = Mock() mock_llm.__class__.__name__ = "ChatWatsonx" mock_llm.model_id = "mistralai/mistral-large" result = is_watsonx_model(mock_llm) assert result is True # ============================================================================= # Tests for is_granite_model function (deprecated but kept for compatibility) # ============================================================================= class TestIsGraniteModel: 
"""Test suite for is_granite_model function.""" def test_is_granite_model_with_model_id_granite(self): """Test detection when model_id contains 'granite'.""" mock_llm = Mock() mock_llm.model_id = "ibm/granite-13b-chat-v2" result = is_granite_model(mock_llm) assert result is True def test_is_granite_model_with_model_name_granite(self): """Test detection when model_name contains 'granite'.""" mock_llm = Mock(spec=["model_name"]) mock_llm.model_name = "granite-3.1-8b-instruct" result = is_granite_model(mock_llm) assert result is True def test_is_granite_model_case_insensitive(self): """Test that detection is case insensitive.""" mock_llm = Mock() mock_llm.model_id = "IBM/GRANITE-13B-CHAT" result = is_granite_model(mock_llm) assert result is True def test_is_granite_model_mixed_case(self): """Test detection with mixed case.""" mock_llm = Mock() mock_llm.model_id = "ibm/GrAnItE-model" result = is_granite_model(mock_llm) assert result is True def test_is_granite_model_not_granite(self): """Test returns False for non-Granite models.""" mock_llm = Mock() mock_llm.model_id = "meta-llama/llama-3-70b-instruct" result = is_granite_model(mock_llm) assert result is False def test_is_granite_model_openai(self): """Test returns False for OpenAI models.""" mock_llm = Mock() mock_llm.model_id = "gpt-4" mock_llm.model_name = "gpt-4-turbo" result = is_granite_model(mock_llm) assert result is False def test_is_granite_model_empty_model_id(self): """Test with empty model_id.""" mock_llm = Mock() mock_llm.model_id = "" mock_llm.model_name = "" result = is_granite_model(mock_llm) assert result is False def test_is_granite_model_none_model_id(self): """Test with None model_id.""" mock_llm = Mock() mock_llm.model_id = None mock_llm.model_name = None result = is_granite_model(mock_llm) assert result is False def test_is_granite_model_no_attributes(self): """Test with model that has neither model_id nor model_name.""" mock_llm = Mock(spec=[]) # No attributes result = 
is_granite_model(mock_llm) assert result is False def test_is_granite_model_fallback_to_model_name(self): """Test fallback to model_name when model_id is not available.""" mock_llm = Mock(spec=["model_name"]) mock_llm.model_name = "granite-3b" result = is_granite_model(mock_llm) assert result is True def test_is_granite_model_partial_match(self): """Test that partial match works (granite anywhere in string).""" mock_llm = Mock() mock_llm.model_id = "some-prefix-granite-suffix" result = is_granite_model(mock_llm) assert result is True # ============================================================================= # Tests for get_enhanced_system_prompt function # ============================================================================= class TestGetEnhancedSystemPrompt: """Test suite for get_enhanced_system_prompt function.""" def test_enhances_prompt_with_multiple_tools(self): """Test that prompt is enhanced when multiple tools are provided.""" base_prompt = "You are a helpful assistant." mock_tools = [ create_mock_tool("search_tool"), create_mock_tool("calculator_tool"), create_mock_tool("date_tool"), ] result = get_enhanced_system_prompt(base_prompt, mock_tools) assert base_prompt in result assert "TOOL USAGE GUIDELINES" in result assert "search_tool" in result assert "calculator_tool" in result assert "date_tool" in result def test_no_enhancement_with_empty_tools(self): """Test that prompt is not enhanced when tools list is empty.""" base_prompt = "You are a helpful assistant." result = get_enhanced_system_prompt(base_prompt, []) assert result == base_prompt def test_no_enhancement_with_none_tools(self): """Test that prompt is not enhanced when tools is None.""" base_prompt = "You are a helpful assistant." result = get_enhanced_system_prompt(base_prompt, None) assert result == base_prompt def test_no_enhancement_with_single_tool(self): """Test that prompt is not enhanced with only one tool.""" base_prompt = "You are a helpful assistant." 
mock_tools = [create_mock_tool("single_tool")] result = get_enhanced_system_prompt(base_prompt, mock_tools) assert result == base_prompt def test_enhancement_with_two_tools(self): """Test that prompt is enhanced with exactly two tools.""" base_prompt = "You are a helpful assistant." mock_tools = [create_mock_tool("tool1"), create_mock_tool("tool2")] result = get_enhanced_system_prompt(base_prompt, mock_tools) assert "TOOL USAGE GUIDELINES" in result def test_empty_base_prompt(self): """Test with empty base prompt.""" mock_tools = [create_mock_tool("tool1"), create_mock_tool("tool2")] result = get_enhanced_system_prompt("", mock_tools) assert "TOOL USAGE GUIDELINES" in result def test_enhancement_contains_key_instructions(self): """Test that enhancement contains all key instructions.""" base_prompt = "Base prompt" mock_tools = [create_mock_tool("tool1"), create_mock_tool("tool2")] result = get_enhanced_system_prompt(base_prompt, mock_tools) assert "ALWAYS call tools" in result assert "one tool at a time" in result assert "placeholder syntax" in result assert "AVAILABLE TOOLS" in result def test_tool_names_listed(self): """Test that all tool names are listed in the enhancement.""" mock_tools = [ create_mock_tool("perform_search"), create_mock_tool("get_current_date"), create_mock_tool("evaluate_expression"), ] result = get_enhanced_system_prompt("Base", mock_tools) assert "perform_search" in result assert "get_current_date" in result assert "evaluate_expression" in result # ============================================================================= # Tests for detect_placeholder_in_args function # ============================================================================= class TestDetectPlaceholderInArgs: """Test suite for detect_placeholder_in_args function.""" def test_detects_result_from_placeholder(self): """Test detection of <result-from-...> placeholder.""" tool_calls = [{"name": "calculator", "args": {"expression": "<result-from-search>"}}] 
has_placeholder, value = detect_placeholder_in_args(tool_calls) assert has_placeholder is True assert value == "<result-from-search>" def test_detects_extracted_date_placeholder(self): """Test detection of <extracted_date> placeholder.""" tool_calls = [{"name": "calculator", "args": {"expression": "<extracted_date>-18"}}] has_placeholder, value = detect_placeholder_in_args(tool_calls) assert has_placeholder is True assert "<extracted_date>" in value def test_detects_previous_value_placeholder(self): """Test detection of <previous-value> placeholder.""" tool_calls = [{"name": "tool", "args": {"input": "<previous-value>"}}] has_placeholder, _ = detect_placeholder_in_args(tool_calls) assert has_placeholder is True def test_detects_output_placeholder(self): """Test detection of <output-...> placeholder.""" tool_calls = [{"name": "tool", "args": {"data": "<output-from-api>"}}] has_placeholder, _ = detect_placeholder_in_args(tool_calls) assert has_placeholder is True def test_detects_response_placeholder(self): """Test detection of <response-...> placeholder.""" tool_calls = [{"name": "tool", "args": {"value": "<response-data>"}}] has_placeholder, _ = detect_placeholder_in_args(tool_calls) assert has_placeholder is True def test_detects_current_placeholder(self): """Test detection of <current-...> placeholder.""" tool_calls = [{"name": "tool", "args": {"date": "<current-date>"}}] has_placeholder, _ = detect_placeholder_in_args(tool_calls) assert has_placeholder is True def test_detects_search_result_placeholder(self): """Test detection of <search-result> placeholder.""" tool_calls = [{"name": "tool", "args": {"query": "<search-result>"}}] has_placeholder, _ = detect_placeholder_in_args(tool_calls) assert has_placeholder is True def test_detects_tool_result_placeholder(self): """Test detection of <tool-output> placeholder.""" tool_calls = [{"name": "tool", "args": {"input": "<tool-output>"}}] has_placeholder, _ = detect_placeholder_in_args(tool_calls) assert 
has_placeholder is True def test_no_placeholder_returns_false(self): """Test returns False when no placeholder is present.""" tool_calls = [{"name": "calculator", "args": {"expression": "2 + 2"}}] has_placeholder, value = detect_placeholder_in_args(tool_calls) assert has_placeholder is False assert value is None def test_empty_tool_calls(self): """Test with empty tool_calls list.""" has_placeholder, value = detect_placeholder_in_args([]) assert has_placeholder is False assert value is None def test_none_tool_calls(self): """Test with None tool_calls.""" has_placeholder, value = detect_placeholder_in_args(None) assert has_placeholder is False assert value is None def test_args_as_string(self): """Test detection when args is a string instead of dict.""" tool_calls = [{"name": "tool", "args": "<result-from-previous>"}] has_placeholder, _ = detect_placeholder_in_args(tool_calls) assert has_placeholder is True def test_multiple_tool_calls_first_has_placeholder(self): """Test with multiple tool calls where first has placeholder.""" tool_calls = [ {"name": "tool1", "args": {"value": "<result-from-api>"}}, {"name": "tool2", "args": {"value": "normal"}}, ] has_placeholder, _ = detect_placeholder_in_args(tool_calls) assert has_placeholder is True def test_multiple_tool_calls_second_has_placeholder(self): """Test with multiple tool calls where second has placeholder.""" tool_calls = [ {"name": "tool1", "args": {"value": "normal"}}, {"name": "tool2", "args": {"value": "<result-placeholder>"}}, ] has_placeholder, _ = detect_placeholder_in_args(tool_calls) assert has_placeholder is True def test_nested_args_with_placeholder(self): """Test with nested args structure.""" tool_calls = [{"name": "tool", "args": {"outer": {"inner": "<result>"}}}] # Note: Current implementation only checks top-level values has_placeholder, _ = detect_placeholder_in_args(tool_calls) # Should not detect nested placeholders with current implementation assert has_placeholder is False def 
test_case_insensitive_detection(self): """Test that detection is case insensitive.""" tool_calls = [{"name": "tool", "args": {"value": "<RESULT-FROM-API>"}}] has_placeholder, _ = detect_placeholder_in_args(tool_calls) assert has_placeholder is True def test_tool_call_without_name(self): """Test tool call without name field.""" tool_calls = [{"args": {"value": "<result>"}}] has_placeholder, _ = detect_placeholder_in_args(tool_calls) assert has_placeholder is True def test_tool_call_without_args(self): """Test tool call without args field.""" tool_calls = [{"name": "tool"}] has_placeholder, _ = detect_placeholder_in_args(tool_calls) assert has_placeholder is False def test_normal_angle_brackets_not_detected(self): """Test that normal angle brackets in code are not detected.""" tool_calls = [{"name": "tool", "args": {"code": "if x < 10 and y > 5:"}}] has_placeholder, _ = detect_placeholder_in_args(tool_calls) assert has_placeholder is False def test_html_tags_not_detected(self): """Test that HTML tags are not detected as placeholders.""" tool_calls = [{"name": "tool", "args": {"html": "<div>content</div>"}}] has_placeholder, _ = detect_placeholder_in_args(tool_calls) assert has_placeholder is False # ============================================================================= # Tests for PLACEHOLDER_PATTERN regex # ============================================================================= class TestPlaceholderPattern: """Test suite for PLACEHOLDER_PATTERN regex.""" @pytest.mark.parametrize( ("test_input", "expected"), [ # Should match ("<result-from-search>", True), ("<value-extracted>", True), ("<output-data>", True), ("<response-from-api>", True), ("<data-field>", True), ("<from-previous-step>", True), ("<extract-this>", True), ("<previous-result>", True), ("<current-date>", True), ("<date-value>", True), ("<input-from-user>", True), ("<query-result>", True), ("<search-output>", True), ("<tool-output>", True), ("<RESULT-FROM-API>", True), # Case insensitive 
("<Result-Value>", True), # Mixed case # Should not match ("<div>", False), ("<span>", False), ("<button>", False), ("<html>", False), ("<p>", False), ("<a>", False), ("< >", False), ("<>", False), ("<123>", False), ("<abc>", False), # No keywords ("normal text", False), ("", False), ], ) def test_placeholder_pattern_matching(self, test_input, expected): """Test PLACEHOLDER_PATTERN matches expected patterns.""" result = bool(PLACEHOLDER_PATTERN.search(test_input)) assert result == expected, f"Pattern '{test_input}' should {'match' if expected else 'not match'}" def test_pattern_extracts_full_placeholder(self): """Test that pattern extracts the full placeholder.""" text = "Calculate <result-from-search> minus 5" match = PLACEHOLDER_PATTERN.search(text) assert match is not None assert match.group() == "<result-from-search>" def test_pattern_finds_multiple_placeholders(self): """Test pattern can find multiple placeholders.""" text = "Use <result-from-a> and <output-from-b>" matches = PLACEHOLDER_PATTERN.findall(text) assert len(matches) == 2 # ============================================================================= # Tests for create_granite_agent function # ============================================================================= class TestCreateGraniteAgent: """Test suite for create_granite_agent function.""" def test_raises_error_without_bind_tools(self): """Test that ValueError is raised when LLM lacks bind_tools.""" mock_llm = Mock(spec=[]) # No bind_tools method mock_tools = [Mock(name="tool1")] mock_prompt = Mock() with pytest.raises(ValueError, match="bind_tools"): create_granite_agent(mock_llm, mock_tools, mock_prompt) def test_creates_agent_with_valid_inputs(self): """Test agent creation with valid inputs.""" mock_llm = Mock() mock_llm.bind_tools = Mock(return_value=mock_llm) mock_tools = [Mock(name="tool1")] mock_prompt = Mock() mock_prompt.invoke = Mock(return_value=Mock(messages=[])) agent = create_granite_agent(mock_llm, mock_tools, mock_prompt) 
assert agent is not None # Verify bind_tools was called with both tool_choice options assert mock_llm.bind_tools.call_count == 2 def test_bind_tools_called_with_required(self): """Test that bind_tools is called with tool_choice='required'.""" mock_llm = Mock() mock_llm.bind_tools = Mock(return_value=mock_llm) mock_tools = [Mock(name="tool1")] mock_prompt = Mock() create_granite_agent(mock_llm, mock_tools, mock_prompt) calls = mock_llm.bind_tools.call_args_list tool_choices = [call[1].get("tool_choice") for call in calls] assert "required" in tool_choices def test_bind_tools_called_with_auto(self): """Test that bind_tools is called with tool_choice='auto'.""" mock_llm = Mock() mock_llm.bind_tools = Mock(return_value=mock_llm) mock_tools = [Mock(name="tool1")] mock_prompt = Mock() create_granite_agent(mock_llm, mock_tools, mock_prompt) calls = mock_llm.bind_tools.call_args_list tool_choices = [call[1].get("tool_choice") for call in calls] assert "auto" in tool_choices def test_empty_tools_list(self): """Test agent creation with empty tools list.""" mock_llm = Mock() mock_llm.bind_tools = Mock(return_value=mock_llm) mock_prompt = Mock() agent = create_granite_agent(mock_llm, [], mock_prompt) assert agent is not None def test_custom_forced_iterations(self): """Test agent creation with custom forced_iterations.""" mock_llm = Mock() mock_llm.bind_tools = Mock(return_value=mock_llm) mock_tools = [Mock(name="tool1")] mock_prompt = Mock() agent = create_granite_agent(mock_llm, mock_tools, mock_prompt, forced_iterations=5) assert agent is not None class TestCreateGraniteAgentDynamicInvoke: """Test suite for the dynamic_invoke inner function in create_granite_agent.""" def setup_method(self): """Set up common mocks for each test.""" self.mock_llm = Mock() self.mock_llm_required = Mock() self.mock_llm_auto = Mock() def bind_tools_side_effect(_tools, tool_choice=None): if tool_choice == "required": return self.mock_llm_required return self.mock_llm_auto self.mock_llm.bind_tools 
= Mock(side_effect=bind_tools_side_effect) self.mock_prompt = Mock() self.mock_prompt.invoke = Mock(return_value=Mock(messages=[])) self.mock_tools = [Mock(name="tool1")] def test_uses_required_for_first_iteration(self): """Test that tool_choice='required' is used for first iteration.""" self.mock_llm_required.invoke = Mock(return_value=AIMessage(content="response")) agent = create_granite_agent(self.mock_llm, self.mock_tools, self.mock_prompt) # Invoke with no intermediate steps (first iteration) inputs = {"input": "test", "intermediate_steps": []} # The agent is a RunnableLambda | ToolsAgentOutputParser chain # We need to invoke the first part (RunnableLambda) # This will raise because ToolsAgentOutputParser expects AIMessage with tool_calls with ( patch("lfx.components.langchain_utilities.ibm_granite_handler.format_to_tool_messages", return_value=[]), contextlib.suppress(Exception), ): agent.invoke(inputs) self.mock_llm_required.invoke.assert_called() def test_uses_auto_after_forced_iterations(self): """Test that tool_choice='auto' is used after forced iterations.""" self.mock_llm_auto.invoke = Mock(return_value=AIMessage(content="final response")) agent = create_granite_agent(self.mock_llm, self.mock_tools, self.mock_prompt, forced_iterations=2) # Invoke with 2 intermediate steps (past forced iterations) inputs = {"input": "test", "intermediate_steps": [("action1", "result1"), ("action2", "result2")]} with ( patch("lfx.components.langchain_utilities.ibm_granite_handler.format_to_tool_messages", return_value=[]), contextlib.suppress(Exception), ): agent.invoke(inputs) self.mock_llm_auto.invoke.assert_called() def test_placeholder_detection_triggers_corrective_message(self): """Test that placeholder detection triggers corrective message.""" # Create response with placeholder in tool calls mock_response = Mock() mock_response.tool_calls = [{"name": "calculator", "args": {"expression": "<result-from-search>"}}] self.mock_llm_required.invoke = 
Mock(return_value=mock_response) self.mock_llm_auto.invoke = Mock(return_value=AIMessage(content="corrected response")) agent = create_granite_agent(self.mock_llm, self.mock_tools, self.mock_prompt) inputs = {"input": "test", "intermediate_steps": []} with ( patch("lfx.components.langchain_utilities.ibm_granite_handler.format_to_tool_messages", return_value=[]), contextlib.suppress(Exception), ): agent.invoke(inputs) # After placeholder detection, llm_auto should be called with corrective message assert self.mock_llm_auto.invoke.called # ============================================================================= # Integration tests with ToolCallingAgentComponent # ============================================================================= class TestToolCallingAgentIntegration: """Integration tests for ToolCallingAgentComponent with IBM WatsonX.""" def test_watsonx_detection_in_create_agent_runnable(self): """Test that WatsonX models are detected in create_agent_runnable.""" from lfx.components.langchain_utilities import ToolCallingAgentComponent # Create a mock WatsonX LLM (simulating ChatWatsonx) mock_llm = Mock() mock_llm.__class__.__name__ = "ChatWatsonx" mock_llm.model_id = "ibm/granite-13b-chat-v2" mock_llm.bind_tools = Mock(return_value=mock_llm) mock_tools = [create_mock_tool("test_tool"), create_mock_tool("test_tool2")] component = ToolCallingAgentComponent() component.tools = mock_tools component.system_prompt = "Test prompt" with ( patch.object(component, "_get_llm", return_value=mock_llm), patch("lfx.components.langchain_utilities.tool_calling.create_granite_agent") as mock_create, ): mock_create.return_value = Mock() component.create_agent_runnable() # Verify create_granite_agent was called (for WatsonX models) mock_create.assert_called_once() def test_watsonx_llama_uses_default_agent(self): """Test that Llama model on WatsonX uses default agent (not Granite-specific).""" from lfx.components.langchain_utilities import ToolCallingAgentComponent # 
Create a mock WatsonX LLM with Llama model (non-Granite) mock_llm = Mock() mock_llm.__class__.__name__ = "ChatWatsonx" mock_llm.model_id = "meta-llama/llama-3-2-11b-vision" mock_llm.bind_tools = Mock(return_value=mock_llm) mock_tools = [create_mock_tool("tool1"), create_mock_tool("tool2")] component = ToolCallingAgentComponent() component.tools = mock_tools component.system_prompt = "Test prompt" with ( patch.object(component, "_get_llm", return_value=mock_llm), patch("lfx.components.langchain_utilities.tool_calling.create_tool_calling_agent") as mock_default, ): mock_default.return_value = Mock() component.create_agent_runnable() # Verify create_tool_calling_agent was called (default behavior for non-Granite) mock_default.assert_called_once() def test_non_watsonx_uses_default_agent(self): """Test that non-WatsonX models use the default agent creation.""" from lfx.components.langchain_utilities import ToolCallingAgentComponent # Create a mock non-WatsonX LLM (e.g., OpenAI) mock_llm = Mock() mock_llm.__class__.__name__ = "ChatOpenAI" mock_llm.__class__.__module__ = "langchain_openai" mock_llm.model_id = "gpt-4" mock_llm.bind_tools = Mock(return_value=mock_llm) mock_tools = [create_mock_tool("test_tool")] component = ToolCallingAgentComponent() component.tools = mock_tools component.system_prompt = "Test prompt" with ( patch.object(component, "_get_llm", return_value=mock_llm), patch("lfx.components.langchain_utilities.tool_calling.create_tool_calling_agent") as mock_create, ): mock_create.return_value = Mock() component.create_agent_runnable() # Verify create_tool_calling_agent was called mock_create.assert_called_once() def test_system_prompt_enhanced_for_watsonx(self): """Test that system prompt is enhanced for WatsonX models.""" from lfx.components.langchain_utilities import ToolCallingAgentComponent mock_llm = Mock() mock_llm.__class__.__name__ = "ChatWatsonx" mock_llm.model_id = "ibm/granite-13b-chat-v2" mock_llm.bind_tools = Mock(return_value=mock_llm) 
mock_tools = [create_mock_tool("tool1"), create_mock_tool("tool2")] component = ToolCallingAgentComponent() component.tools = mock_tools component.system_prompt = "Original prompt" with ( patch.object(component, "_get_llm", return_value=mock_llm), patch("lfx.components.langchain_utilities.tool_calling.create_granite_agent") as mock_create, ): mock_create.return_value = Mock() component.create_agent_runnable() # Verify enhanced prompt is stored separately (original is not mutated) assert component.system_prompt == "Original prompt" assert hasattr(component, "_effective_system_prompt") assert "TOOL USAGE GUIDELINES" in component._effective_system_prompt def test_system_prompt_not_enhanced_without_tools(self): """Test that system prompt is not enhanced when no tools.""" from lfx.components.langchain_utilities import ToolCallingAgentComponent mock_llm = Mock() mock_llm.__class__.__name__ = "ChatWatsonx" mock_llm.model_id = "ibm/granite-13b-chat-v2" mock_llm.bind_tools = Mock(return_value=mock_llm) component = ToolCallingAgentComponent() component.tools = [] component.system_prompt = "Original prompt" with ( patch.object(component, "_get_llm", return_value=mock_llm), patch("lfx.components.langchain_utilities.tool_calling.create_tool_calling_agent") as mock_create, ): mock_create.return_value = Mock() component.create_agent_runnable() # Verify system prompt was NOT enhanced (no _effective_system_prompt set) assert component.system_prompt == "Original prompt" assert not hasattr(component, "_effective_system_prompt") # ============================================================================= # Edge case and error handling tests # ============================================================================= class TestEdgeCases: """Test edge cases and error handling.""" def test_is_granite_model_with_integer_model_id(self): """Test handling of non-string model_id.""" mock_llm = Mock() mock_llm.model_id = 12345 result = is_granite_model(mock_llm) assert result is False 
def test_detect_placeholder_with_special_characters(self): """Test placeholder detection with special regex characters.""" tool_calls = [{"name": "tool", "args": {"value": "<result-from-search.+*?>"}}] # Should not raise regex error has_placeholder, _ = detect_placeholder_in_args(tool_calls) assert has_placeholder is True def test_get_enhanced_system_prompt_preserves_base(self): """Test that base prompt is always preserved.""" base_prompt = "Very important system instructions that must be kept." mock_tools = [create_mock_tool("t1"), create_mock_tool("t2")] result = get_enhanced_system_prompt(base_prompt, mock_tools) assert result.startswith(base_prompt) def test_create_granite_agent_with_none_tools(self): """Test agent creation when tools is None.""" mock_llm = Mock() mock_llm.bind_tools = Mock(return_value=mock_llm) mock_prompt = Mock() # Should handle None tools gracefully agent = create_granite_agent(mock_llm, None, mock_prompt) assert agent is not None def test_placeholder_in_numeric_value(self): """Test that numeric values don't trigger placeholder detection.""" tool_calls = [{"name": "calculator", "args": {"value": 12345}}] has_placeholder, _ = detect_placeholder_in_args(tool_calls) assert has_placeholder is False def test_placeholder_in_list_value(self): """Test handling of list values in args.""" tool_calls = [{"name": "tool", "args": {"items": ["<result>", "normal"]}}] # Current implementation doesn't check list items has_placeholder, _ = detect_placeholder_in_args(tool_calls) assert has_placeholder is False def test_is_granite_with_watsonx_model_id(self): """Test detection with full WatsonX model ID format.""" mock_llm = Mock() mock_llm.model_id = "ibm/granite-3-8b-instruct" result = is_granite_model(mock_llm) assert result is True def test_empty_args_dict(self): """Test with empty args dictionary.""" tool_calls = [{"name": "tool", "args": {}}] has_placeholder, _ = detect_placeholder_in_args(tool_calls) assert has_placeholder is False
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/components/models_and_agents/test_ibm_granite_handler.py", "license": "MIT License", "lines": 686, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/graph/test_cache_restoration.py
"""Tests for cache restoration behavior in build_vertex. This module tests the fix for the issue where cache restoration failure would leave vertex.built = True, causing subsequent build() calls to return early without setting vertex.result. Bug scenario (before fix): 1. Frozen vertex, cache hit -> vertex.built = True restored from cache 2. finalize_build() throws exception -> should_build = True 3. build() is called, but sees frozen AND built -> returns early 4. finalize_build() never called again -> vertex.result is None 5. Error: "no result found for vertex" Fix: Reset vertex.built = False when cache restoration fails, so build() runs fully and sets vertex.result correctly. """ from unittest.mock import AsyncMock, Mock import pytest class TestCacheRestorationBuiltFlagReset: """Tests for vertex.built flag reset when cache restoration fails. These tests verify that when finalize_build() fails during cache restoration, vertex.built is reset to False so that subsequent build() calls work correctly. """ @pytest.fixture def mock_vertex(self): """Create a mock vertex for testing cache restoration.""" vertex = Mock() vertex.id = "test-vertex-id" vertex.frozen = True vertex.built = False vertex.is_loop = False vertex.display_name = "TestComponent" vertex.result = None vertex.artifacts = {} vertex.built_object = {} vertex.built_result = {} vertex.full_data = {} vertex.results = {} vertex.build = AsyncMock() vertex.finalize_build = Mock() vertex.built_object_repr = Mock(return_value="test") return vertex @pytest.fixture def cached_vertex_dict(self): """Create a cached vertex dictionary.""" return { "built": True, "artifacts": {"test": "artifact"}, "built_object": {"test": "object"}, "built_result": {"test": "result"}, "full_data": {"test": "data"}, "results": {"test": "results"}, } def test_should_reset_built_flag_when_finalize_build_fails(self, mock_vertex, cached_vertex_dict): """Test that vertex.built is reset to False when finalize_build raises exception. 
Given: A frozen vertex with cached state restored (built=True) When: finalize_build() throws an exception Then: vertex.built should be reset to False """ # Arrange mock_vertex.built = cached_vertex_dict["built"] # True from cache mock_vertex.finalize_build.side_effect = ValueError("Test error") # Act - simulate the cache restoration logic should_build = False try: mock_vertex.finalize_build() if mock_vertex.result is not None: mock_vertex.result.used_frozen_result = True except Exception: mock_vertex.built = False # This is the fix should_build = True # Assert assert mock_vertex.built is False, "vertex.built should be reset to False after finalize_build failure" assert should_build is True, "should_build should be True to trigger rebuild" def test_should_reset_built_flag_when_key_error_on_cache_access(self, mock_vertex): """Test that vertex.built is reset to False when KeyError occurs during cache access. Given: A frozen vertex with partial cached state When: KeyError occurs when accessing cache dict Then: vertex.built should be reset to False """ # Arrange mock_vertex.built = True # Assume it was set somehow # Simulate incomplete cache dict (missing key) cached_result = {"result": {}} # Missing required keys # Act - simulate the cache restoration logic should_build = False try: cached_vertex_dict = cached_result["result"] _ = cached_vertex_dict["built"] # This would raise KeyError except KeyError: mock_vertex.built = False # This is the fix should_build = True # Assert assert mock_vertex.built is False, "vertex.built should be reset to False after KeyError" assert should_build is True, "should_build should be True to trigger rebuild" def test_build_returns_early_when_built_flag_not_reset(self, mock_vertex): """Test the bug scenario: build() returns early when built=True is not reset. This test demonstrates the bug that occurs WITHOUT the fix: When frozen=True and built=True, build() returns early without calling finalize_build(), leaving result=None. 
""" # Arrange - simulate the broken state (before fix) mock_vertex.frozen = True mock_vertex.built = True # Not reset after failed cache restoration mock_vertex.result = None is_loop_component = mock_vertex.display_name == "Loop" or mock_vertex.is_loop # Act - simulate build() decision should_return_early = mock_vertex.frozen and mock_vertex.built and not is_loop_component # Assert - this is the problematic behavior assert should_return_early is True, "build() would return early with unreset built flag" assert mock_vertex.result is None, "result remains None because finalize_build was never called" def test_build_continues_when_built_flag_is_reset(self, mock_vertex): """Test the fix: build() continues when built=False after reset. This test demonstrates the correct behavior WITH the fix: When built=False (reset after failed cache restoration), build() continues normally and calls finalize_build(). """ # Arrange - simulate the fixed state (after fix) mock_vertex.frozen = True mock_vertex.built = False # Reset after failed cache restoration is_loop_component = mock_vertex.display_name == "Loop" or mock_vertex.is_loop # Act - simulate build() decision should_return_early = mock_vertex.frozen and mock_vertex.built and not is_loop_component # Assert - build() should NOT return early assert should_return_early is False, "build() should continue with reset built flag" class TestCacheRestorationSuccessCase: """Tests for successful cache restoration.""" @pytest.fixture def mock_vertex_with_result(self): """Create a mock vertex that successfully restores from cache.""" vertex = Mock() vertex.id = "test-vertex-id" vertex.frozen = True vertex.built = True vertex.is_loop = False vertex.display_name = "TestComponent" vertex.result = Mock() # Has a result vertex.artifacts = {"test": "artifact"} return vertex def test_should_not_modify_built_flag_on_successful_restoration(self, mock_vertex_with_result): """Test that vertex.built remains True when cache restoration succeeds. 
Given: A frozen vertex with valid cached state When: finalize_build() succeeds Then: vertex.built should remain True and should_build should be False """ # Arrange mock_vertex_with_result.finalize_build = Mock() # No exception # Act - simulate successful cache restoration should_build = False try: mock_vertex_with_result.finalize_build() if mock_vertex_with_result.result is not None: mock_vertex_with_result.result.used_frozen_result = True except Exception: mock_vertex_with_result.built = False should_build = True # Assert assert mock_vertex_with_result.built is True, "vertex.built should remain True on success" assert should_build is False, "should_build should be False on success" assert mock_vertex_with_result.result.used_frozen_result is True class TestCacheRestorationEdgeCases: """Edge case tests for cache restoration.""" def test_non_frozen_vertex_should_always_build(self): """Test that non-frozen vertex always builds regardless of built flag. Given: A non-frozen vertex When: Checking if should build Then: should_build should be True regardless of other flags """ # Arrange vertex = Mock() vertex.frozen = False vertex.built = True vertex.is_loop = False vertex.display_name = "TestComponent" is_loop_component = vertex.display_name == "Loop" or vertex.is_loop # Act - simulate build_vertex decision should_build = not vertex.frozen or is_loop_component # Assert assert should_build is True, "Non-frozen vertex should always build" def test_cache_miss_should_trigger_build(self): """Test that cache miss triggers build. 
Given: A frozen vertex with cache miss When: Checking if should build Then: should_build should be True """ # Arrange class CacheMiss: pass cached_result = CacheMiss() # Act - simulate cache miss check should_build = isinstance(cached_result, CacheMiss) # Assert assert should_build is True, "Cache miss should trigger build" def test_loop_component_should_always_build_even_when_frozen(self): """Test that Loop component always builds even when frozen and built. Given: A frozen Loop component with built=True When: Checking if should build Then: should_build should be True (loops need to iterate) """ # Arrange vertex = Mock() vertex.frozen = True vertex.built = True vertex.is_loop = True vertex.display_name = "Loop" is_loop_component = vertex.display_name == "Loop" or vertex.is_loop # Act - simulate build_vertex decision should_build = not vertex.frozen or is_loop_component # Assert assert should_build is True, "Loop component should always build" def test_multiple_finalize_build_failures_all_reset_built_flag(self): """Test that multiple types of exceptions all reset built flag. 
Given: Various exceptions that could occur in finalize_build When: Each exception is raised Then: vertex.built should be reset to False in all cases """ exceptions_to_test = [ ValueError("Test value error"), TypeError("Test type error"), KeyError("Test key error"), AttributeError("Test attribute error"), RuntimeError("Test runtime error"), ] for exception in exceptions_to_test: # Arrange vertex = Mock() vertex.built = True vertex.finalize_build = Mock(side_effect=exception) # Act should_build = False try: vertex.finalize_build() except Exception: vertex.built = False should_build = True # Assert assert vertex.built is False, f"vertex.built should be reset for {type(exception).__name__}" assert should_build is True, f"should_build should be True for {type(exception).__name__}" class TestCacheRestorationIntegration: """Integration-style tests simulating full cache restoration flow.""" @pytest.fixture def mock_graph(self): """Create a mock graph for testing.""" graph = Mock() graph.get_vertex = Mock() graph.run_manager = Mock() graph.run_manager.add_to_vertices_being_run = Mock() return graph def test_full_flow_with_finalize_build_failure(self): """Test the complete flow when finalize_build fails during cache restoration. This simulates the exact scenario that was causing the bug: 1. Frozen vertex, cache hit 2. Restore state from cache (built=True) 3. finalize_build() fails 4. built flag should be reset 5. 
build() should run fully """ # Arrange vertex = Mock() vertex.id = "ChatInput-abc123" vertex.frozen = True vertex.built = False vertex.is_loop = False vertex.display_name = "Chat Input" vertex.result = None cached_vertex_dict = { "built": True, "artifacts": {}, "built_object": {"message": Mock()}, "built_result": {"message": Mock()}, "full_data": {}, "results": {"message": Mock()}, } # Simulate finalize_build failure def finalize_build_that_fails(): msg = "Simulated finalize_build failure" raise ValueError(msg) vertex.finalize_build = finalize_build_that_fails # Act - simulate build_vertex logic should_build = False is_loop_component = vertex.display_name == "Loop" or vertex.is_loop if not vertex.frozen or is_loop_component: should_build = True else: # Simulate cache hit - restore state vertex.built = cached_vertex_dict["built"] vertex.artifacts = cached_vertex_dict["artifacts"] vertex.built_object = cached_vertex_dict["built_object"] vertex.built_result = cached_vertex_dict["built_result"] vertex.full_data = cached_vertex_dict["full_data"] vertex.results = cached_vertex_dict["results"] try: vertex.finalize_build() except Exception: vertex.built = False # THE FIX should_build = True # Assert assert should_build is True, "should_build should be True after finalize_build failure" assert vertex.built is False, "vertex.built should be reset to False" # Verify that build() will NOT return early should_return_early = vertex.frozen and vertex.built and not is_loop_component assert should_return_early is False, "build() should NOT return early with reset built flag" def test_second_run_scenario_with_fix(self): """Test the exact scenario reported: first run works, second run fails. This test simulates: 1. First run: vertex builds normally 2. 
Second run: cache restoration fails, but fix ensures rebuild works """ # First run - simulates successful initial build vertex = Mock() vertex.id = "ChatInput-ybc2G" vertex.frozen = True vertex.built = False vertex.is_loop = False vertex.display_name = "Chat Input" vertex.result = None # Simulate first run: should_build = True (not frozen initially or no cache) # After first run: vertex.built = True, vertex.result = Mock() vertex.built = True vertex.result = Mock() # First run sets result # Second run - cache hit but finalize_build fails # This simulates a new vertex instance with same ID vertex_run2 = Mock() vertex_run2.id = "ChatInput-ybc2G" vertex_run2.frozen = True vertex_run2.built = False # New instance starts with built=False vertex_run2.is_loop = False vertex_run2.display_name = "Chat Input" vertex_run2.result = None # New instance starts with result=None cached_vertex_dict = { "built": True, # From first run "artifacts": {}, "built_object": {"message": Mock()}, "built_result": {"message": Mock()}, "full_data": {}, "results": {"message": Mock()}, } # Simulate cache restoration failure vertex_run2.finalize_build = Mock(side_effect=ValueError("Simulated failure")) # Act - simulate build_vertex for second run should_build = False is_loop_component = vertex_run2.display_name == "Loop" or vertex_run2.is_loop if not vertex_run2.frozen or is_loop_component: should_build = True else: # Cache hit - restore state vertex_run2.built = cached_vertex_dict["built"] # Set to True try: vertex_run2.finalize_build() except Exception: vertex_run2.built = False # THE FIX - reset to False should_build = True # Assert - with the fix, the vertex should rebuild correctly assert vertex_run2.built is False, "vertex.built should be reset after cache restoration failure" assert should_build is True, "should_build should trigger rebuild" # Verify build() won't return early should_return_early = vertex_run2.frozen and vertex_run2.built and not is_loop_component assert should_return_early 
is False, "build() should continue with reset built flag"
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/graph/test_cache_restoration.py", "license": "MIT License", "lines": 357, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/tests/unit/cli/test_lazy_imports.py
"""Tests for CLI lazy import mechanisms. These tests verify that the lazy import patterns in CLI modules work correctly and help reduce cold start time. """ import json import pytest class TestCLIModuleLazyImports: """Test lazy imports in CLI __init__ module.""" def test_serve_command_accessible_via_getattr(self): """Test that serve_command can be accessed via lazy import.""" import lfx.cli # Access serve_command - this should trigger lazy loading serve_cmd = lfx.cli.serve_command assert serve_cmd is not None assert callable(serve_cmd) def test_invalid_cli_attribute_raises_error(self): """Test that accessing invalid attribute raises AttributeError.""" import lfx.cli with pytest.raises(AttributeError, match="has no attribute 'nonexistent_command'"): _ = lfx.cli.nonexistent_command class TestMainModuleLazyImports: """Test lazy imports in __main__ module.""" def test_main_module_importable(self): """Test that __main__ module can be imported without heavy dependencies.""" # This test verifies the module imports quickly import lfx.__main__ assert hasattr(lfx.__main__, "main") assert callable(lfx.__main__.main) def test_serve_command_wrapper_exists(self): """Test that serve_command_wrapper is defined.""" from lfx.__main__ import serve_command_wrapper assert callable(serve_command_wrapper) def test_run_command_wrapper_exists(self): """Test that run_command_wrapper is defined.""" from lfx.__main__ import run_command_wrapper assert callable(run_command_wrapper) class TestRunCommandLazyImports: """Test lazy imports in run.py module.""" def test_run_module_importable(self): """Test that run module can be imported.""" from lfx.cli import run assert hasattr(run, "run") def test_script_loader_functions_not_imported_at_module_level(self): """Test that script_loader functions are imported lazily.""" # Import the run module from lfx.cli import run # The script_loader module should NOT be in sys.modules at this point # unless something else imported it # This is a soft test - we 
mainly want to ensure the module loads # Verify the run function exists and is callable assert hasattr(run, "run") assert callable(run.run) class TestValidationLazyImports: """Test lazy imports in validation.py module.""" def test_validation_module_importable(self): """Test that validation module can be imported.""" from lfx.cli import validation assert hasattr(validation, "is_valid_env_var_name") assert hasattr(validation, "validate_global_variables_for_env") def test_is_valid_env_var_name_works(self): """Test that is_valid_env_var_name function works correctly.""" from lfx.cli.validation import is_valid_env_var_name assert is_valid_env_var_name("VALID_VAR") assert is_valid_env_var_name("_PRIVATE") assert is_valid_env_var_name("VAR123") assert not is_valid_env_var_name("invalid-var") assert not is_valid_env_var_name("123_STARTS_WITH_NUMBER") assert not is_valid_env_var_name("has space") class TestScriptLoaderLazyImports: """Test lazy imports in script_loader.py module.""" def test_script_loader_module_importable(self): """Test that script_loader module can be imported.""" from lfx.cli import script_loader assert hasattr(script_loader, "load_graph_from_script") assert hasattr(script_loader, "find_graph_variable") assert hasattr(script_loader, "extract_message_from_result") assert hasattr(script_loader, "extract_text_from_result") def test_find_graph_variable_works(self, tmp_path): """Test that find_graph_variable function works.""" from lfx.cli.script_loader import find_graph_variable # Create a test script with a graph variable script_content = """ from lfx.graph import Graph graph = Graph.from_payload({}) """ script_path = tmp_path / "test_script.py" script_path.write_text(script_content) result = find_graph_variable(script_path) assert result is not None assert result["type"] == "function_call" assert "Graph" in result["function"] def test_find_graph_variable_with_get_graph_function(self, tmp_path): """Test that find_graph_variable detects get_graph 
functions.""" from lfx.cli.script_loader import find_graph_variable # Create a test script with get_graph function script_content = """ def get_graph(): from lfx.graph import Graph return Graph.from_payload({}) """ script_path = tmp_path / "test_script.py" script_path.write_text(script_content) result = find_graph_variable(script_path) assert result is not None assert result["type"] == "function_definition" assert result["function"] == "get_graph" def test_find_graph_variable_returns_none_for_no_graph(self, tmp_path): """Test that find_graph_variable returns None when no graph is found.""" from lfx.cli.script_loader import find_graph_variable script_content = """ x = 1 y = 2 """ script_path = tmp_path / "test_script.py" script_path.write_text(script_content) result = find_graph_variable(script_path) assert result is None class TestFlowDictHandling: """Test in-memory flow dict handling in run command.""" def test_json_parsing_for_flow_json(self): """Test that flow_json string can be parsed to dict.""" flow_json = '{"data": {"nodes": [], "edges": []}}' flow_dict = json.loads(flow_json) assert isinstance(flow_dict, dict) assert "data" in flow_dict assert flow_dict["data"]["nodes"] == [] assert flow_dict["data"]["edges"] == [] def test_json_parsing_for_complex_flow(self): """Test parsing a more complex flow structure.""" flow_json = json.dumps( { "data": { "nodes": [ {"id": "node1", "type": "ChatInput", "data": {}}, {"id": "node2", "type": "ChatOutput", "data": {}}, ], "edges": [{"source": "node1", "target": "node2"}], } } ) flow_dict = json.loads(flow_json) assert len(flow_dict["data"]["nodes"]) == 2 assert len(flow_dict["data"]["edges"]) == 1 assert flow_dict["data"]["nodes"][0]["id"] == "node1" def test_invalid_json_raises_error(self): """Test that invalid JSON raises appropriate error.""" invalid_json = '{"data": invalid}' with pytest.raises(json.JSONDecodeError): json.loads(invalid_json)
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/tests/unit/cli/test_lazy_imports.py", "license": "MIT License", "lines": 149, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/tests/unit/field_typing/test_lazy_imports.py
"""Tests for field_typing lazy import mechanism. These tests verify that the __getattr__ lazy import pattern in field_typing/__init__.py works correctly and returns the expected types. """ import pytest class TestFieldTypingLazyImports: """Test that field_typing exports are accessible via lazy imports.""" def test_input_import(self): """Test that Input can be imported from field_typing.""" from lfx.field_typing import Input from lfx.template.field.base import Input as DirectInput assert Input is DirectInput def test_output_import(self): """Test that Output can be imported from field_typing.""" from lfx.field_typing import Output from lfx.template.field.base import Output as DirectOutput assert Output is DirectOutput def test_range_spec_import(self): """Test that RangeSpec can be imported from field_typing.""" from lfx.field_typing import RangeSpec from lfx.field_typing.range_spec import RangeSpec as DirectRangeSpec assert RangeSpec is DirectRangeSpec def test_data_import(self): """Test that Data can be imported from field_typing.""" from lfx.field_typing import Data from lfx.schema.data import Data as DirectData assert Data is DirectData def test_constants_imports(self): """Test that constants from langchain are accessible.""" from lfx.field_typing import ( AgentExecutor, BaseChatMemory, BaseChatModel, BaseDocumentCompressor, BaseLanguageModel, BaseLLM, BaseLoader, BaseMemory, BaseOutputParser, BasePromptTemplate, BaseRetriever, Chain, ChatPromptTemplate, Document, Embeddings, PromptTemplate, TextSplitter, Tool, VectorStore, ) # Verify they are not None (actual types or stubs) assert AgentExecutor is not None assert BaseChatMemory is not None assert BaseChatModel is not None assert BaseDocumentCompressor is not None assert BaseLanguageModel is not None assert BaseLLM is not None assert BaseLoader is not None assert BaseMemory is not None assert BaseOutputParser is not None assert BasePromptTemplate is not None assert BaseRetriever is not None assert Chain is not 
None assert ChatPromptTemplate is not None assert Document is not None assert Embeddings is not None assert PromptTemplate is not None assert TextSplitter is not None assert Tool is not None assert VectorStore is not None def test_type_aliases_import(self): """Test that type aliases are accessible.""" from lfx.field_typing import ( Callable, Code, LanguageModel, NestedDict, Object, Retriever, Text, ) assert Callable is not None assert Code is not None assert LanguageModel is not None assert NestedDict is not None assert Object is not None assert Retriever is not None assert Text is not None def test_invalid_attribute_raises_error(self): """Test that accessing invalid attribute raises AttributeError.""" import lfx.field_typing with pytest.raises(AttributeError, match="has no attribute 'NonExistentType'"): _ = lfx.field_typing.NonExistentType def test_all_exports_are_accessible(self): """Test that all items in __all__ are accessible.""" import lfx.field_typing for name in lfx.field_typing.__all__: attr = getattr(lfx.field_typing, name) assert attr is not None, f"{name} should be accessible" def test_repeated_access_returns_same_object(self): """Test that repeated access returns the same object (caching works).""" from lfx.field_typing import Data as Data1 from lfx.field_typing import Data as Data2 assert Data1 is Data2 def test_constants_match_direct_imports(self): """Test that constants from field_typing match direct imports from constants module.""" from lfx.field_typing import Data, Document, Object from lfx.field_typing.constants import Data as DirectData from lfx.field_typing.constants import Document as DirectDocument from lfx.field_typing.constants import Object as DirectObject assert Data is DirectData assert Document is DirectDocument assert Object is DirectObject class TestFieldTypingModuleStructure: """Test the structure of the field_typing module.""" def test_all_list_exists(self): """Test that __all__ is defined.""" import lfx.field_typing assert 
hasattr(lfx.field_typing, "__all__") assert isinstance(lfx.field_typing.__all__, list) assert len(lfx.field_typing.__all__) > 0 def test_getattr_function_exists(self): """Test that __getattr__ is defined for lazy loading.""" import lfx.field_typing # The module should have __getattr__ defined # We verify this indirectly by checking we can access lazy attributes assert hasattr(lfx.field_typing, "Input") or "Input" in lfx.field_typing.__all__ def test_constants_names_set_exists(self): """Test that _CONSTANTS_NAMES is defined.""" import lfx.field_typing assert hasattr(lfx.field_typing, "_CONSTANTS_NAMES") assert isinstance(lfx.field_typing._CONSTANTS_NAMES, set)
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/tests/unit/field_typing/test_lazy_imports.py", "license": "MIT License", "lines": 132, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/test_auth_jwt_algorithms.py
"""Comprehensive tests for JWT algorithm support (HS256, RS256, RS512). Tests cover: - AuthSettings configuration for each algorithm - RSA key generation and persistence - Token creation and verification - Error cases and edge cases - Authentication failure scenarios """ import tempfile from datetime import timedelta from pathlib import Path from unittest.mock import AsyncMock, MagicMock, patch import jwt import pytest from fastapi import HTTPException from jwt import InvalidTokenError from pydantic import SecretStr class TestAuthSettingsAlgorithms: """Test AuthSettings configuration for different JWT algorithms.""" def test_default_algorithm_is_hs256(self): """Default algorithm should be HS256 for backward compatibility (when not overridden by env).""" from lfx.services.settings.auth import AuthSettings with tempfile.TemporaryDirectory() as tmpdir: # Explicitly set HS256 to test the setting works (env var may override default) settings = AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM="HS256") assert settings.ALGORITHM == "HS256" def test_hs256_generates_secret_key(self): """HS256 should generate a secret key automatically.""" from lfx.services.settings.auth import AuthSettings with tempfile.TemporaryDirectory() as tmpdir: settings = AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM="HS256") secret_key = settings.SECRET_KEY.get_secret_value() assert secret_key is not None assert len(secret_key) >= 32 def test_rs256_generates_rsa_key_pair(self): """RS256 should generate RSA key pair automatically.""" from lfx.services.settings.auth import AuthSettings with tempfile.TemporaryDirectory() as tmpdir: settings = AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM="RS256") private_key = settings.PRIVATE_KEY.get_secret_value() public_key = settings.PUBLIC_KEY assert private_key is not None assert public_key is not None assert "-----BEGIN PRIVATE KEY-----" in private_key assert "-----BEGIN PUBLIC KEY-----" in public_key def test_rs512_generates_rsa_key_pair(self): """RS512 should generate RSA key 
pair automatically.""" from lfx.services.settings.auth import AuthSettings with tempfile.TemporaryDirectory() as tmpdir: settings = AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM="RS512") private_key = settings.PRIVATE_KEY.get_secret_value() public_key = settings.PUBLIC_KEY assert private_key is not None assert public_key is not None assert "-----BEGIN PRIVATE KEY-----" in private_key assert "-----BEGIN PUBLIC KEY-----" in public_key def test_rsa_keys_persisted_to_files(self): """RSA keys should be persisted to files in CONFIG_DIR.""" from lfx.services.settings.auth import AuthSettings with tempfile.TemporaryDirectory() as tmpdir: settings = AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM="RS256") private_key_path = Path(tmpdir) / "private_key.pem" public_key_path = Path(tmpdir) / "public_key.pem" assert private_key_path.exists() assert public_key_path.exists() # Verify file contents match settings assert private_key_path.read_text() == settings.PRIVATE_KEY.get_secret_value() assert public_key_path.read_text() == settings.PUBLIC_KEY def test_rsa_keys_loaded_from_existing_files(self): """RSA keys should be loaded from existing files.""" from lfx.services.settings.auth import AuthSettings with tempfile.TemporaryDirectory() as tmpdir: # First run - generate keys settings1 = AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM="RS256") original_private = settings1.PRIVATE_KEY.get_secret_value() original_public = settings1.PUBLIC_KEY # Second run - should load existing keys settings2 = AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM="RS256") assert settings2.PRIVATE_KEY.get_secret_value() == original_private assert original_public == settings2.PUBLIC_KEY def test_custom_private_key_derives_public_key(self): """When custom private key is provided, public key should be derived.""" from lfx.services.settings.auth import AuthSettings from lfx.services.settings.utils import generate_rsa_key_pair custom_private, expected_public = generate_rsa_key_pair() with tempfile.TemporaryDirectory() as tmpdir: 
settings = AuthSettings( CONFIG_DIR=tmpdir, ALGORITHM="RS256", PRIVATE_KEY=SecretStr(custom_private), ) assert settings.PRIVATE_KEY.get_secret_value() == custom_private assert expected_public == settings.PUBLIC_KEY def test_no_config_dir_generates_keys_in_memory(self): """Without CONFIG_DIR, keys should be generated in memory.""" from lfx.services.settings.auth import AuthSettings settings = AuthSettings(CONFIG_DIR="", ALGORITHM="RS256") assert settings.PRIVATE_KEY.get_secret_value() is not None assert settings.PUBLIC_KEY is not None assert "-----BEGIN PRIVATE KEY-----" in settings.PRIVATE_KEY.get_secret_value() def test_hs256_does_not_generate_rsa_keys(self): """HS256 should not trigger RSA key generation.""" from lfx.services.settings.auth import AuthSettings with tempfile.TemporaryDirectory() as tmpdir: AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM="HS256") private_key_path = Path(tmpdir) / "private_key.pem" public_key_path = Path(tmpdir) / "public_key.pem" # RSA key files should not be created for HS256 assert not private_key_path.exists() assert not public_key_path.exists() def test_invalid_algorithm_rejected(self): """Invalid algorithm should be rejected by pydantic.""" from lfx.services.settings.auth import AuthSettings from pydantic import ValidationError with tempfile.TemporaryDirectory() as tmpdir, pytest.raises(ValidationError): AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM="INVALID") class TestRSAKeyGeneration: """Test RSA key pair generation utility.""" def test_generate_rsa_key_pair_returns_valid_keys(self): """Generated keys should be valid PEM format.""" from lfx.services.settings.utils import generate_rsa_key_pair private_key, public_key = generate_rsa_key_pair() assert "-----BEGIN PRIVATE KEY-----" in private_key assert "-----END PRIVATE KEY-----" in private_key assert "-----BEGIN PUBLIC KEY-----" in public_key assert "-----END PUBLIC KEY-----" in public_key def test_generated_keys_are_unique(self): """Each call should generate unique keys.""" from 
lfx.services.settings.utils import generate_rsa_key_pair private1, public1 = generate_rsa_key_pair() private2, public2 = generate_rsa_key_pair() assert private1 != private2 assert public1 != public2 def test_generated_keys_can_sign_and_verify(self): """Generated keys should work for JWT signing and verification.""" from lfx.services.settings.utils import generate_rsa_key_pair private_key, public_key = generate_rsa_key_pair() # Sign a token payload = {"sub": "test-user", "type": "access"} token = jwt.encode(payload, private_key, algorithm="RS256") # Verify the token decoded = jwt.decode(token, public_key, algorithms=["RS256"]) assert decoded["sub"] == "test-user" assert decoded["type"] == "access" class TestTokenCreation: """Test JWT token creation with different algorithms.""" def _create_mock_settings_service(self, algorithm, tmpdir): """Helper to create mock settings service.""" from lfx.services.settings.auth import AuthSettings settings = AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM=algorithm) mock_service = MagicMock() mock_service.auth_settings = settings return mock_service def test_create_token_hs256(self): """Token creation with HS256 should use secret key.""" from langflow.services.auth.service import AuthService from langflow.services.auth.utils import create_token with tempfile.TemporaryDirectory() as tmpdir: mock_settings_service = self._create_mock_settings_service("HS256", tmpdir) mock_auth_service = AuthService(mock_settings_service) with patch("langflow.services.auth.utils.get_auth_service", return_value=mock_auth_service): token = create_token( data={"sub": "9cd4172c-0190-4124-a749-671d23e3c6dd", "type": "access"}, expires_delta=timedelta(hours=1), ) assert token is not None # Verify token header shows HS256 header = jwt.get_unverified_header(token) assert header["alg"] == "HS256" def test_create_token_rs256(self): """Token creation with RS256 should use private key.""" from langflow.services.auth.service import AuthService from 
langflow.services.auth.utils import create_token with tempfile.TemporaryDirectory() as tmpdir: mock_settings_service = self._create_mock_settings_service("RS256", tmpdir) mock_auth_service = AuthService(mock_settings_service) with patch("langflow.services.auth.utils.get_auth_service", return_value=mock_auth_service): token = create_token( data={"sub": "user-456", "type": "access"}, expires_delta=timedelta(hours=1), ) assert token is not None # Verify token header shows RS256 header = jwt.get_unverified_header(token) assert header["alg"] == "RS256" def test_create_token_rs512(self): """Token creation with RS512 should use private key.""" from langflow.services.auth.service import AuthService from langflow.services.auth.utils import create_token with tempfile.TemporaryDirectory() as tmpdir: mock_settings_service = self._create_mock_settings_service("RS512", tmpdir) mock_auth_service = AuthService(mock_settings_service) with patch("langflow.services.auth.utils.get_auth_service", return_value=mock_auth_service): token = create_token( data={"sub": "user-789", "type": "access"}, expires_delta=timedelta(hours=1), ) assert token is not None # Verify token header shows RS512 header = jwt.get_unverified_header(token) assert header["alg"] == "RS512" def test_token_contains_expiration(self): """Created token should contain expiration claim.""" from langflow.services.auth.service import AuthService from langflow.services.auth.utils import create_token with tempfile.TemporaryDirectory() as tmpdir: mock_settings_service = self._create_mock_settings_service("HS256", tmpdir) mock_auth_service = AuthService(mock_settings_service) with patch("langflow.services.auth.utils.get_auth_service", return_value=mock_auth_service): token = create_token( data={"sub": "9cd4172c-0190-4124-a749-671d23e3c6dd", "type": "access"}, expires_delta=timedelta(hours=1), ) # Decode without verification to check claims claims = jwt.decode(token, options={"verify_signature": False}) assert "exp" in claims 
class TestTokenVerification: """Test JWT token verification with different algorithms.""" def _create_mock_settings_service(self, algorithm, tmpdir): """Helper to create mock settings service.""" from lfx.services.settings.auth import AuthSettings settings = AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM=algorithm) mock_service = MagicMock() mock_service.auth_settings = settings return mock_service @pytest.mark.asyncio async def test_verify_hs256_token_success(self): """Valid HS256 token should be verified successfully.""" from langflow.services.auth.service import AuthService from langflow.services.auth.utils import create_token, get_current_user_from_access_token with tempfile.TemporaryDirectory() as tmpdir: mock_settings_service = self._create_mock_settings_service("HS256", tmpdir) mock_auth_service = AuthService(mock_settings_service) # Create a mock user mock_user = MagicMock() mock_user.id = "9cd4172c-0190-4124-a749-671d23e3c6dd" mock_user.is_active = True mock_db = AsyncMock() # Create async function that returns mock_user async def mock_get_user_by_id(*args, **kwargs): # noqa: ARG001 return mock_user with ( patch("langflow.services.auth.utils.get_auth_service", return_value=mock_auth_service), patch("langflow.services.auth.service.get_user_by_id", side_effect=mock_get_user_by_id), ): token = create_token( data={"sub": "9cd4172c-0190-4124-a749-671d23e3c6dd", "type": "access"}, expires_delta=timedelta(hours=1), ) user = await get_current_user_from_access_token(token, mock_db) assert user == mock_user @pytest.mark.asyncio async def test_verify_rs256_token_success(self): """Valid RS256 token should be verified successfully.""" from langflow.services.auth.service import AuthService from langflow.services.auth.utils import create_token, get_current_user_from_access_token with tempfile.TemporaryDirectory() as tmpdir: mock_settings_service = self._create_mock_settings_service("RS256", tmpdir) mock_auth_service = AuthService(mock_settings_service) mock_user = MagicMock() 
mock_user.id = "user-456" mock_user.is_active = True mock_db = AsyncMock() # Create async function that returns mock_user async def mock_get_user_by_id(*args, **kwargs): # noqa: ARG001 return mock_user with ( patch("langflow.services.auth.utils.get_auth_service", return_value=mock_auth_service), patch("langflow.services.auth.service.get_user_by_id", side_effect=mock_get_user_by_id), ): token = create_token( data={"sub": "user-456", "type": "access"}, expires_delta=timedelta(hours=1), ) user = await get_current_user_from_access_token(token, mock_db) assert user == mock_user @pytest.mark.asyncio async def test_verify_rs512_token_success(self): """Valid RS512 token should be verified successfully.""" from langflow.services.auth.service import AuthService from langflow.services.auth.utils import create_token, get_current_user_from_access_token with tempfile.TemporaryDirectory() as tmpdir: mock_settings_service = self._create_mock_settings_service("RS512", tmpdir) mock_auth_service = AuthService(mock_settings_service) mock_user = MagicMock() mock_user.id = "user-789" mock_user.is_active = True mock_db = AsyncMock() # Create async function that returns mock_user async def mock_get_user_by_id(*args, **kwargs): # noqa: ARG001 return mock_user with ( patch("langflow.services.auth.utils.get_auth_service", return_value=mock_auth_service), patch("langflow.services.auth.service.get_user_by_id", side_effect=mock_get_user_by_id), ): token = create_token( data={"sub": "user-789", "type": "access"}, expires_delta=timedelta(hours=1), ) user = await get_current_user_from_access_token(token, mock_db) assert user == mock_user class TestAuthenticationFailures: """Test authentication failure scenarios.""" def _create_mock_settings_service(self, algorithm, tmpdir, **overrides): """Helper to create mock settings service with optional overrides.""" from lfx.services.settings.auth import AuthSettings settings = AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM=algorithm) # Apply overrides for key, 
value in overrides.items(): object.__setattr__(settings, key, value) mock_service = MagicMock() mock_service.auth_settings = settings return mock_service @pytest.mark.asyncio async def test_missing_public_key_rs256_raises_401(self): """Missing public key for RS256 should raise 401.""" from langflow.services.auth.service import AuthService from langflow.services.auth.utils import get_current_user_from_access_token with tempfile.TemporaryDirectory() as tmpdir: mock_settings_service = self._create_mock_settings_service("RS256", tmpdir, PUBLIC_KEY="") mock_auth_service = AuthService(mock_settings_service) mock_db = AsyncMock() with patch("langflow.services.auth.utils.get_auth_service", return_value=mock_auth_service): with pytest.raises(HTTPException) as exc_info: await get_current_user_from_access_token("some-token", mock_db) assert exc_info.value.status_code == 401 assert "Public key not configured" in exc_info.value.detail @pytest.mark.asyncio async def test_missing_secret_key_hs256_raises_401(self): """Missing secret key for HS256 should raise 401.""" from langflow.services.auth.service import AuthService from langflow.services.auth.utils import get_current_user_from_access_token from lfx.services.settings.auth import JWTAlgorithm # Create a fully mocked settings service without using AuthSettings mock_auth_settings = MagicMock() mock_auth_settings.ALGORITHM = JWTAlgorithm.HS256 mock_auth_settings.SECRET_KEY = MagicMock() mock_auth_settings.SECRET_KEY.get_secret_value.return_value = None mock_settings_service = MagicMock() mock_settings_service.auth_settings = mock_auth_settings mock_auth_service = AuthService(mock_settings_service) mock_db = AsyncMock() with patch("langflow.services.auth.utils.get_auth_service", return_value=mock_auth_service): with pytest.raises(HTTPException) as exc_info: await get_current_user_from_access_token("some-token", mock_db) assert exc_info.value.status_code == 401 assert "Secret key not configured" in exc_info.value.detail 
    # NOTE(review): the methods below continue a test class whose header
    # (`class TestAuthenticationFailures`) is defined earlier in this file.

    @pytest.mark.asyncio
    async def test_invalid_token_raises_401(self):
        """Invalid token should raise 401."""
        from langflow.services.auth.service import AuthService
        from langflow.services.auth.utils import get_current_user_from_access_token

        with tempfile.TemporaryDirectory() as tmpdir:
            mock_settings_service = self._create_mock_settings_service("HS256", tmpdir)
            mock_auth_service = AuthService(mock_settings_service)
            mock_db = AsyncMock()

            # Patch the auth-service accessor so the util under test sees our mock.
            with patch("langflow.services.auth.utils.get_auth_service", return_value=mock_auth_service):
                with pytest.raises(HTTPException) as exc_info:
                    await get_current_user_from_access_token("invalid-token-format", mock_db)

                assert exc_info.value.status_code == 401
                assert "Invalid token" in exc_info.value.detail

    @pytest.mark.asyncio
    async def test_token_signed_with_wrong_key_raises_401(self):
        """Token signed with different key should raise 401."""
        from langflow.services.auth.service import AuthService
        from langflow.services.auth.utils import get_current_user_from_access_token

        with tempfile.TemporaryDirectory() as tmpdir:
            mock_settings_service = self._create_mock_settings_service("HS256", tmpdir)
            mock_auth_service = AuthService(mock_settings_service)

            # Create token with different secret
            wrong_token = jwt.encode(
                {"sub": "9cd4172c-0190-4124-a749-671d23e3c6dd", "type": "access"},
                "different-secret-key",
                algorithm="HS256",
            )

            mock_db = AsyncMock()

            with patch("langflow.services.auth.utils.get_auth_service", return_value=mock_auth_service):
                with pytest.raises(HTTPException) as exc_info:
                    await get_current_user_from_access_token(wrong_token, mock_db)

                assert exc_info.value.status_code == 401

    @pytest.mark.asyncio
    async def test_expired_token_raises_401(self):
        """Expired token should raise 401."""
        from langflow.services.auth.service import AuthService
        from langflow.services.auth.utils import create_token, get_current_user_from_access_token

        with tempfile.TemporaryDirectory() as tmpdir:
            mock_settings_service = self._create_mock_settings_service("HS256", tmpdir)
            mock_auth_service = AuthService(mock_settings_service)
            mock_db = AsyncMock()

            with patch("langflow.services.auth.utils.get_auth_service", return_value=mock_auth_service):
                # Create token that's already expired
                token = create_token(
                    data={"sub": "9cd4172c-0190-4124-a749-671d23e3c6dd", "type": "access"},
                    expires_delta=timedelta(seconds=-10),  # Negative = already expired
                )

                with pytest.raises(HTTPException) as exc_info:
                    await get_current_user_from_access_token(token, mock_db)

                assert exc_info.value.status_code == 401
                # PyJWT library raises InvalidTokenError for expired tokens before our custom check
                assert "expired" in exc_info.value.detail.lower() or "invalid token" in exc_info.value.detail.lower()

    @pytest.mark.asyncio
    async def test_token_without_user_id_raises_401(self):
        """Token without user ID should raise 401."""
        from langflow.services.auth.service import AuthService
        from langflow.services.auth.utils import get_current_user_from_access_token

        with tempfile.TemporaryDirectory() as tmpdir:
            mock_settings_service = self._create_mock_settings_service("HS256", tmpdir)
            mock_auth_service = AuthService(mock_settings_service)

            # Create token without 'sub' claim
            token = jwt.encode(
                {"type": "access"},
                mock_settings_service.auth_settings.SECRET_KEY.get_secret_value(),
                algorithm="HS256",
            )

            mock_db = AsyncMock()

            with patch("langflow.services.auth.utils.get_auth_service", return_value=mock_auth_service):
                with pytest.raises(HTTPException) as exc_info:
                    await get_current_user_from_access_token(token, mock_db)

                assert exc_info.value.status_code == 401
                assert "Invalid token" in exc_info.value.detail or "Expected access token" in exc_info.value.detail

    @pytest.mark.asyncio
    async def test_token_without_type_raises_401(self):
        """Token without type should raise 401."""
        from langflow.services.auth.service import AuthService
        from langflow.services.auth.utils import get_current_user_from_access_token

        with tempfile.TemporaryDirectory() as tmpdir:
            mock_settings_service = self._create_mock_settings_service("HS256", tmpdir)
            mock_auth_service = AuthService(mock_settings_service)

            # Create token without 'type' claim
            token = jwt.encode(
                {"sub": "9cd4172c-0190-4124-a749-671d23e3c6dd"},
                mock_settings_service.auth_settings.SECRET_KEY.get_secret_value(),
                algorithm="HS256",
            )

            mock_db = AsyncMock()

            with patch("langflow.services.auth.utils.get_auth_service", return_value=mock_auth_service):
                with pytest.raises(HTTPException) as exc_info:
                    await get_current_user_from_access_token(token, mock_db)

                assert exc_info.value.status_code == 401
                assert (
                    "invalid" in exc_info.value.detail.lower()
                    or "expected access token" in exc_info.value.detail.lower()
                )

    @pytest.mark.asyncio
    async def test_user_not_found_raises_403(self):
        """Token for non-existent user should raise 403 (InvalidCredentialsError)."""
        from uuid import uuid4

        from langflow.services.auth.service import AuthService
        from langflow.services.auth.utils import create_token, get_current_user_from_access_token

        with tempfile.TemporaryDirectory() as tmpdir:
            mock_settings_service = self._create_mock_settings_service("HS256", tmpdir)
            mock_auth_service = AuthService(mock_settings_service)
            mock_db = AsyncMock()

            # Use a valid UUID format
            user_id = str(uuid4())

            # Create async function that returns None
            async def mock_get_user_by_id(*args, **kwargs):  # noqa: ARG001
                return None

            with (
                patch("langflow.services.auth.utils.get_auth_service", return_value=mock_auth_service),
                patch("langflow.services.auth.service.get_user_by_id", side_effect=mock_get_user_by_id),
            ):
                token = create_token(
                    data={"sub": user_id, "type": "access"},
                    expires_delta=timedelta(hours=1),
                )

                with pytest.raises(HTTPException) as exc_info:
                    await get_current_user_from_access_token(token, mock_db)

                assert exc_info.value.status_code == 403
                assert "User not found" in exc_info.value.detail or "inactive" in exc_info.value.detail.lower()

    @pytest.mark.asyncio
    async def test_inactive_user_raises_401(self):
        """Token for inactive user should raise 401."""
        from uuid import uuid4

        from langflow.services.auth.service import AuthService
        from langflow.services.auth.utils import create_token, get_current_user_from_access_token

        with tempfile.TemporaryDirectory() as tmpdir:
            mock_settings_service = self._create_mock_settings_service("HS256", tmpdir)
            mock_auth_service = AuthService(mock_settings_service)

            # Use a valid UUID format
            user_id = str(uuid4())

            mock_user = MagicMock()
            mock_user.id = user_id
            mock_user.is_active = False
            mock_db = AsyncMock()

            # Create async function that returns mock_user
            async def mock_get_user_by_id(*args, **kwargs):  # noqa: ARG001
                return mock_user

            with (
                patch("langflow.services.auth.utils.get_auth_service", return_value=mock_auth_service),
                patch("langflow.services.auth.service.get_user_by_id", side_effect=mock_get_user_by_id),
            ):
                token = create_token(
                    data={"sub": user_id, "type": "access"},
                    expires_delta=timedelta(hours=1),
                )

                with pytest.raises(HTTPException) as exc_info:
                    await get_current_user_from_access_token(token, mock_db)

                assert exc_info.value.status_code == 401
                assert "inactive" in exc_info.value.detail.lower()


class TestRefreshTokenVerification:
    """Test refresh token verification with different algorithms."""

    def _create_mock_settings_service(self, algorithm, tmpdir):
        """Helper to create mock settings service."""
        from lfx.services.settings.auth import AuthSettings

        settings = AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM=algorithm)
        mock_service = MagicMock()
        mock_service.auth_settings = settings
        return mock_service

    @pytest.mark.asyncio
    async def test_refresh_token_rs256_success(self):
        """Valid RS256 refresh token should create new tokens."""
        from langflow.services.auth.service import AuthService
        from langflow.services.auth.utils import create_refresh_token, create_token

        with tempfile.TemporaryDirectory() as tmpdir:
            mock_settings_service = self._create_mock_settings_service("RS256", tmpdir)
            mock_auth_service = AuthService(mock_settings_service)

            mock_user = MagicMock()
            mock_user.id = "9cd4172c-0190-4124-a749-671d23e3c6dd"
            mock_user.is_active = True
            mock_db = AsyncMock()

            # Create async function that returns mock_user
            async def mock_get_user_by_id(*args, **kwargs):  # noqa: ARG001
                return mock_user

            with (
                patch("langflow.services.auth.utils.get_auth_service", return_value=mock_auth_service),
                patch("langflow.services.auth.service.get_user_by_id", side_effect=mock_get_user_by_id),
            ):
                # Create refresh token
                refresh_token = create_token(
                    data={"sub": "9cd4172c-0190-4124-a749-671d23e3c6dd", "type": "refresh"},
                    expires_delta=timedelta(days=7),
                )

                # Use refresh token to get new tokens
                new_tokens = await create_refresh_token(refresh_token, mock_db)

                assert "access_token" in new_tokens
                assert "refresh_token" in new_tokens
                assert new_tokens.get("token_type") == "bearer"

    @pytest.mark.asyncio
    async def test_refresh_token_wrong_type_raises_401(self):
        """Access token used as refresh token should raise 401."""
        from langflow.services.auth.service import AuthService
        from langflow.services.auth.utils import create_refresh_token, create_token

        with tempfile.TemporaryDirectory() as tmpdir:
            mock_settings_service = self._create_mock_settings_service("HS256", tmpdir)
            mock_auth_service = AuthService(mock_settings_service)
            mock_db = AsyncMock()

            with patch("langflow.services.auth.utils.get_auth_service", return_value=mock_auth_service):
                # Create access token (not refresh)
                access_token = create_token(
                    data={"sub": "9cd4172c-0190-4124-a749-671d23e3c6dd", "type": "access"},
                    expires_delta=timedelta(hours=1),
                )

                with pytest.raises(HTTPException) as exc_info:
                    await create_refresh_token(access_token, mock_db)

                assert exc_info.value.status_code == 401
                assert "Invalid refresh token" in exc_info.value.detail


class TestAlgorithmMismatch:
    """Test scenarios where algorithm configuration changes."""

    def test_hs256_token_fails_with_rs256_verification(self):
        """Token created with HS256 should fail RS256 verification."""
        from lfx.services.settings.auth import AuthSettings

        with tempfile.TemporaryDirectory() as tmpdir:
            # Create token with HS256
            hs256_settings = AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM="HS256")
            token = jwt.encode(
                {"sub": "9cd4172c-0190-4124-a749-671d23e3c6dd", "type": "access"},
                hs256_settings.SECRET_KEY.get_secret_value(),
                algorithm="HS256",
            )

            with tempfile.TemporaryDirectory() as tmpdir2:
                # Try to verify with RS256
                rs256_settings = AuthSettings(CONFIG_DIR=tmpdir2, ALGORITHM="RS256")

                with pytest.raises(InvalidTokenError):
                    jwt.decode(token, rs256_settings.PUBLIC_KEY, algorithms=["RS256"])

    def test_rs256_token_fails_with_hs256_verification(self):
        """Token created with RS256 should fail HS256 verification."""
        from lfx.services.settings.auth import AuthSettings

        with tempfile.TemporaryDirectory() as tmpdir:
            # Create token with RS256
            rs256_settings = AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM="RS256")
            token = jwt.encode(
                {"sub": "9cd4172c-0190-4124-a749-671d23e3c6dd", "type": "access"},
                rs256_settings.PRIVATE_KEY.get_secret_value(),
                algorithm="RS256",
            )

            with tempfile.TemporaryDirectory() as tmpdir2:
                # Try to verify with HS256
                hs256_settings = AuthSettings(CONFIG_DIR=tmpdir2, ALGORITHM="HS256")

                with pytest.raises(InvalidTokenError):
                    jwt.decode(
                        token,
                        hs256_settings.SECRET_KEY.get_secret_value(),
                        algorithms=["HS256"],
                    )


class TestKeyPersistence:
    """Test key persistence and file permissions."""

    def test_secret_key_file_created(self):
        """Secret key should be saved to file."""
        from lfx.services.settings.auth import AuthSettings

        with tempfile.TemporaryDirectory() as tmpdir:
            AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM="HS256")

            secret_key_path = Path(tmpdir) / "secret_key"
            assert secret_key_path.exists()

    def test_rsa_key_files_created(self):
        """RSA keys should be saved to files."""
        from lfx.services.settings.auth import AuthSettings

        with tempfile.TemporaryDirectory() as tmpdir:
            AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM="RS256")

            private_key_path = Path(tmpdir) / "private_key.pem"
            public_key_path = Path(tmpdir) / "public_key.pem"
            assert private_key_path.exists()
            assert public_key_path.exists()

    def test_keys_reloaded_on_restart(self):
        """Keys should be consistent across settings reloads."""
        from lfx.services.settings.auth import AuthSettings

        with tempfile.TemporaryDirectory() as tmpdir:
            # First load
            settings1 = AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM="RS256")
            private1 = settings1.PRIVATE_KEY.get_secret_value()
            public1 = settings1.PUBLIC_KEY

            # Simulate restart
            settings2 = AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM="RS256")
            private2 = settings2.PRIVATE_KEY.get_secret_value()
            public2 = settings2.PUBLIC_KEY

            assert private1 == private2
            assert public1 == public2


class TestEdgeCases:
    """Test edge cases and boundary conditions."""

    def test_empty_config_dir_string(self):
        """Empty CONFIG_DIR string should work (in-memory keys)."""
        from lfx.services.settings.auth import AuthSettings

        settings = AuthSettings(CONFIG_DIR="", ALGORITHM="RS256")
        assert settings.PRIVATE_KEY.get_secret_value() is not None
        assert settings.PUBLIC_KEY is not None

    @pytest.mark.asyncio
    async def test_token_with_extra_claims(self):
        """Token with extra claims should still work."""
        from langflow.services.auth.service import AuthService
        from langflow.services.auth.utils import get_current_user_from_access_token

        with tempfile.TemporaryDirectory() as tmpdir:
            from lfx.services.settings.auth import AuthSettings

            settings = AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM="HS256")
            token = jwt.encode(
                {
                    "sub": "9cd4172c-0190-4124-a749-671d23e3c6dd",
                    "type": "access",
                    "extra_claim": "some-value",
                    "another": 123,
                },
                settings.SECRET_KEY.get_secret_value(),
                algorithm="HS256",
            )

            mock_settings_service = MagicMock()
            mock_settings_service.auth_settings = settings
            mock_auth_service = AuthService(mock_settings_service)

            mock_user = MagicMock()
            mock_user.id = "9cd4172c-0190-4124-a749-671d23e3c6dd"
            mock_user.is_active = True
            mock_db = AsyncMock()

            # Create async function that returns mock_user
            async def mock_get_user_by_id(*args, **kwargs):  # noqa: ARG001
                return mock_user

            with (
                patch("langflow.services.auth.utils.get_auth_service", return_value=mock_auth_service),
                patch("langflow.services.auth.service.get_user_by_id", side_effect=mock_get_user_by_id),
            ):
                user = await get_current_user_from_access_token(token, mock_db)
                assert user == mock_user

    def test_very_long_user_id(self):
        """Very long user ID should work."""
        from langflow.services.auth.service import AuthService
        from langflow.services.auth.utils import create_token

        with tempfile.TemporaryDirectory() as tmpdir:
            from lfx.services.settings.auth import AuthSettings

            settings = AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM="HS256")
            mock_settings_service = MagicMock()
            mock_settings_service.auth_settings = settings
            mock_auth_service = AuthService(mock_settings_service)

            long_user_id = "a" * 1000

            with patch("langflow.services.auth.utils.get_auth_service", return_value=mock_auth_service):
                token = create_token(
                    data={"sub": long_user_id, "type": "access"},
                    expires_delta=timedelta(hours=1),
                )

                claims = jwt.decode(token, options={"verify_signature": False})
                assert claims["sub"] == long_user_id


class TestJWTKeyHelpers:
    """Test JWT key helper functions."""

    def _create_mock_settings_service(self, algorithm, tmpdir):
        """Helper to create mock settings service."""
        from lfx.services.settings.auth import AuthSettings

        settings = AuthSettings(CONFIG_DIR=tmpdir, ALGORITHM=algorithm)
        mock_service = MagicMock()
        mock_service.auth_settings = settings
        return mock_service

    def test_get_jwt_verification_key_hs256_returns_secret_key(self):
        """HS256 should return secret key for verification."""
        from langflow.services.auth.utils import get_jwt_verification_key

        with tempfile.TemporaryDirectory() as tmpdir:
            mock_service = self._create_mock_settings_service("HS256", tmpdir)
            key = get_jwt_verification_key(mock_service)
            assert key == mock_service.auth_settings.SECRET_KEY.get_secret_value()
            assert len(key) >= 32

    def test_get_jwt_verification_key_rs256_returns_public_key(self):
        """RS256 should return public key for verification."""
        from langflow.services.auth.utils import get_jwt_verification_key

        with tempfile.TemporaryDirectory() as tmpdir:
            mock_service = self._create_mock_settings_service("RS256", tmpdir)
            key = get_jwt_verification_key(mock_service)
            assert key == mock_service.auth_settings.PUBLIC_KEY
            assert "-----BEGIN PUBLIC KEY-----" in key

    def test_get_jwt_verification_key_rs512_returns_public_key(self):
        """RS512 should return public key for verification."""
        from langflow.services.auth.utils import get_jwt_verification_key

        with tempfile.TemporaryDirectory() as tmpdir:
            mock_service = self._create_mock_settings_service("RS512", tmpdir)
            key = get_jwt_verification_key(mock_service)
            assert key == mock_service.auth_settings.PUBLIC_KEY
            assert "-----BEGIN PUBLIC KEY-----" in key

    def test_get_jwt_verification_key_missing_public_key_raises_error(self):
        """Missing public key for asymmetric algorithm should raise JWTKeyError."""
        from langflow.services.auth.utils import JWTKeyError, get_jwt_verification_key

        with tempfile.TemporaryDirectory() as tmpdir:
            mock_service = self._create_mock_settings_service("RS256", tmpdir)
            # object.__setattr__ bypasses pydantic's frozen/validated setattr.
            object.__setattr__(mock_service.auth_settings, "PUBLIC_KEY", "")

            with pytest.raises(JWTKeyError) as exc_info:
                get_jwt_verification_key(mock_service)

            assert exc_info.value.status_code == 401
            assert "Public key not configured" in exc_info.value.detail

    def test_get_jwt_verification_key_missing_secret_key_raises_error(self):
        """Missing secret key for HS256 should raise JWTKeyError."""
        from langflow.services.auth.utils import JWTKeyError, get_jwt_verification_key
        from lfx.services.settings.auth import JWTAlgorithm

        mock_auth_settings = MagicMock()
        mock_auth_settings.ALGORITHM = JWTAlgorithm.HS256
        mock_auth_settings.SECRET_KEY = MagicMock()
        mock_auth_settings.SECRET_KEY.get_secret_value.return_value = None

        mock_service = MagicMock()
        mock_service.auth_settings = mock_auth_settings

        with pytest.raises(JWTKeyError) as exc_info:
            get_jwt_verification_key(mock_service)

        assert exc_info.value.status_code == 401
        assert "Secret key not configured" in exc_info.value.detail

    def test_get_jwt_signing_key_hs256_returns_secret_key(self):
        """HS256 should return secret key for signing."""
        from langflow.services.auth.utils import get_jwt_signing_key

        with tempfile.TemporaryDirectory() as tmpdir:
            mock_service = self._create_mock_settings_service("HS256", tmpdir)
            key = get_jwt_signing_key(mock_service)
            assert key == mock_service.auth_settings.SECRET_KEY.get_secret_value()

    def test_get_jwt_signing_key_rs256_returns_private_key(self):
        """RS256 should return private key for signing."""
        from langflow.services.auth.utils import get_jwt_signing_key

        with tempfile.TemporaryDirectory() as tmpdir:
            mock_service = self._create_mock_settings_service("RS256", tmpdir)
            key = get_jwt_signing_key(mock_service)
            assert key == mock_service.auth_settings.PRIVATE_KEY.get_secret_value()
            assert "-----BEGIN PRIVATE KEY-----" in key

    def test_get_jwt_signing_key_rs512_returns_private_key(self):
        """RS512 should return private key for signing."""
        from langflow.services.auth.utils import get_jwt_signing_key

        with tempfile.TemporaryDirectory() as tmpdir:
            mock_service = self._create_mock_settings_service("RS512", tmpdir)
            key = get_jwt_signing_key(mock_service)
            assert key == mock_service.auth_settings.PRIVATE_KEY.get_secret_value()
            assert "-----BEGIN PRIVATE KEY-----" in key

    def test_verification_and_signing_keys_work_together_hs256(self):
        """Verification and signing keys should work together for HS256."""
        from langflow.services.auth.utils import get_jwt_signing_key, get_jwt_verification_key

        with tempfile.TemporaryDirectory() as tmpdir:
            mock_service = self._create_mock_settings_service("HS256", tmpdir)
            signing_key = get_jwt_signing_key(mock_service)
            verification_key = get_jwt_verification_key(mock_service)

            # For symmetric algorithms, both keys are the same
            assert signing_key == verification_key

            # Sign and verify a token
            payload = {"sub": "test-user", "type": "access"}
            token = jwt.encode(payload, signing_key, algorithm="HS256")
            decoded = jwt.decode(token, verification_key, algorithms=["HS256"])
            assert decoded["sub"] == "test-user"

    def test_verification_and_signing_keys_work_together_rs256(self):
        """Verification and signing keys should work together for RS256."""
        from langflow.services.auth.utils import get_jwt_signing_key, get_jwt_verification_key

        with tempfile.TemporaryDirectory() as tmpdir:
            mock_service = self._create_mock_settings_service("RS256", tmpdir)
            signing_key = get_jwt_signing_key(mock_service)
            verification_key = get_jwt_verification_key(mock_service)

            # For asymmetric algorithms, keys are different
            assert signing_key != verification_key

            # Sign and verify a token
            payload = {"sub": "test-user", "type": "access"}
            token = jwt.encode(payload, signing_key, algorithm="RS256")
            decoded = jwt.decode(token, verification_key, algorithms=["RS256"])
            assert decoded["sub"] == "test-user"
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/test_auth_jwt_algorithms.py", "license": "MIT License", "lines": 795, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/base/langflow/services/transaction/factory.py
"""Transaction service factory for langflow.""" from __future__ import annotations from typing import TYPE_CHECKING from langflow.services.factory import ServiceFactory from langflow.services.transaction.service import TransactionService if TYPE_CHECKING: from langflow.services.settings.service import SettingsService class TransactionServiceFactory(ServiceFactory): """Factory for creating TransactionService instances.""" def __init__(self): super().__init__(TransactionService) def create(self, settings_service: SettingsService): """Create a new TransactionService instance. Args: settings_service: The settings service for checking if transactions are enabled. Returns: A new TransactionService instance. """ return TransactionService(settings_service)
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/services/transaction/factory.py", "license": "MIT License", "lines": 19, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
langflow-ai/langflow:src/backend/base/langflow/services/transaction/service.py
"""Transaction service implementation for langflow.""" from __future__ import annotations from typing import TYPE_CHECKING, Any from uuid import UUID from lfx.log.logger import logger from lfx.services.deps import session_scope from lfx.services.interfaces import TransactionServiceProtocol from langflow.services.base import Service from langflow.services.database.models.transactions.crud import log_transaction as crud_log_transaction from langflow.services.database.models.transactions.model import TransactionBase if TYPE_CHECKING: from langflow.services.settings.service import SettingsService class TransactionService(Service, TransactionServiceProtocol): """Concrete implementation of transaction logging service. This service handles logging of component execution transactions to the database, tracking inputs, outputs, and status of each vertex build. """ name = "transaction_service" def __init__(self, settings_service: SettingsService): """Initialize the transaction service. Args: settings_service: The settings service for checking if transactions are enabled. """ self.settings_service = settings_service async def log_transaction( self, flow_id: str, vertex_id: str, inputs: dict[str, Any] | None, outputs: dict[str, Any] | None, status: str, target_id: str | None = None, error: str | None = None, ) -> None: """Log a transaction record for a vertex execution. 
Args: flow_id: The flow ID (as string) vertex_id: The vertex/component ID inputs: Input parameters for the component outputs: Output results from the component status: Execution status (success/error) target_id: Optional target vertex ID error: Optional error message """ if not self.is_enabled(): return try: flow_uuid = UUID(flow_id) if isinstance(flow_id, str) else flow_id transaction = TransactionBase( vertex_id=vertex_id, target_id=target_id, inputs=inputs, outputs=outputs, status=status, error=error, flow_id=flow_uuid, ) async with session_scope() as session: await crud_log_transaction(session, transaction) except Exception as exc: # noqa: BLE001 logger.debug(f"Error logging transaction: {exc!s}") def is_enabled(self) -> bool: """Check if transaction logging is enabled. Returns: True if transaction logging is enabled, False otherwise. """ return getattr(self.settings_service.settings, "transactions_storage_enabled", False)
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/services/transaction/service.py", "license": "MIT License", "lines": 67, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
langflow-ai/langflow:src/backend/tests/unit/api/v1/test_transactions.py
"""Tests for transactions API endpoints and models.""" from datetime import datetime, timezone from uuid import uuid4 import pytest from fastapi import status from httpx import AsyncClient from langflow.services.database.models.transactions.crud import ( transform_transaction_table, transform_transaction_table_for_logs, ) from langflow.services.database.models.transactions.model import ( TransactionBase, TransactionLogsResponse, TransactionReadResponse, TransactionTable, _is_sensitive_key, sanitize_data, ) class TestTransactionModels: """Tests for transaction model classes.""" def test_transaction_base_creation(self): """Test creating a TransactionBase instance.""" flow_id = uuid4() transaction = TransactionBase( vertex_id="test-vertex-123", target_id="target-vertex-456", inputs={"key": "value"}, outputs={"result": "success"}, status="success", flow_id=flow_id, ) assert transaction.vertex_id == "test-vertex-123" assert transaction.target_id == "target-vertex-456" assert transaction.inputs == {"key": "value"} assert transaction.outputs == {"result": "success"} assert transaction.status == "success" assert transaction.flow_id == flow_id assert transaction.error is None def test_transaction_base_with_error(self): """Test creating a TransactionBase with error status.""" flow_id = uuid4() transaction = TransactionBase( vertex_id="test-vertex-123", status="error", error="Something went wrong", flow_id=flow_id, ) assert transaction.status == "error" assert transaction.error == "Something went wrong" def test_transaction_base_filters_code_from_inputs(self): """Test that 'code' key is filtered from inputs via sanitize_data.""" flow_id = uuid4() inputs_with_code = {"key": "value", "code": "def foo(): pass"} transaction = TransactionBase( vertex_id="test-vertex", inputs=inputs_with_code, status="success", flow_id=flow_id, ) # The original dict should not be modified assert "code" in inputs_with_code # But the transaction inputs should not have 'code' assert "code" not in 
transaction.inputs assert transaction.inputs["key"] == "value" def test_transaction_base_sanitizes_sensitive_data_in_inputs(self): """Test that sensitive data like api_key is masked in inputs.""" flow_id = uuid4() inputs_with_api_key = { "api_key": "sk-proj-MBZ6RyzaqpMgw_wwa123456789", "template": "Hello world", } transaction = TransactionBase( vertex_id="test-vertex", inputs=inputs_with_api_key, status="success", flow_id=flow_id, ) # The api_key should be masked assert transaction.inputs["api_key"] == "sk-p...6789" # Non-sensitive data should remain unchanged assert transaction.inputs["template"] == "Hello world" def test_transaction_base_sanitizes_sensitive_data_in_outputs(self): """Test that sensitive data like password is masked in outputs.""" flow_id = uuid4() # Short password (<=12 chars) should be fully redacted outputs_with_short_password = { "password": "short", "result": "success", } transaction = TransactionBase( vertex_id="test-vertex", outputs=outputs_with_short_password, status="success", flow_id=flow_id, ) # Short passwords should be fully redacted assert transaction.outputs["password"] == "***REDACTED***" # noqa: S105 # Non-sensitive data should remain unchanged assert transaction.outputs["result"] == "success" # Long password (>12 chars) should be partially masked outputs_with_long_password = { "password": "supersecret123456", "result": "ok", } transaction2 = TransactionBase( vertex_id="test-vertex-2", outputs=outputs_with_long_password, status="success", flow_id=flow_id, ) # Long passwords should show first 4 and last 4 chars assert transaction2.outputs["password"] == "supe...3456" # noqa: S105 assert transaction2.outputs["result"] == "ok" def test_transaction_base_sanitizes_nested_sensitive_data(self): """Test that nested sensitive data is also masked.""" flow_id = uuid4() inputs_nested = { "config": { "openai_api_key": "sk-12345678901234567890", "model": "gpt-4", }, "text": "Hello", } transaction = TransactionBase( vertex_id="test-vertex", 
inputs=inputs_nested, status="success", flow_id=flow_id, ) # Nested api_key should be masked assert transaction.inputs["config"]["openai_api_key"] == "sk-1...7890" # Non-sensitive nested data should remain unchanged assert transaction.inputs["config"]["model"] == "gpt-4" assert transaction.inputs["text"] == "Hello" def test_transaction_base_flow_id_string_conversion(self): """Test that string flow_id is converted to UUID.""" flow_id_str = "12345678-1234-5678-1234-567812345678" transaction = TransactionBase( vertex_id="test-vertex", status="success", flow_id=flow_id_str, ) from uuid import UUID assert isinstance(transaction.flow_id, UUID) assert str(transaction.flow_id) == flow_id_str def test_transaction_logs_response_from_table(self): """Test creating TransactionLogsResponse from TransactionTable.""" table = TransactionTable( id=uuid4(), vertex_id="test-vertex", target_id="target-vertex", inputs={"input": "data"}, outputs={"output": "result"}, status="success", error=None, flow_id=uuid4(), timestamp=datetime.now(timezone.utc), ) response = TransactionLogsResponse.model_validate(table, from_attributes=True) assert response.id == table.id assert response.vertex_id == table.vertex_id assert response.target_id == table.target_id assert response.status == table.status # TransactionLogsResponse should not have error and flow_id fields assert not hasattr(response, "error") or "error" not in response.model_fields assert not hasattr(response, "flow_id") or "flow_id" not in response.model_fields class TestSanitizeData: """Tests for the sanitize_data function and related utilities.""" def test_sanitize_data_returns_none_for_none_input(self): """Test that sanitize_data returns None when input is None.""" assert sanitize_data(None) is None def test_sanitize_data_masks_api_key(self): """Test that api_key values are masked.""" data = {"api_key": "sk-proj-1234567890abcdef"} result = sanitize_data(data) assert result["api_key"] == "sk-p...cdef" def 
test_sanitize_data_masks_password(self): """Test that password values are masked.""" data = {"password": "short"} result = sanitize_data(data) assert result["password"] == "***REDACTED***" # noqa: S105 def test_sanitize_data_masks_various_sensitive_keys(self): """Test that various sensitive key patterns are masked.""" data = { "api_key": "sk-1234567890123456", "api-key": "sk-1234567890123456", "apikey": "sk-1234567890123456", "password": "secretpassword123", "secret": "mysecret12345678", "token": "mytoken123456789", "credential": "mycredential1234", "auth": "myauthvalue12345", "bearer": "mybearertoken123", "private_key": "myprivatekey1234", "access_key": "myaccesskey12345", } result = sanitize_data(data) for key in data: # All sensitive keys should be masked assert "***" in result[key] or "..." in result[key], f"Key '{key}' was not masked" def test_sanitize_data_preserves_non_sensitive_data(self): """Test that non-sensitive data is preserved.""" data = { "model": "gpt-4", "temperature": 0.7, "max_tokens": 100, "messages": [{"role": "user", "content": "Hello"}], } result = sanitize_data(data) assert result == data def test_sanitize_data_handles_nested_dicts(self): """Test that nested dictionaries are sanitized.""" data = { "config": { "api_key": "sk-nested12345678901", "model": "gpt-4", } } result = sanitize_data(data) assert "..." in result["config"]["api_key"] assert result["config"]["model"] == "gpt-4" def test_sanitize_data_handles_lists(self): """Test that lists containing dicts are sanitized.""" data = { "items": [ {"api_key": "sk-list1234567890123", "name": "item1"}, {"api_key": "sk-list1234567890124", "name": "item2"}, ] } result = sanitize_data(data) assert "..." in result["items"][0]["api_key"] assert "..." 
in result["items"][1]["api_key"] assert result["items"][0]["name"] == "item1" assert result["items"][1]["name"] == "item2" def test_sanitize_data_removes_code_key(self): """Test that 'code' key is completely removed.""" data = {"code": "def foo(): pass", "value": "keep me"} result = sanitize_data(data) assert "code" not in result assert result["value"] == "keep me" def test_sanitize_data_case_insensitive(self): """Test that key matching is case insensitive.""" data = { "API_KEY": "sk-upper1234567890123", "Password": "mixedcase123456", "SECRET": "allcaps12345678901", } result = sanitize_data(data) for key in data: assert "***" in result[key] or "..." in result[key], f"Key '{key}' was not masked" def test_is_sensitive_key_matches_expected_keys(self): """Test that _is_sensitive_key correctly identifies sensitive keys.""" should_match = [ "api_key", "api-key", "apikey", "API_KEY", "password", "PASSWORD", "secret", "SECRET", "token", "TOKEN", "credential", "CREDENTIAL", "auth", "AUTH", "bearer", "BEARER", "private_key", "private-key", "access_key", "access-key", "openai_api_key", "anthropic_api_key", "auth_token", "access_token", ] for key in should_match: assert _is_sensitive_key(key), f"Key '{key}' should be identified as sensitive" should_not_match = [ "model", "temperature", "max_tokens", "messages", "name", "value", "result", "status", "author", "authentication_method", ] for key in should_not_match: assert not _is_sensitive_key(key), f"Key '{key}' should NOT be identified as sensitive" class TestTransactionTransformers: """Tests for transaction transformer functions.""" def test_transform_transaction_table_single(self): """Test transforming a single TransactionTable.""" table = TransactionTable( id=uuid4(), vertex_id="test-vertex", status="success", flow_id=uuid4(), ) result = transform_transaction_table(table) assert isinstance(result, TransactionReadResponse) def test_transform_transaction_table_list(self): """Test transforming a list of TransactionTable.""" 
tables = [ TransactionTable(id=uuid4(), vertex_id=f"vertex-{i}", status="success", flow_id=uuid4()) for i in range(3) ] result = transform_transaction_table(tables) assert isinstance(result, list) assert len(result) == 3 assert all(isinstance(r, TransactionReadResponse) for r in result) def test_transform_transaction_table_for_logs_single(self): """Test transforming a single TransactionTable for logs view.""" table = TransactionTable( id=uuid4(), vertex_id="test-vertex", status="success", flow_id=uuid4(), ) result = transform_transaction_table_for_logs(table) assert isinstance(result, TransactionLogsResponse) def test_transform_transaction_table_for_logs_list(self): """Test transforming a list of TransactionTable for logs view.""" tables = [ TransactionTable(id=uuid4(), vertex_id=f"vertex-{i}", status="success", flow_id=uuid4()) for i in range(3) ] result = transform_transaction_table_for_logs(tables) assert isinstance(result, list) assert len(result) == 3 assert all(isinstance(r, TransactionLogsResponse) for r in result) class TestTransactionWithOutputs: """Tests for transaction with explicit outputs parameter.""" def test_transaction_base_with_explicit_outputs(self): """Test creating TransactionBase with explicit outputs dict.""" flow_id = uuid4() outputs = { "output": {"message": "Hello World", "type": "text"}, "another_output": {"message": {"key": "value"}, "type": "object"}, } transaction = TransactionBase( vertex_id="test-vertex", inputs={"input_value": "test"}, outputs=outputs, status="success", flow_id=flow_id, ) assert transaction.outputs is not None assert "output" in transaction.outputs assert transaction.outputs["output"]["message"] == "Hello World" assert transaction.outputs["output"]["type"] == "text" def test_transaction_base_outputs_sanitization(self): """Test that outputs with sensitive data are sanitized.""" flow_id = uuid4() outputs = { "result": { "message": "success", "api_key": "sk-1234567890abcdef1234", } } transaction = TransactionBase( 
vertex_id="test-vertex", outputs=outputs, status="success", flow_id=flow_id, ) # The nested api_key should be masked assert "..." in transaction.outputs["result"]["api_key"] assert transaction.outputs["result"]["message"] == "success" def test_transaction_table_with_outputs(self): """Test creating TransactionTable with outputs.""" flow_id = uuid4() outputs = {"component_output": {"message": "Built successfully", "type": "text"}} table = TransactionTable( id=uuid4(), vertex_id="test-vertex", inputs={"param": "value"}, outputs=outputs, status="success", flow_id=flow_id, ) assert table.outputs is not None assert "component_output" in table.outputs assert table.outputs["component_output"]["message"] == "Built successfully" def test_transaction_logs_response_includes_outputs(self): """Test that TransactionLogsResponse includes outputs field.""" table = TransactionTable( id=uuid4(), vertex_id="test-vertex", inputs={"input": "data"}, outputs={"output": {"message": "result", "type": "text"}}, status="success", flow_id=uuid4(), timestamp=datetime.now(timezone.utc), ) response = TransactionLogsResponse.model_validate(table, from_attributes=True) assert response.outputs is not None assert "output" in response.outputs class TestTransactionsEndpoint: """Tests for the /monitor/transactions endpoint.""" async def test_get_transactions_requires_auth(self, client: AsyncClient): """Test that GET /monitor/transactions requires authentication.""" response = await client.get("api/v1/monitor/transactions?flow_id=00000000-0000-0000-0000-000000000000") assert response.status_code == status.HTTP_403_FORBIDDEN @pytest.mark.usefixtures("active_user") async def test_get_transactions_returns_paginated_response(self, client: AsyncClient, logged_in_headers): """Test that GET /monitor/transactions returns paginated response.""" flow_id = "00000000-0000-0000-0000-000000000000" response = await client.get(f"api/v1/monitor/transactions?flow_id={flow_id}", headers=logged_in_headers) assert 
response.status_code == status.HTTP_200_OK result = response.json() assert "items" in result assert "total" in result assert "page" in result assert "size" in result assert "pages" in result assert isinstance(result["items"], list) @pytest.mark.usefixtures("active_user") async def test_get_transactions_with_pagination_params(self, client: AsyncClient, logged_in_headers): """Test GET /monitor/transactions with custom pagination parameters.""" flow_id = "00000000-0000-0000-0000-000000000000" response = await client.get( f"api/v1/monitor/transactions?flow_id={flow_id}&page=1&size=10", headers=logged_in_headers ) assert response.status_code == status.HTTP_200_OK result = response.json() assert result["page"] == 1 assert result["size"] == 10 @pytest.mark.usefixtures("active_user") async def test_get_transactions_requires_flow_id(self, client: AsyncClient, logged_in_headers): """Test that GET /monitor/transactions requires flow_id parameter.""" response = await client.get("api/v1/monitor/transactions", headers=logged_in_headers) assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY @pytest.mark.usefixtures("active_user") async def test_get_transactions_invalid_flow_id_format(self, client: AsyncClient, logged_in_headers): """Test GET /monitor/transactions with invalid flow_id format.""" response = await client.get("api/v1/monitor/transactions?flow_id=invalid-uuid", headers=logged_in_headers) assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY @pytest.mark.usefixtures("active_user") async def test_get_transactions_response_structure(self, client: AsyncClient, logged_in_headers): """Test that transaction response items have the expected structure.""" flow_id = uuid4() response = await client.get(f"api/v1/monitor/transactions?flow_id={flow_id}", headers=logged_in_headers) assert response.status_code == status.HTTP_200_OK result = response.json() # Verify pagination structure assert "items" in result assert "total" in result assert "page" in 
result assert "size" in result assert "pages" in result assert isinstance(result["items"], list)
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/api/v1/test_transactions.py", "license": "MIT License", "lines": 455, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/services/transaction/test_deps.py
"""Tests for transaction service dependency injection.""" from unittest.mock import MagicMock, patch from lfx.services.deps import get_transaction_service from lfx.services.interfaces import TransactionServiceProtocol from lfx.services.transaction.service import NoopTransactionService class TestGetTransactionService: """Test suite for get_transaction_service function.""" def test_should_return_none_when_no_service_registered(self) -> None: """Verify returns None when no transaction service is registered.""" with patch("lfx.services.deps.get_service", return_value=None): result = get_transaction_service() assert result is None def test_should_return_service_when_registered(self) -> None: """Verify returns the registered service instance.""" mock_service = MagicMock(spec=TransactionServiceProtocol) with patch("lfx.services.deps.get_service", return_value=mock_service): result = get_transaction_service() assert result is mock_service def test_should_return_noop_service_when_noop_registered(self) -> None: """Verify returns NoopTransactionService when it's registered.""" noop_service = NoopTransactionService() with patch("lfx.services.deps.get_service", return_value=noop_service): result = get_transaction_service() assert result is noop_service assert isinstance(result, TransactionServiceProtocol) def test_should_call_get_service_with_correct_type(self) -> None: """Verify get_service is called with TRANSACTION_SERVICE type.""" from lfx.services.schema import ServiceType with patch("lfx.services.deps.get_service") as mock_get_service: mock_get_service.return_value = None get_transaction_service() mock_get_service.assert_called_once_with(ServiceType.TRANSACTION_SERVICE)
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/services/transaction/test_deps.py", "license": "MIT License", "lines": 32, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/services/transaction/test_factory.py
"""Tests for TransactionServiceFactory.""" from unittest.mock import MagicMock import pytest from langflow.services.factory import ServiceFactory from langflow.services.schema import ServiceType from langflow.services.transaction.factory import TransactionServiceFactory from langflow.services.transaction.service import TransactionService class TestTransactionServiceFactory: """Test suite for TransactionServiceFactory.""" @pytest.fixture def factory(self) -> TransactionServiceFactory: """Create a TransactionServiceFactory instance for testing.""" return TransactionServiceFactory() @pytest.fixture def mock_settings_service(self) -> MagicMock: """Create a mock settings service.""" settings_service = MagicMock() settings_service.settings = MagicMock() settings_service.settings.transactions_storage_enabled = True return settings_service def test_should_extend_service_factory(self, factory: TransactionServiceFactory) -> None: """Verify TransactionServiceFactory extends ServiceFactory.""" assert isinstance(factory, ServiceFactory) def test_should_have_correct_service_class(self, factory: TransactionServiceFactory) -> None: """Verify factory has correct service_class attribute.""" assert factory.service_class is TransactionService def test_should_have_settings_service_dependency(self, factory: TransactionServiceFactory) -> None: """Verify factory has SETTINGS_SERVICE as dependency.""" assert ServiceType.SETTINGS_SERVICE in factory.dependencies def test_should_create_transaction_service( self, factory: TransactionServiceFactory, mock_settings_service: MagicMock ) -> None: """Verify factory creates TransactionService instance.""" service = factory.create(mock_settings_service) assert isinstance(service, TransactionService) def test_should_pass_settings_service_to_created_service( self, factory: TransactionServiceFactory, mock_settings_service: MagicMock ) -> None: """Verify factory passes settings_service to created service.""" service = factory.create(mock_settings_service) 
assert service.settings_service is mock_settings_service def test_should_create_service_with_is_enabled_true( self, factory: TransactionServiceFactory, mock_settings_service: MagicMock ) -> None: """Verify created service has is_enabled=True when transactions enabled.""" mock_settings_service.settings.transactions_storage_enabled = True service = factory.create(mock_settings_service) assert service.is_enabled() is True def test_should_create_service_with_is_enabled_false( self, factory: TransactionServiceFactory, mock_settings_service: MagicMock ) -> None: """Verify created service has is_enabled=False when transactions disabled.""" mock_settings_service.settings.transactions_storage_enabled = False service = factory.create(mock_settings_service) assert service.is_enabled() is False
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/services/transaction/test_factory.py", "license": "MIT License", "lines": 55, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/services/transaction/test_noop_transaction_service.py
"""Tests for NoopTransactionService.""" import pytest from lfx.services.interfaces import TransactionServiceProtocol from lfx.services.transaction.service import NoopTransactionService class TestNoopTransactionService: """Test suite for NoopTransactionService.""" @pytest.fixture def service(self) -> NoopTransactionService: """Create a NoopTransactionService instance for testing.""" return NoopTransactionService() def test_should_implement_protocol(self, service: NoopTransactionService) -> None: """Verify NoopTransactionService implements TransactionServiceProtocol.""" assert isinstance(service, TransactionServiceProtocol) def test_should_return_false_for_is_enabled(self, service: NoopTransactionService) -> None: """Verify is_enabled always returns False for noop service.""" assert service.is_enabled() is False @pytest.mark.asyncio async def test_should_not_raise_when_logging_transaction(self, service: NoopTransactionService) -> None: """Verify log_transaction completes without raising exceptions.""" await service.log_transaction( flow_id="test-flow-id", vertex_id="test-vertex-id", inputs={"key": "value"}, outputs={"result": "output"}, status="success", ) @pytest.mark.asyncio async def test_should_handle_none_inputs_and_outputs(self, service: NoopTransactionService) -> None: """Verify log_transaction handles None inputs and outputs gracefully.""" await service.log_transaction( flow_id="test-flow-id", vertex_id="test-vertex-id", inputs=None, outputs=None, status="success", ) @pytest.mark.asyncio async def test_should_handle_error_status(self, service: NoopTransactionService) -> None: """Verify log_transaction handles error status with error message.""" await service.log_transaction( flow_id="test-flow-id", vertex_id="test-vertex-id", inputs={"key": "value"}, outputs=None, status="error", error="Something went wrong", ) @pytest.mark.asyncio async def test_should_handle_target_id(self, service: NoopTransactionService) -> None: """Verify log_transaction handles 
target_id parameter.""" await service.log_transaction( flow_id="test-flow-id", vertex_id="test-vertex-id", inputs={"key": "value"}, outputs={"result": "output"}, status="success", target_id="target-vertex-id", ) @pytest.mark.asyncio async def test_should_handle_all_parameters(self, service: NoopTransactionService) -> None: """Verify log_transaction handles all parameters together.""" await service.log_transaction( flow_id="flow-123", vertex_id="vertex-456", inputs={"input_key": "input_value"}, outputs={"output_key": "output_value"}, status="success", target_id="target-789", error=None, )
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/services/transaction/test_noop_transaction_service.py", "license": "MIT License", "lines": 70, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/services/transaction/test_service.py
"""Tests for TransactionService.""" from unittest.mock import AsyncMock, MagicMock, patch from uuid import UUID import pytest from langflow.services.transaction.service import TransactionService from lfx.services.interfaces import TransactionServiceProtocol class TestTransactionService: """Test suite for TransactionService.""" @pytest.fixture def mock_settings_service(self) -> MagicMock: """Create a mock settings service.""" settings_service = MagicMock() settings_service.settings = MagicMock() settings_service.settings.transactions_storage_enabled = True return settings_service @pytest.fixture def mock_settings_service_disabled(self) -> MagicMock: """Create a mock settings service with transactions disabled.""" settings_service = MagicMock() settings_service.settings = MagicMock() settings_service.settings.transactions_storage_enabled = False return settings_service @pytest.fixture def service(self, mock_settings_service: MagicMock) -> TransactionService: """Create a TransactionService instance for testing.""" return TransactionService(mock_settings_service) @pytest.fixture def service_disabled(self, mock_settings_service_disabled: MagicMock) -> TransactionService: """Create a TransactionService instance with transactions disabled.""" return TransactionService(mock_settings_service_disabled) def test_should_implement_protocol(self, service: TransactionService) -> None: """Verify TransactionService implements TransactionServiceProtocol.""" assert isinstance(service, TransactionServiceProtocol) def test_should_have_correct_name(self, service: TransactionService) -> None: """Verify service has correct name attribute.""" assert service.name == "transaction_service" def test_should_return_true_for_is_enabled_when_enabled(self, service: TransactionService) -> None: """Verify is_enabled returns True when transactions are enabled.""" assert service.is_enabled() is True def test_should_return_false_for_is_enabled_when_disabled(self, service_disabled: TransactionService) -> 
None: """Verify is_enabled returns False when transactions are disabled.""" assert service_disabled.is_enabled() is False @pytest.mark.asyncio async def test_should_not_log_when_disabled(self, service_disabled: TransactionService) -> None: """Verify log_transaction does nothing when transactions are disabled.""" with patch("langflow.services.transaction.service.session_scope") as mock_session: await service_disabled.log_transaction( flow_id="test-flow-id", vertex_id="test-vertex-id", inputs={"key": "value"}, outputs={"result": "output"}, status="success", ) mock_session.assert_not_called() @pytest.mark.asyncio async def test_should_log_transaction_when_enabled(self, service: TransactionService) -> None: """Verify log_transaction creates a transaction record when enabled.""" mock_session = AsyncMock() mock_crud = AsyncMock() with ( patch("langflow.services.transaction.service.session_scope") as mock_session_scope, patch("langflow.services.transaction.service.crud_log_transaction", mock_crud) as mock_log, ): mock_session_scope.return_value.__aenter__ = AsyncMock(return_value=mock_session) mock_session_scope.return_value.__aexit__ = AsyncMock(return_value=None) await service.log_transaction( flow_id="550e8400-e29b-41d4-a716-446655440000", vertex_id="test-vertex-id", inputs={"key": "value"}, outputs={"result": "output"}, status="success", ) mock_log.assert_called_once() call_args = mock_log.call_args transaction = call_args[0][1] assert transaction.vertex_id == "test-vertex-id" assert transaction.status == "success" assert transaction.flow_id == UUID("550e8400-e29b-41d4-a716-446655440000") @pytest.mark.asyncio async def test_should_handle_string_flow_id(self, service: TransactionService) -> None: """Verify log_transaction handles string flow_id correctly.""" mock_session = AsyncMock() mock_crud = AsyncMock() with ( patch("langflow.services.transaction.service.session_scope") as mock_session_scope, patch("langflow.services.transaction.service.crud_log_transaction", 
mock_crud), ): mock_session_scope.return_value.__aenter__ = AsyncMock(return_value=mock_session) mock_session_scope.return_value.__aexit__ = AsyncMock(return_value=None) await service.log_transaction( flow_id="550e8400-e29b-41d4-a716-446655440000", vertex_id="test-vertex-id", inputs=None, outputs=None, status="success", ) call_args = mock_crud.call_args transaction = call_args[0][1] assert isinstance(transaction.flow_id, UUID) @pytest.mark.asyncio async def test_should_handle_error_status_with_message(self, service: TransactionService) -> None: """Verify log_transaction handles error status with error message.""" mock_session = AsyncMock() mock_crud = AsyncMock() with ( patch("langflow.services.transaction.service.session_scope") as mock_session_scope, patch("langflow.services.transaction.service.crud_log_transaction", mock_crud), ): mock_session_scope.return_value.__aenter__ = AsyncMock(return_value=mock_session) mock_session_scope.return_value.__aexit__ = AsyncMock(return_value=None) await service.log_transaction( flow_id="550e8400-e29b-41d4-a716-446655440000", vertex_id="test-vertex-id", inputs={"key": "value"}, outputs=None, status="error", error="Something went wrong", ) call_args = mock_crud.call_args transaction = call_args[0][1] assert transaction.status == "error" assert transaction.error == "Something went wrong" @pytest.mark.asyncio async def test_should_handle_target_id(self, service: TransactionService) -> None: """Verify log_transaction handles target_id parameter.""" mock_session = AsyncMock() mock_crud = AsyncMock() with ( patch("langflow.services.transaction.service.session_scope") as mock_session_scope, patch("langflow.services.transaction.service.crud_log_transaction", mock_crud), ): mock_session_scope.return_value.__aenter__ = AsyncMock(return_value=mock_session) mock_session_scope.return_value.__aexit__ = AsyncMock(return_value=None) await service.log_transaction( flow_id="550e8400-e29b-41d4-a716-446655440000", vertex_id="test-vertex-id", 
inputs={"key": "value"}, outputs={"result": "output"}, status="success", target_id="target-vertex-id", ) call_args = mock_crud.call_args transaction = call_args[0][1] assert transaction.target_id == "target-vertex-id" @pytest.mark.asyncio async def test_should_not_raise_on_database_error(self, service: TransactionService) -> None: """Verify log_transaction handles database errors gracefully.""" with patch("langflow.services.transaction.service.session_scope") as mock_session_scope: mock_session_scope.side_effect = Exception("Database error") # Should not raise await service.log_transaction( flow_id="550e8400-e29b-41d4-a716-446655440000", vertex_id="test-vertex-id", inputs={"key": "value"}, outputs={"result": "output"}, status="success", )
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/services/transaction/test_service.py", "license": "MIT License", "lines": 157, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/src/lfx/services/transaction/service.py
"""Transaction service implementations for lfx.""" from __future__ import annotations from typing import Any from lfx.services.interfaces import TransactionServiceProtocol class NoopTransactionService(TransactionServiceProtocol): """No-operation transaction service for standalone lfx mode. This service is used when lfx runs without a concrete transaction service implementation (e.g., without langflow). All operations are no-ops and transaction logging is disabled. """ async def log_transaction( self, flow_id: str, vertex_id: str, inputs: dict[str, Any] | None, outputs: dict[str, Any] | None, status: str, target_id: str | None = None, error: str | None = None, ) -> None: """No-op implementation of transaction logging. In standalone mode, transactions are not persisted. """ def is_enabled(self) -> bool: """Transaction logging is disabled in noop mode.""" return False
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/src/lfx/services/transaction/service.py", "license": "MIT License", "lines": 26, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
langflow-ai/langflow:src/backend/tests/unit/components/embeddings/test_ollama_embeddings_component.py
"""Tests for OllamaEmbeddingsComponent. This test module validates the OllamaEmbeddingsComponent functionality: - Building embeddings with various configurations - URL handling (localhost transformation, /v1 suffix stripping) - Model fetching with capability filtering - URL validation - Build config updates - Headers property behavior """ import re from unittest.mock import AsyncMock, MagicMock, mock_open, patch import pytest from lfx.components.ollama.ollama_embeddings import OllamaEmbeddingsComponent from tests.base import ComponentTestBaseWithoutClient class TestOllamaEmbeddingsComponent(ComponentTestBaseWithoutClient): """Tests for the OllamaEmbeddingsComponent.""" @pytest.fixture def component_class(self): """Return the component class to test.""" return OllamaEmbeddingsComponent @pytest.fixture def default_kwargs(self): """Return the default kwargs for the component.""" return { "base_url": "http://localhost:11434", "model_name": "nomic-embed-text", "api_key": None, } @pytest.fixture def file_names_mapping(self): """Return the file names mapping for different versions.""" # Provide an empty list or the actual mapping if versioned files exist return [] # ========================================================================= # Headers Property Tests # ========================================================================= def test_headers_with_api_key(self, component_class): """Test that headers include Authorization when API key is provided.""" component = component_class() component.api_key = "test-api-key-12345" headers = component.headers assert headers is not None assert headers["Authorization"] == "Bearer test-api-key-12345" def test_headers_without_api_key(self, component_class): """Test that headers return None when no API key is provided.""" component = component_class() component.api_key = None headers = component.headers assert headers is None def test_headers_with_empty_api_key(self, component_class): """Test that headers return None when API 
key is empty string.""" component = component_class() component.api_key = "" headers = component.headers assert headers is None def test_headers_with_whitespace_api_key(self, component_class): """Test that headers return None when API key is whitespace only.""" component = component_class() component.api_key = " " headers = component.headers assert headers is None # ========================================================================= # Build Embeddings Tests # ========================================================================= @patch("lfx.components.ollama.ollama_embeddings.OllamaEmbeddings") def test_build_embeddings_basic(self, mock_ollama_embeddings, component_class, default_kwargs): """Test build_embeddings with basic parameters.""" mock_instance = MagicMock() mock_ollama_embeddings.return_value = mock_instance component = component_class(**default_kwargs) result = component.build_embeddings() mock_ollama_embeddings.assert_called_once_with( model="nomic-embed-text", base_url="http://localhost:11434", ) assert result == mock_instance @patch("lfx.components.ollama.ollama_embeddings.OllamaEmbeddings") def test_build_embeddings_with_api_key(self, mock_ollama_embeddings, component_class): """Test build_embeddings passes headers via client_kwargs when API key is set.""" mock_instance = MagicMock() mock_ollama_embeddings.return_value = mock_instance component = component_class() component.base_url = "http://localhost:11434" component.model_name = "nomic-embed-text" component.api_key = "test-api-key" result = component.build_embeddings() mock_ollama_embeddings.assert_called_once_with( model="nomic-embed-text", base_url="http://localhost:11434", client_kwargs={"headers": {"Authorization": "Bearer test-api-key"}}, ) assert result == mock_instance @patch("lfx.components.ollama.ollama_embeddings.OllamaEmbeddings") def test_build_embeddings_without_api_key_no_client_kwargs(self, mock_ollama_embeddings, component_class): """Test build_embeddings doesn't pass 
client_kwargs when no API key.""" mock_instance = MagicMock() mock_ollama_embeddings.return_value = mock_instance component = component_class() component.base_url = "http://localhost:11434" component.model_name = "nomic-embed-text" component.api_key = None component.build_embeddings() # Verify client_kwargs is not in the call call_kwargs = mock_ollama_embeddings.call_args[1] assert "client_kwargs" not in call_kwargs @patch("lfx.components.ollama.ollama_embeddings.OllamaEmbeddings") def test_build_embeddings_connection_error(self, mock_ollama_embeddings, component_class): """Test build_embeddings raises ValueError on connection error.""" mock_ollama_embeddings.side_effect = Exception("connection error") component = component_class() component.base_url = "http://localhost:11434" component.model_name = "nomic-embed-text" component.api_key = None with pytest.raises(ValueError, match=re.escape("Unable to connect to the Ollama API.")): component.build_embeddings() # ========================================================================= # URL Handling Tests - /v1 Suffix Stripping # ========================================================================= @patch("lfx.components.ollama.ollama_embeddings.OllamaEmbeddings") @patch("lfx.components.ollama.ollama_embeddings.logger") def test_build_embeddings_strips_v1_suffix_and_logs_warning( self, mock_logger, mock_ollama_embeddings, component_class ): """Test that /v1 suffix is automatically stripped and a warning is logged.""" mock_instance = MagicMock() mock_ollama_embeddings.return_value = mock_instance component = component_class() component.base_url = "http://localhost:11434/v1" component.model_name = "nomic-embed-text" component.api_key = None component.build_embeddings() # Verify warning was logged mock_logger.warning.assert_called_once() warning_message = mock_logger.warning.call_args[0][0] assert "Detected '/v1' suffix in base URL" in warning_message assert "https://docs.ollama.com/openai#openai-compatibility" in 
warning_message # Verify OllamaEmbeddings was called without /v1 call_kwargs = mock_ollama_embeddings.call_args[1] assert call_kwargs["base_url"] == "http://localhost:11434" @patch("lfx.components.ollama.ollama_embeddings.OllamaEmbeddings") @patch("lfx.components.ollama.ollama_embeddings.logger") def test_build_embeddings_strips_v1_trailing_slash(self, mock_logger, mock_ollama_embeddings, component_class): """Test that /v1/ suffix is also automatically stripped.""" mock_instance = MagicMock() mock_ollama_embeddings.return_value = mock_instance component = component_class() component.base_url = "http://localhost:11434/v1/" component.model_name = "nomic-embed-text" component.api_key = None component.build_embeddings() # Verify warning was logged mock_logger.warning.assert_called_once() # Verify OllamaEmbeddings was called without /v1 call_kwargs = mock_ollama_embeddings.call_args[1] assert call_kwargs["base_url"] == "http://localhost:11434" # ========================================================================= # URL Handling Tests - Localhost Transformation # ========================================================================= @patch("socket.getaddrinfo") @patch("lfx.utils.util.Path") @patch("lfx.components.ollama.ollama_embeddings.OllamaEmbeddings") def test_build_embeddings_transforms_localhost_in_docker_container( self, mock_ollama_embeddings, mock_path_class, mock_getaddrinfo, component_class ): """Test that localhost URLs are transformed to host.docker.internal in Docker container.""" # Mock Docker container detection def path_side_effect(path_str): mock_instance = MagicMock() if path_str == "/.dockerenv": mock_instance.exists.return_value = True else: mock_instance.exists.return_value = False return mock_instance mock_path_class.side_effect = path_side_effect # Mock getaddrinfo to succeed for host.docker.internal mock_getaddrinfo.return_value = [("AF_INET", "SOCK_STREAM", 6, "", ("192.168.65.2", 0))] mock_model = MagicMock() 
mock_ollama_embeddings.return_value = mock_model component = component_class() component.base_url = "http://localhost:11434" component.model_name = "nomic-embed-text" component.api_key = None result = component.build_embeddings() # Verify OllamaEmbeddings was called with host.docker.internal call_kwargs = mock_ollama_embeddings.call_args[1] assert call_kwargs["base_url"] == "http://host.docker.internal:11434" assert result == mock_model @patch("lfx.utils.util.Path") @patch("lfx.components.ollama.ollama_embeddings.OllamaEmbeddings") def test_build_embeddings_no_transform_outside_container( self, mock_ollama_embeddings, mock_path_class, component_class ): """Test that localhost URLs are NOT transformed when running outside a container.""" # Mock no container environment mock_instance = MagicMock() mock_instance.exists.return_value = False mock_path_class.return_value = mock_instance mock_model = MagicMock() mock_ollama_embeddings.return_value = mock_model component = component_class() component.base_url = "http://localhost:11434" component.model_name = "nomic-embed-text" component.api_key = None result = component.build_embeddings() # Verify OllamaEmbeddings was called with original localhost URL call_kwargs = mock_ollama_embeddings.call_args[1] assert call_kwargs["base_url"] == "http://localhost:11434" assert result == mock_model @patch("socket.getaddrinfo") @patch("lfx.utils.util.Path") @patch("lfx.components.ollama.ollama_embeddings.OllamaEmbeddings") def test_build_embeddings_transforms_localhost_in_podman_container( self, mock_ollama_embeddings, mock_path_class, mock_getaddrinfo, component_class ): """Test that localhost URLs are transformed to host.containers.internal in Podman container.""" # Mock Podman container detection (no .dockerenv, but has podman in cgroup) cgroup_content = "12:pids:/podman/abc123\n" mock_cgroup = mock_open(read_data=cgroup_content) def path_side_effect(path_str): mock_instance = MagicMock() if path_str == "/.dockerenv": 
mock_instance.exists.return_value = False elif path_str == "/proc/self/cgroup": mock_instance.exists.return_value = True mock_instance.open = mock_cgroup else: mock_instance.exists.return_value = False return mock_instance mock_path_class.side_effect = path_side_effect # Mock getaddrinfo to succeed for host.containers.internal mock_getaddrinfo.return_value = [("AF_INET", "SOCK_STREAM", 6, "", ("192.168.65.2", 0))] mock_model = MagicMock() mock_ollama_embeddings.return_value = mock_model component = component_class() component.base_url = "http://localhost:11434" component.model_name = "nomic-embed-text" component.api_key = None result = component.build_embeddings() # Verify OllamaEmbeddings was called with host.containers.internal call_kwargs = mock_ollama_embeddings.call_args[1] assert call_kwargs["base_url"] == "http://host.containers.internal:11434" assert result == mock_model @patch("socket.getaddrinfo") @patch("lfx.utils.util.Path") @patch("lfx.components.ollama.ollama_embeddings.OllamaEmbeddings") def test_build_embeddings_transforms_127_0_0_1_in_container( self, mock_ollama_embeddings, mock_path_class, mock_getaddrinfo, component_class ): """Test that 127.0.0.1 URLs are also transformed in container.""" # Mock Docker container detection def path_side_effect(path_str): mock_instance = MagicMock() if path_str == "/.dockerenv": mock_instance.exists.return_value = True else: mock_instance.exists.return_value = False return mock_instance mock_path_class.side_effect = path_side_effect # Mock getaddrinfo to succeed for host.docker.internal mock_getaddrinfo.return_value = [("AF_INET", "SOCK_STREAM", 6, "", ("192.168.65.2", 0))] mock_model = MagicMock() mock_ollama_embeddings.return_value = mock_model component = component_class() component.base_url = "http://127.0.0.1:11434" component.model_name = "nomic-embed-text" component.api_key = None result = component.build_embeddings() # Verify OllamaEmbeddings was called with host.docker.internal call_kwargs = 
mock_ollama_embeddings.call_args[1] assert call_kwargs["base_url"] == "http://host.docker.internal:11434" assert result == mock_model # ========================================================================= # Model Fetching Tests (async) # ========================================================================= @pytest.mark.asyncio @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.post") @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.get") async def test_get_model_success_filters_embedding_capability(self, mock_get, mock_post, component_class): """Test get_model returns only models with embedding capability.""" component = component_class() mock_get_response = AsyncMock() mock_get_response.raise_for_status = MagicMock(return_value=None) mock_get_response.json.return_value = { component.JSON_MODELS_KEY: [ {component.JSON_NAME_KEY: "nomic-embed-text"}, {component.JSON_NAME_KEY: "llama3.1"}, {component.JSON_NAME_KEY: "mxbai-embed-large"}, ] } mock_get.return_value = mock_get_response mock_post_response = AsyncMock() mock_post_response.raise_for_status = MagicMock(return_value=None) # First model has embedding capability, second doesn't, third does mock_post_response.json.side_effect = [ {component.JSON_CAPABILITIES_KEY: [component.EMBEDDING_CAPABILITY]}, {component.JSON_CAPABILITIES_KEY: ["completion"]}, # Not an embedding model {component.JSON_CAPABILITIES_KEY: [component.EMBEDDING_CAPABILITY, "completion"]}, ] mock_post.return_value = mock_post_response base_url = "http://localhost:11434" result = await component.get_model(base_url) # Should only return embedding models assert result == ["nomic-embed-text", "mxbai-embed-large"] assert mock_get.call_count == 1 assert mock_post.call_count == 3 @pytest.mark.asyncio @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.get") async def test_get_model_failure(self, mock_get, component_class): """Test get_model raises ValueError on connection error.""" import httpx 
component = component_class() mock_get.side_effect = httpx.RequestError("Connection error", request=None) base_url = "http://localhost:11434" with pytest.raises(ValueError, match=re.escape("Could not get model names from Ollama.")): await component.get_model(base_url) @pytest.mark.asyncio @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.post") @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.get") async def test_get_model_with_v1_suffix_stripped(self, mock_get, mock_post, component_class): """Test that get_model strips /v1 suffix when fetching models.""" component = component_class() mock_get_response = AsyncMock() mock_get_response.raise_for_status = MagicMock(return_value=None) mock_get_response.json.return_value = { component.JSON_MODELS_KEY: [ {component.JSON_NAME_KEY: "nomic-embed-text"}, ] } mock_get.return_value = mock_get_response mock_post_response = AsyncMock() mock_post_response.raise_for_status = MagicMock(return_value=None) mock_post_response.json.return_value = {component.JSON_CAPABILITIES_KEY: [component.EMBEDDING_CAPABILITY]} mock_post.return_value = mock_post_response base_url = "http://localhost:11434/v1" result = await component.get_model(base_url) # Verify it called /api/tags without /v1 assert mock_get.call_count == 1 called_kwargs = mock_get.call_args[1] assert called_kwargs["url"] == "http://localhost:11434/api/tags" assert result == ["nomic-embed-text"] @pytest.mark.asyncio @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.post") @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.get") async def test_get_model_with_api_key_passes_headers(self, mock_get, mock_post, component_class): """Test that get_model passes headers when API key is set.""" component = component_class() component.api_key = "test-api-key" mock_get_response = AsyncMock() mock_get_response.raise_for_status = MagicMock(return_value=None) mock_get_response.json.return_value = { component.JSON_MODELS_KEY: [ 
{component.JSON_NAME_KEY: "nomic-embed-text"}, ] } mock_get.return_value = mock_get_response mock_post_response = AsyncMock() mock_post_response.raise_for_status = MagicMock(return_value=None) mock_post_response.json.return_value = {component.JSON_CAPABILITIES_KEY: [component.EMBEDDING_CAPABILITY]} mock_post.return_value = mock_post_response base_url = "http://localhost:11434" result = await component.get_model(base_url) # Verify headers were passed to both GET and POST get_call_kwargs = mock_get.call_args[1] assert get_call_kwargs["headers"]["Authorization"] == "Bearer test-api-key" post_call_kwargs = mock_post.call_args[1] assert post_call_kwargs["headers"]["Authorization"] == "Bearer test-api-key" assert result == ["nomic-embed-text"] @pytest.mark.asyncio @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.get") async def test_get_model_empty_model_list(self, mock_get, component_class): """Test get_model returns empty list when no models are available.""" component = component_class() mock_get_response = AsyncMock() mock_get_response.raise_for_status = MagicMock(return_value=None) mock_get_response.json.return_value = {component.JSON_MODELS_KEY: []} mock_get.return_value = mock_get_response base_url = "http://localhost:11434" result = await component.get_model(base_url) assert result == [] assert mock_get.call_count == 1 @pytest.mark.asyncio @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.post") @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.get") async def test_get_model_missing_capabilities_key(self, mock_get, mock_post, component_class): """Test get_model handles models with missing capabilities key (defaults to empty list).""" component = component_class() mock_get_response = AsyncMock() mock_get_response.raise_for_status = MagicMock(return_value=None) mock_get_response.json.return_value = { component.JSON_MODELS_KEY: [ {component.JSON_NAME_KEY: "model-without-caps"}, {component.JSON_NAME_KEY: 
"embedding-model"}, ] } mock_get.return_value = mock_get_response mock_post_response = AsyncMock() mock_post_response.raise_for_status = MagicMock(return_value=None) # First model has no capabilities key, second has embedding capability mock_post_response.json.side_effect = [ {}, # No capabilities key at all {component.JSON_CAPABILITIES_KEY: [component.EMBEDDING_CAPABILITY]}, ] mock_post.return_value = mock_post_response base_url = "http://localhost:11434" result = await component.get_model(base_url) # Should only return the model with embedding capability assert result == ["embedding-model"] assert mock_post.call_count == 2 @pytest.mark.asyncio @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.post") @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.get") async def test_get_model_without_api_key_no_headers(self, mock_get, mock_post, component_class): """Test that get_model passes None headers when no API key is set.""" component = component_class() component.api_key = None mock_get_response = AsyncMock() mock_get_response.raise_for_status = MagicMock(return_value=None) mock_get_response.json.return_value = { component.JSON_MODELS_KEY: [ {component.JSON_NAME_KEY: "nomic-embed-text"}, ] } mock_get.return_value = mock_get_response mock_post_response = AsyncMock() mock_post_response.raise_for_status = MagicMock(return_value=None) mock_post_response.json.return_value = {component.JSON_CAPABILITIES_KEY: [component.EMBEDDING_CAPABILITY]} mock_post.return_value = mock_post_response base_url = "http://localhost:11434" result = await component.get_model(base_url) # Verify headers were None for both GET and POST get_call_kwargs = mock_get.call_args[1] assert get_call_kwargs["headers"] is None post_call_kwargs = mock_post.call_args[1] assert post_call_kwargs["headers"] is None assert result == ["nomic-embed-text"] @pytest.mark.asyncio @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.post") 
@patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.get") async def test_get_model_url_normalization_trailing_slash(self, mock_get, mock_post, component_class): """Test that get_model normalizes URLs with trailing slashes.""" component = component_class() mock_get_response = AsyncMock() mock_get_response.raise_for_status = MagicMock(return_value=None) mock_get_response.json.return_value = { component.JSON_MODELS_KEY: [ {component.JSON_NAME_KEY: "nomic-embed-text"}, ] } mock_get.return_value = mock_get_response mock_post_response = AsyncMock() mock_post_response.raise_for_status = MagicMock(return_value=None) mock_post_response.json.return_value = {component.JSON_CAPABILITIES_KEY: [component.EMBEDDING_CAPABILITY]} mock_post.return_value = mock_post_response # Test with trailing slash base_url = "http://localhost:11434/" result = await component.get_model(base_url) # Verify it called the correct URL assert mock_get.call_count == 1 called_kwargs = mock_get.call_args[1] assert called_kwargs["url"] == "http://localhost:11434/api/tags" assert result == ["nomic-embed-text"] @pytest.mark.asyncio @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.post") @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.get") async def test_get_model_url_normalization_v1_trailing_slash(self, mock_get, mock_post, component_class): """Test that get_model normalizes URLs with /v1/ (trailing slash after v1).""" component = component_class() mock_get_response = AsyncMock() mock_get_response.raise_for_status = MagicMock(return_value=None) mock_get_response.json.return_value = { component.JSON_MODELS_KEY: [ {component.JSON_NAME_KEY: "nomic-embed-text"}, ] } mock_get.return_value = mock_get_response mock_post_response = AsyncMock() mock_post_response.raise_for_status = MagicMock(return_value=None) mock_post_response.json.return_value = {component.JSON_CAPABILITIES_KEY: [component.EMBEDDING_CAPABILITY]} mock_post.return_value = mock_post_response # Test 
with /v1/ (trailing slash after v1) base_url = "http://localhost:11434/v1/" result = await component.get_model(base_url) # Verify it called the correct URL without /v1 assert mock_get.call_count == 1 called_kwargs = mock_get.call_args[1] assert called_kwargs["url"] == "http://localhost:11434/api/tags" assert result == ["nomic-embed-text"] @pytest.mark.asyncio @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.post") @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.get") async def test_get_model_all_models_non_embedding(self, mock_get, mock_post, component_class): """Test get_model returns empty list when no models have embedding capability.""" component = component_class() mock_get_response = AsyncMock() mock_get_response.raise_for_status = MagicMock(return_value=None) mock_get_response.json.return_value = { component.JSON_MODELS_KEY: [ {component.JSON_NAME_KEY: "llama3.1"}, {component.JSON_NAME_KEY: "codellama"}, ] } mock_get.return_value = mock_get_response mock_post_response = AsyncMock() mock_post_response.raise_for_status = MagicMock(return_value=None) # All models have completion capability only, no embedding mock_post_response.json.side_effect = [ {component.JSON_CAPABILITIES_KEY: ["completion"]}, {component.JSON_CAPABILITIES_KEY: ["completion", "tools"]}, ] mock_post.return_value = mock_post_response base_url = "http://localhost:11434" result = await component.get_model(base_url) # Should return empty list since no embedding models assert result == [] assert mock_post.call_count == 2 @pytest.mark.asyncio @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.post") @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.get") async def test_get_model_post_request_failure(self, mock_get, mock_post, component_class): """Test get_model raises ValueError when POST request to get capabilities fails.""" import httpx component = component_class() mock_get_response = AsyncMock() mock_get_response.raise_for_status 
= MagicMock(return_value=None) mock_get_response.json.return_value = { component.JSON_MODELS_KEY: [ {component.JSON_NAME_KEY: "nomic-embed-text"}, ] } mock_get.return_value = mock_get_response # POST request fails mock_post.side_effect = httpx.RequestError("Connection error", request=None) base_url = "http://localhost:11434" with pytest.raises(ValueError, match=re.escape("Could not get model names from Ollama.")): await component.get_model(base_url) # ========================================================================= # URL Validation Tests (async) # ========================================================================= @pytest.mark.asyncio @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.get") async def test_is_valid_ollama_url_success(self, mock_get, component_class): """Test is_valid_ollama_url returns True for valid URL.""" component = component_class() mock_response = AsyncMock() mock_response.status_code = 200 mock_get.return_value = mock_response result = await component.is_valid_ollama_url("http://localhost:11434") assert result is True mock_get.assert_called_once() @pytest.mark.asyncio @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.get") async def test_is_valid_ollama_url_failure(self, mock_get, component_class): """Test is_valid_ollama_url returns False on connection error.""" import httpx component = component_class() mock_get.side_effect = httpx.RequestError("Connection error", request=None) result = await component.is_valid_ollama_url("http://localhost:11434") assert result is False @pytest.mark.asyncio @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.get") async def test_is_valid_ollama_url_with_v1_suffix(self, mock_get, component_class): """Test that is_valid_ollama_url strips /v1 suffix when validating.""" component = component_class() mock_response = AsyncMock() mock_response.status_code = 200 mock_get.return_value = mock_response result = await 
component.is_valid_ollama_url("http://localhost:11434/v1") # Verify it called /api/tags without /v1 mock_get.assert_called_once() called_kwargs = mock_get.call_args[1] assert called_kwargs["url"] == "http://localhost:11434/api/tags" assert result is True @pytest.mark.asyncio @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.get") async def test_is_valid_ollama_url_with_api_key_passes_headers(self, mock_get, component_class): """Test that is_valid_ollama_url passes headers when API key is set.""" component = component_class() component.api_key = "test-api-key" mock_response = AsyncMock() mock_response.status_code = 200 mock_get.return_value = mock_response result = await component.is_valid_ollama_url("http://localhost:11434") # Verify headers were passed call_kwargs = mock_get.call_args[1] assert call_kwargs["headers"]["Authorization"] == "Bearer test-api-key" assert result is True @pytest.mark.asyncio @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.get") async def test_is_valid_ollama_url_without_api_key_no_headers(self, mock_get, component_class): """Test that is_valid_ollama_url doesn't pass headers when no API key.""" component = component_class() component.api_key = None mock_response = AsyncMock() mock_response.status_code = 200 mock_get.return_value = mock_response result = await component.is_valid_ollama_url("http://localhost:11434") # Verify headers were None call_kwargs = mock_get.call_args[1] assert call_kwargs["headers"] is None assert result is True @pytest.mark.asyncio async def test_is_valid_ollama_url_with_empty_url(self, component_class): """Test that is_valid_ollama_url returns False for empty URL.""" component = component_class() result = await component.is_valid_ollama_url("") assert result is False @pytest.mark.asyncio async def test_is_valid_ollama_url_with_none_url(self, component_class): """Test that is_valid_ollama_url returns False for None URL.""" component = component_class() result = await 
component.is_valid_ollama_url(None) assert result is False # ========================================================================= # Build Config Update Tests (async) # ========================================================================= @pytest.mark.asyncio @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.post") @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.get") async def test_update_build_config_model_name_field(self, mock_get, mock_post, component_class): """Test update_build_config populates model options when model_name field is updated.""" component = component_class() mock_get_response = AsyncMock() mock_get_response.raise_for_status = MagicMock(return_value=None) mock_get_response.json.return_value = { component.JSON_MODELS_KEY: [ {component.JSON_NAME_KEY: "nomic-embed-text"}, {component.JSON_NAME_KEY: "mxbai-embed-large"}, ] } mock_get.return_value = mock_get_response mock_post_response = AsyncMock() mock_post_response.raise_for_status = MagicMock(return_value=None) mock_post_response.json.side_effect = [ {component.JSON_CAPABILITIES_KEY: [component.EMBEDDING_CAPABILITY]}, {component.JSON_CAPABILITIES_KEY: [component.EMBEDDING_CAPABILITY]}, ] mock_post.return_value = mock_post_response component.base_url = "http://localhost:11434" build_config = { "model_name": {"options": []}, } # Mock is_valid_ollama_url to return True with patch.object(component, "is_valid_ollama_url", new_callable=AsyncMock) as mock_valid: mock_valid.return_value = True updated_config = await component.update_build_config(build_config, "nomic-embed-text", "model_name") assert "nomic-embed-text" in updated_config["model_name"]["options"] assert "mxbai-embed-large" in updated_config["model_name"]["options"] @pytest.mark.asyncio @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.get") async def test_update_build_config_base_url_field(self, mock_get, component_class): """Test update_build_config populates model options when 
base_url field is updated.""" component = component_class() mock_get_response = AsyncMock() mock_get_response.raise_for_status = MagicMock(return_value=None) mock_get_response.json.return_value = { component.JSON_MODELS_KEY: [ {component.JSON_NAME_KEY: "nomic-embed-text"}, ] } mock_get_response.status_code = 200 mock_get.return_value = mock_get_response component.base_url = "http://localhost:11434" build_config = { "model_name": {"options": []}, } # Mock both is_valid_ollama_url and get_model with ( patch.object(component, "is_valid_ollama_url", new_callable=AsyncMock) as mock_valid, patch.object(component, "get_model", new_callable=AsyncMock) as mock_get_model, ): mock_valid.return_value = True mock_get_model.return_value = ["nomic-embed-text", "mxbai-embed-large"] updated_config = await component.update_build_config(build_config, "http://localhost:11434", "base_url") assert "nomic-embed-text" in updated_config["model_name"]["options"] assert "mxbai-embed-large" in updated_config["model_name"]["options"] @pytest.mark.asyncio @patch("lfx.components.ollama.ollama_embeddings.httpx.AsyncClient.get") async def test_update_build_config_when_ollama_not_running(self, mock_get, component_class): """Test update_build_config raises error when Ollama isn't running.""" import httpx component = component_class() mock_get.side_effect = httpx.RequestError("Connection error", request=None) component.base_url = "http://localhost:11434" build_config = { "model_name": {"options": []}, } # Mock is_valid_ollama_url to return False with patch.object(component, "is_valid_ollama_url", new_callable=AsyncMock) as mock_valid: mock_valid.return_value = False with pytest.raises(ValueError, match="Ollama is not running"): await component.update_build_config(build_config, "nomic-embed-text", "model_name") @pytest.mark.asyncio async def test_update_build_config_empty_options_when_url_invalid(self, component_class): """Test update_build_config sets empty options when URL is invalid.""" component = 
component_class() component.base_url = "http://localhost:11434" build_config = { "model_name": {"options": ["old-model"]}, } # Mock is_valid_ollama_url to return True but get_model to fail gracefully with ( patch.object(component, "is_valid_ollama_url", new_callable=AsyncMock) as mock_valid, patch.object(component, "get_model", new_callable=AsyncMock) as mock_get_model, ): mock_valid.return_value = True mock_get_model.return_value = [] updated_config = await component.update_build_config(build_config, "http://localhost:11434", "base_url") assert updated_config["model_name"]["options"] == [] # ========================================================================= # Edge Cases and Integration Tests # ========================================================================= def test_component_constants(self, component_class): """Test that component constants are correctly defined.""" component = component_class() assert component.JSON_MODELS_KEY == "models" assert component.JSON_NAME_KEY == "name" assert component.JSON_CAPABILITIES_KEY == "capabilities" assert component.EMBEDDING_CAPABILITY == "embedding" def test_component_metadata(self, component_class): """Test that component metadata is correctly defined.""" component = component_class() assert component.display_name == "Ollama Embeddings" assert component.name == "OllamaEmbeddings" assert component.icon == "Ollama" assert "embeddings" in component.description.lower() def test_component_inputs(self, component_class): """Test that component has expected inputs.""" component = component_class() input_names = [inp.name for inp in component.inputs] assert "model_name" in input_names assert "base_url" in input_names assert "api_key" in input_names def test_component_outputs(self, component_class): """Test that component has expected outputs.""" component = component_class() output_names = [out.name for out in component.outputs] assert "embeddings" in output_names
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/components/embeddings/test_ollama_embeddings_component.py", "license": "MIT License", "lines": 737, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/test_database_path_resolution.py
"""Tests for database path resolution in settings.

Verifies that the database location follows the ``save_db_in_config_dir``
setting and falls back sensibly depending on whether the ``langflow``
package can be imported.
"""

import os
from pathlib import Path
from unittest.mock import patch


class TestDatabasePathResolution:
    """Test database path resolution in Settings."""

    def test_database_path_uses_langflow_package_when_save_db_in_config_dir_false(self, tmp_path):
        """With save_db_in_config_dir=False the DB lives in the langflow package dir."""
        import langflow

        from lfx.services.settings.base import Settings

        overrides = {
            "LANGFLOW_CONFIG_DIR": str(tmp_path),
            "LANGFLOW_SAVE_DB_IN_CONFIG_DIR": "false",
        }
        # Drop any preset DATABASE_URL so Settings must resolve a path itself.
        merged_env = {k: v for k, v in os.environ.items() if k != "LANGFLOW_DATABASE_URL"}
        merged_env.update(overrides)

        with patch.dict(os.environ, merged_env, clear=True):
            settings = Settings()
            pkg_dir = Path(langflow.__file__).parent.resolve()
            assert settings.database_url is not None
            # The resolved URL must point into the langflow package tree.
            assert str(pkg_dir) in settings.database_url

    def test_database_path_uses_config_dir_when_save_db_in_config_dir_true(self, tmp_path):
        """With save_db_in_config_dir=True the DB lives in the config dir."""
        from lfx.services.settings.base import Settings

        config_dir = tmp_path / "config"
        config_dir.mkdir()

        overrides = {
            "LANGFLOW_CONFIG_DIR": str(config_dir),
            "LANGFLOW_SAVE_DB_IN_CONFIG_DIR": "true",
        }
        # Drop any preset DATABASE_URL so Settings must resolve a path itself.
        merged_env = {k: v for k, v in os.environ.items() if k != "LANGFLOW_DATABASE_URL"}
        merged_env.update(overrides)

        with patch.dict(os.environ, merged_env, clear=True):
            settings = Settings()
            assert settings.database_url is not None
            assert str(config_dir) in settings.database_url

    def test_database_path_falls_back_to_lfx_when_langflow_not_importable(self, tmp_path):
        """When langflow cannot be imported, resolution falls back to the lfx package."""
        import builtins

        import lfx.services.settings.base as settings_module
        from lfx.services.settings.base import Settings

        real_import = builtins.__import__

        def failing_import(name, *args, **kwargs):
            # Simulate an environment where only lfx (not langflow) is installed.
            if name == "langflow":
                raise ImportError(name)
            return real_import(name, *args, **kwargs)

        overrides = {
            "LANGFLOW_CONFIG_DIR": str(tmp_path),
            "LANGFLOW_SAVE_DB_IN_CONFIG_DIR": "false",
        }
        merged_env = {k: v for k, v in os.environ.items() if k != "LANGFLOW_DATABASE_URL"}
        merged_env.update(overrides)

        with (
            patch.dict(os.environ, merged_env, clear=True),
            patch.object(builtins, "__import__", side_effect=failing_import),
        ):
            settings = Settings()
            # The lfx package root, derived from the settings module location.
            lfx_root = Path(settings_module.__file__).parent.parent.parent.resolve()
            assert settings.database_url is not None
            assert str(lfx_root) in settings.database_url

    def test_explicit_database_url_env_var_takes_precedence(self, tmp_path):
        """LANGFLOW_DATABASE_URL always wins over any path resolution."""
        from lfx.services.settings.base import Settings

        custom_url = "sqlite:///custom/path/test.db"
        with patch.dict(
            os.environ,
            {"LANGFLOW_DATABASE_URL": custom_url, "LANGFLOW_CONFIG_DIR": str(tmp_path)},
            clear=False,
        ):
            settings = Settings(config_dir=str(tmp_path))
            assert settings.database_url == custom_url
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/test_database_path_resolution.py", "license": "MIT License", "lines": 78, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/tests/integration/cli/test_simple_agent_integration.py
"""Integration tests for lfx CLI with Simple Agent flow. These tests verify that the lfx CLI can properly load and execute the Simple Agent starter project, addressing the bug where lfx serve/run commands fail with module resolution errors. Requirements: - OPENAI_API_KEY environment variable must be set for execution tests - Integration dependencies must be installed (use: uv sync --group integration) Note on version compatibility: - lfx requires langchain-core>=0.3.66,<1.0.0 - langchain-openai 1.x requires langchain-core 1.x which is incompatible - When installing langchain-openai, use: langchain-openai>=0.3.0,<1.0.0 """ import importlib.util import json import os import re import select import signal # Find a free port for testing import socket import subprocess import sys import time import urllib.request from pathlib import Path import pytest from lfx.__main__ import app as lfx_app from typer.testing import CliRunner runner = CliRunner() def has_integration_deps() -> bool: """Check if integration dependencies are installed.""" required_modules = ["langchain_openai", "langchain_community", "bs4", "lxml"] return all(importlib.util.find_spec(module) is not None for module in required_modules) # Skip all tests in this module if integration deps are not installed pytestmark = pytest.mark.skipif( not has_integration_deps(), reason="Integration dependencies not installed. 
Run: uv sync --group integration", ) def get_starter_projects_path() -> Path: """Get path to starter projects directory.""" test_file_path = Path(__file__).resolve() current = test_file_path.parent while current != current.parent: starter_path = current / "src" / "backend" / "base" / "langflow" / "initial_setup" / "starter_projects" if starter_path.exists(): return starter_path current = current.parent # Return an empty Path() if not found return Path() def get_simple_agent_flow_path() -> Path: """Get path to Simple Agent starter project.""" return get_starter_projects_path() / "Simple Agent.json" def has_openai_api_key() -> bool: """Check if OPENAI_API_KEY is set.""" key = os.getenv("OPENAI_API_KEY", "") return bool(key) and key != "dummy" and len(key) > 10 def parse_json_from_output(output: str, context: str = "output") -> dict: """Parse JSON from command output, searching in reverse if direct parsing fails. Args: output: The command output string to parse. context: Description of the output source for error messages. Returns: The parsed JSON as a dictionary. Raises: pytest.fail: If no valid JSON is found in the output. 
""" try: return json.loads(output) except json.JSONDecodeError: lines = output.split("\n") for line in reversed(lines): try: return json.loads(line) except json.JSONDecodeError: continue pytest.fail(f"No valid JSON in {context}: {output}") class TestSimpleAgentFlowLoading: """Test that Simple Agent flow can be loaded without errors.""" @pytest.fixture def simple_agent_flow_path(self) -> Path: """Get Simple Agent flow path, skip if not found.""" path = get_simple_agent_flow_path() if not path.exists(): pytest.skip(f"Simple Agent flow not found at {path}") return path def test_simple_agent_flow_loads_via_cli(self, simple_agent_flow_path: Path): """Test that lfx run can load the Simple Agent flow without critical errors.""" result = runner.invoke( lfx_app, ["run", "--verbose", "--no-check-variables", str(simple_agent_flow_path), "test input"], ) output = result.output # These are the critical errors that indicate structural problems critical_errors = [ "No module named 'lfx.components", "No module named 'langflow", "'NoneType' object has no attribute 'resolve_component_path'", "Error creating class. 
ModuleNotFoundError", ] for error in critical_errors: assert error not in output, f"Critical error found: {error}\nFull output:\n{output}" def test_simple_agent_flow_loads_directly(self, simple_agent_flow_path: Path): """Test that Simple Agent flow loads correctly using load_flow_from_json.""" from lfx.load import load_flow_from_json try: graph = load_flow_from_json(simple_agent_flow_path, disable_logs=True) assert graph is not None, "Graph should not be None" assert hasattr(graph, "vertices"), "Graph should have vertices" assert len(graph.vertices) > 0, "Graph should have at least one vertex" # Prepare the graph graph.prepare() # Verify Agent component is in the graph component_types = {v.display_name for v in graph.vertices if hasattr(v, "display_name")} assert "Agent" in component_types or any("Agent" in ct for ct in component_types), ( f"Expected Agent in graph, found: {component_types}" ) except ModuleNotFoundError as e: pytest.fail(f"ModuleNotFoundError loading graph: {e}") except Exception as e: if "resolve_component_path" in str(e): pytest.fail(f"Storage service error: {e}") raise def test_simple_agent_flow_json_output(self, simple_agent_flow_path: Path): """Test that lfx run produces valid JSON output.""" result = runner.invoke( lfx_app, ["run", "--format", "json", "--no-check-variables", str(simple_agent_flow_path), "test"], ) # Output should contain valid JSON output_json = parse_json_from_output(result.output.strip()) assert isinstance(output_json, dict), "Output should be a JSON object" class TestSimpleAgentExecution: """Test that Simple Agent flow can actually execute with real API key.""" @pytest.fixture def simple_agent_flow_path(self) -> Path: """Get Simple Agent flow path, skip if not found.""" path = get_simple_agent_flow_path() if not path.exists(): pytest.skip(f"Simple Agent flow not found at {path}") return path @pytest.mark.skipif(not has_openai_api_key(), reason="OPENAI_API_KEY required") def test_simple_agent_executes_successfully(self, 
simple_agent_flow_path: Path): """Test full execution of Simple Agent with real API key. This test verifies that the Simple Agent flow executes successfully and returns a valid response. """ result = subprocess.run( # noqa: S603 [ sys.executable, "-m", "lfx", "run", "--format", "json", str(simple_agent_flow_path), "What is 2 + 2?", ], capture_output=True, text=True, timeout=120, check=False, env={**os.environ}, ) # Parse output output = result.stdout.strip() or result.stderr.strip() output_json = parse_json_from_output(output, context=f"stdout: {result.stdout}\nstderr: {result.stderr}") # Assert successful execution assert output_json.get("success") is True, f"Execution failed: {output_json}" assert "result" in output_json, f"No result in output: {output_json}" # Verify we got a meaningful response result_text = str(output_json.get("result", "")) assert len(result_text) > 0, "Result should not be empty" @pytest.mark.skipif(not has_openai_api_key(), reason="OPENAI_API_KEY required") def test_simple_agent_with_math_question(self, simple_agent_flow_path: Path): """Test Simple Agent can use Calculator tool.""" result = subprocess.run( # noqa: S603 [ sys.executable, "-m", "lfx", "run", "--format", "json", str(simple_agent_flow_path), "Calculate 15 multiplied by 7", ], capture_output=True, text=True, timeout=120, check=False, env={**os.environ}, ) output = result.stdout.strip() or result.stderr.strip() output_json = parse_json_from_output(output) if output_json.get("success"): result_text = str(output_json.get("result", "")) # The agent should compute 15 * 7 = 105 assert "105" in result_text, f"Expected 105 in result: {result_text}" class TestSimpleAgentServe: """Test that Simple Agent can be served.""" @pytest.fixture def simple_agent_flow_path(self) -> Path: """Get Simple Agent flow path, skip if not found.""" path = get_simple_agent_flow_path() if not path.exists(): pytest.skip(f"Simple Agent flow not found at {path}") return path def test_serve_help(self): """Test 
serve help command works.""" result = runner.invoke(lfx_app, ["serve", "--help"]) assert result.exit_code == 0 assert "serve" in result.output.lower() or "Serve" in result.output def test_serve_requires_api_key(self, simple_agent_flow_path: Path, monkeypatch): """Test serve requires LANGFLOW_API_KEY.""" monkeypatch.delenv("LANGFLOW_API_KEY", raising=False) result = runner.invoke( lfx_app, ["serve", str(simple_agent_flow_path)], ) # Should fail or warn about API key assert result.exit_code != 0 or "LANGFLOW_API_KEY" in result.output def test_serve_loads_flow(self, simple_agent_flow_path: Path): """Test serve can load the flow without module errors. Note: We test graph loading directly instead of invoking the serve command because the serve command now properly starts a server that runs indefinitely. """ from lfx.load import load_flow_from_json try: graph = load_flow_from_json(simple_agent_flow_path, disable_logs=True) assert graph is not None, "Graph should not be None" graph.prepare() except ModuleNotFoundError as e: pytest.fail(f"ModuleNotFoundError loading graph for serve: {e}") except Exception as e: if "resolve_component_path" in str(e): pytest.fail(f"Storage service error: {e}") raise def test_serve_starts_server_no_asyncio_error(self, simple_agent_flow_path: Path): """Regression test: lfx serve should not fail with asyncio error. This test verifies the fix for the issue where lfx serve failed with: 'asyncio.run() cannot be called from a running event loop' The fix was to use uvicorn.Server with await server.serve() instead of uvicorn.run() which internally calls asyncio.run(). 
""" with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind(("127.0.0.1", 0)) test_port = s.getsockname()[1] # Start serve in a subprocess with unbuffered output on a specific port proc = subprocess.Popen( # noqa: S603 [ sys.executable, "-u", # Unbuffered output "-m", "lfx", "serve", "--verbose", "--port", str(test_port), str(simple_agent_flow_path), ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, env={**os.environ, "LANGFLOW_API_KEY": "test-key-12345"}, # pragma: allowlist secret ) server_ready = False output_chunks = [] timeout = 15 # seconds start_time = time.time() actual_port = test_port try: while time.time() - start_time < timeout: # Check if process exited exit_code = proc.poll() if exit_code is not None: # Process exited - read remaining output if proc.stdout: remaining = proc.stdout.read() if remaining: output_chunks.append(remaining) output = "".join(output_chunks) # Check for the specific asyncio errors we're regression testing if "asyncio.run() cannot be called from a running event loop" in output: pytest.fail(f"Regression: lfx serve failed with asyncio error.\nOutput:\n{output}") if "coroutine 'Server.serve' was never awaited" in output: pytest.fail(f"Regression: Server.serve coroutine was never awaited.\nOutput:\n{output}") # Process exited for another reason pytest.fail(f"Server process exited with code {exit_code}.\nOutput:\n{output}") # Try to read available output without blocking (Unix only) if proc.stdout: try: ready, _, _ = select.select([proc.stdout], [], [], 0.1) if ready: chunk = proc.stdout.readline() if chunk: output_chunks.append(chunk) # Check if server switched to a different port if "using port" in chunk.lower(): port_match = re.search(r"port (\d+)", chunk) if port_match: actual_port = int(port_match.group(1)) except (ValueError, OSError): pass # Try to connect to server on actual port try: urllib.request.urlopen(f"http://127.0.0.1:{actual_port}/docs", timeout=1) server_ready = True break except Exception: 
time.sleep(0.3) if not server_ready: output = "".join(output_chunks) pytest.fail(f"Server did not become ready within {timeout}s.\nOutput:\n{output}") finally: # Clean up - terminate the server if proc.poll() is None: proc.send_signal(signal.SIGTERM) try: proc.wait(timeout=5) except subprocess.TimeoutExpired: proc.kill() proc.wait() class TestAllStarterProjectsLoad: """Test that all starter projects can load without lfx-specific module errors.""" @pytest.fixture def starter_projects_path(self) -> Path: """Get starter projects path.""" path = get_starter_projects_path() if not path.exists(): pytest.skip(f"Starter projects not found at {path}") return path def test_all_projects_load(self, starter_projects_path: Path): """Test all starter project JSONs can load without lfx-specific errors. Note: This test only fails on lfx-specific module errors (lfx.components.*), not on missing external dependencies (langchain_anthropic, etc.) which are expected when running with minimal dev dependencies. """ from lfx.load import load_flow_from_json json_files = list(starter_projects_path.glob("*.json")) assert len(json_files) > 0, "No starter project files found" lfx_module_errors = [] for json_file in json_files: try: graph = load_flow_from_json(json_file, disable_logs=True) assert graph is not None graph.prepare() except Exception as e: error_str = str(e) # Only track lfx-specific errors, not external dependency errors if "No module named 'lfx." in error_str: lfx_module_errors.append((json_file.name, str(e))) elif "resolve_component_path" in error_str: lfx_module_errors.append((json_file.name, f"Storage error: {e}")) # External dependency errors (langchain_anthropic, etc.) are acceptable # as lfx is designed to work with minimal dependencies if lfx_module_errors: error_details = "\n".join([f" {name}: {error}" for name, error in lfx_module_errors]) pytest.fail(f"LFX module errors in starter projects:\n{error_details}")
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/tests/integration/cli/test_simple_agent_integration.py", "license": "MIT License", "lines": 358, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/tests/unit/inputs/test_model_input_fixes.py
"""Tests for ModelInput fixes. This module tests the following bug fixes: 1. Input port visibility: ModelInput should always show connection handle based on model_type. 2. Model defaults (cb6208f0ab): The first 5 models from each provider should be marked as default. """ from lfx.base.models.unified_models import get_unified_models_detailed from lfx.inputs.inputs import ModelInput class TestModelInputPortVisibility: """Test that ModelInput always shows connection handle based on model_type.""" def test_default_language_model_input_types(self): """By default, ModelInput should have input_types=['LanguageModel'] for language models.""" model_input = ModelInput(name="test_model") assert model_input.input_types == ["LanguageModel"] def test_default_embedding_model_input_types(self): """ModelInput with model_type='embedding' should have input_types=['Embeddings'].""" model_input = ModelInput(name="test_model", model_type="embedding") assert model_input.input_types == ["Embeddings"] def test_input_types_with_model_value(self): """Setting a model value should still have input_types set based on model_type.""" model_input = ModelInput(name="test_model", value=[{"name": "gpt-4o"}]) assert model_input.input_types == ["LanguageModel"] def test_string_value_normalization(self): """String values should be normalized to dict format and input_types should be set.""" model_input = ModelInput(name="test_model", value="gpt-4o") # Value should be normalized assert isinstance(model_input.value, list) if model_input.value: # May be normalized or fallback assert isinstance(model_input.value[0], dict) # Should have connection handle based on model_type assert model_input.input_types == ["LanguageModel"] class TestModelInputValueNormalization: """Test that ModelInput correctly normalizes various value formats.""" def test_single_string_normalized_to_dict(self): """Single string model name should be converted to list of dicts.""" model_input = ModelInput(name="test_model", value="gpt-4o") 
assert isinstance(model_input.value, list) if model_input.value: assert isinstance(model_input.value[0], dict) assert "name" in model_input.value[0] def test_list_of_strings_normalized(self): """List of string model names should be converted to list of dicts.""" model_input = ModelInput(name="test_model", value=["gpt-4o", "gpt-4o-mini"]) assert isinstance(model_input.value, list) if model_input.value: assert all(isinstance(item, dict) for item in model_input.value) assert all("name" in item for item in model_input.value) def test_dict_format_preserved(self): """List of dicts should be preserved as-is.""" value = [{"name": "gpt-4o", "provider": "OpenAI"}] model_input = ModelInput(name="test_model", value=value) assert model_input.value == value def test_none_value_handled(self): """None value should be handled gracefully.""" model_input = ModelInput(name="test_model", value=None) assert model_input.value is None def test_empty_string_handled(self): """Empty string should be handled gracefully.""" model_input = ModelInput(name="test_model", value="") assert model_input.value == "" class TestUnifiedModelsDefaults: """Test that first 5 models per provider are marked as default (fix: cb6208f0ab).""" def test_first_five_models_marked_default(self): """First 5 models from each provider should have default=True in metadata.""" all_providers = get_unified_models_detailed() for provider_data in all_providers: provider = provider_data["provider"] models = provider_data["models"] # Check first 5 models (or all if less than 5) num_to_check = min(5, len(models)) for i in range(num_to_check): model = models[i] assert model["metadata"].get("default") is True, ( f"Model {i} in provider {provider} should be marked as default" ) def test_models_after_five_not_default(self): """Models after the first 5 should not be marked as default.""" all_providers = get_unified_models_detailed() for provider_data in all_providers: provider = provider_data["provider"] models = 
provider_data["models"] # Check models after first 5 if len(models) > 5: for i in range(5, len(models)): model = models[i] # These should not have default=True assert model["metadata"].get("default") is not True, ( f"Model {i} in provider {provider} should not be marked as default" ) def test_only_defaults_filter_works(self): """When only_defaults=True, only first 5 models per provider are returned.""" all_providers = get_unified_models_detailed(only_defaults=True) for provider_data in all_providers: provider = provider_data["provider"] models = provider_data["models"] # Should have at most 5 models assert len(models) <= 5, f"Provider {provider} should have at most 5 models when only_defaults=True" # All returned models should have default=True for model in models: assert model["metadata"].get("default") is True, ( f"All models from {provider} with only_defaults=True should be marked as default" ) def test_defaults_not_affected_by_deprecated_filter(self): """Default marking should work independently of deprecated filtering.""" providers_normal = get_unified_models_detailed(include_deprecated=False) providers_with_deprecated = get_unified_models_detailed(include_deprecated=True) # Both should have defaults marked for provider_data in providers_normal: models = provider_data["models"] if models: # At least first model should be default (if any models exist) assert models[0]["metadata"].get("default") is True for provider_data in providers_with_deprecated: models = provider_data["models"] if models: # At least first model should be default (if any models exist) assert models[0]["metadata"].get("default") is True def test_defaults_applied_after_filtering(self): """Default marking should be based on list order after other filters are applied.""" # Get models for a specific provider providers = get_unified_models_detailed(providers=["OpenAI"]) if providers: provider_data = providers[0] models = provider_data["models"] # First 5 in the filtered list should be marked as 
default num_to_check = min(5, len(models)) for i in range(num_to_check): assert models[i]["metadata"].get("default") is True class TestModelInputRefreshButton: """Test that ModelInput has refresh_button enabled by default.""" def test_refresh_button_default_true(self): """ModelInput should have refresh_button=True by default.""" model_input = ModelInput(name="test_model") assert model_input.refresh_button is True def test_refresh_button_can_be_disabled(self): """ModelInput should allow disabling refresh_button.""" model_input = ModelInput(name="test_model", refresh_button=False) assert model_input.refresh_button is False class TestModelInputEmbeddingType: """Test that ModelInput correctly handles embedding model_type (fix for LE-278). input_types should always be set based on model_type: - "embedding" -> ["Embeddings"] - "language" (default) -> ["LanguageModel"] """ def test_language_model_type_default(self): """Default model_type should be 'language'.""" model_input = ModelInput(name="test_model") assert model_input.model_type == "language" def test_embedding_model_type_can_be_set(self): """model_type can be set to 'embedding'.""" model_input = ModelInput(name="test_model", model_type="embedding") assert model_input.model_type == "embedding" def test_language_type_sets_language_model_input_types(self): """When model_type='language', input_types should be ['LanguageModel'].""" model_input = ModelInput(name="test_model", model_type="language") assert model_input.input_types == ["LanguageModel"] def test_embedding_type_sets_embeddings_input_types(self): """When model_type='embedding', input_types should be ['Embeddings'].""" model_input = ModelInput(name="test_model", model_type="embedding") assert model_input.input_types == ["Embeddings"] def test_embedding_type_with_value(self): """Embedding model with value should still have input_types=['Embeddings'].""" model_input = ModelInput( name="test_model", model_type="embedding", value=[{"name": 
"text-embedding-ada-002"}], ) assert model_input.input_types == ["Embeddings"] def test_explicit_input_types_preserved_for_embedding(self): """If input_types is explicitly set, it should not be overwritten.""" model_input = ModelInput( name="test_model", model_type="embedding", input_types=["Embeddings"], ) # Should preserve the explicit input_types assert model_input.input_types == ["Embeddings"] def test_explicit_input_types_preserved_for_language(self): """If input_types is explicitly set for language model, it should not be overwritten.""" model_input = ModelInput( name="test_model", model_type="language", input_types=["LanguageModel"], ) # Should preserve the explicit input_types assert model_input.input_types == ["LanguageModel"]
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/tests/unit/inputs/test_model_input_fixes.py", "license": "MIT License", "lines": 185, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/src/lfx/base/data/cloud_storage_utils.py
"""Shared utilities for cloud storage operations (AWS S3 and Google Drive). This module provides common functionality used by both read and write file components to avoid code duplication. """ from __future__ import annotations import json from typing import Any def validate_aws_credentials(component: Any) -> None: """Validate that required AWS S3 credentials are present. Args: component: Component instance with AWS credential attributes Raises: ValueError: If any required credential is missing """ if not getattr(component, "aws_access_key_id", None): msg = "AWS Access Key ID is required for S3 storage" raise ValueError(msg) if not getattr(component, "aws_secret_access_key", None): msg = "AWS Secret Key is required for S3 storage" raise ValueError(msg) if not getattr(component, "bucket_name", None): msg = "S3 Bucket Name is required for S3 storage" raise ValueError(msg) def create_s3_client(component: Any): """Create and return a configured boto3 S3 client. Args: component: Component instance with AWS credential attributes Returns: boto3 S3 client instance Raises: ImportError: If boto3 is not installed """ try: import boto3 except ImportError as e: msg = "boto3 is not installed. Please install it using `uv pip install boto3`." raise ImportError(msg) from e client_config = { "aws_access_key_id": component.aws_access_key_id, "aws_secret_access_key": component.aws_secret_access_key, } if hasattr(component, "aws_region") and component.aws_region: client_config["region_name"] = component.aws_region return boto3.client("s3", **client_config) def parse_google_service_account_key(service_account_key: str) -> dict: """Parse Google service account JSON key with multiple fallback strategies. 
This function handles various common formatting issues when users paste service account keys, including: - Control characters - Extra whitespace - Double-encoded JSON strings - Escaped newlines in private_key field Args: service_account_key: Service account JSON key as string Returns: dict: Parsed service account credentials Raises: ValueError: If all parsing strategies fail """ credentials_dict = None parse_errors = [] # Strategy 1: Parse as-is with strict=False to allow control characters try: credentials_dict = json.loads(service_account_key, strict=False) except json.JSONDecodeError as e: parse_errors.append(f"Standard parse: {e!s}") # Strategy 2: Strip whitespace and try again if credentials_dict is None: try: cleaned_key = service_account_key.strip() credentials_dict = json.loads(cleaned_key, strict=False) except json.JSONDecodeError as e: parse_errors.append(f"Stripped parse: {e!s}") # Strategy 3: Check if it's double-encoded (JSON string of a JSON string) if credentials_dict is None: try: decoded_once = json.loads(service_account_key, strict=False) credentials_dict = json.loads(decoded_once, strict=False) if isinstance(decoded_once, str) else decoded_once except json.JSONDecodeError as e: parse_errors.append(f"Double-encoded parse: {e!s}") # Strategy 4: Try to fix common issues with newlines in the private_key field if credentials_dict is None: try: # Replace literal \n with actual newlines which is common in pasted JSON fixed_key = service_account_key.replace("\\n", "\n") credentials_dict = json.loads(fixed_key, strict=False) except json.JSONDecodeError as e: parse_errors.append(f"Newline-fixed parse: {e!s}") if credentials_dict is None: error_details = "; ".join(parse_errors) msg = ( f"Unable to parse service account key JSON. Tried multiple strategies: {error_details}. " "Please ensure you've copied the entire JSON content from your service account key file. 
" "The JSON should start with '{' and contain fields like 'type', 'project_id', 'private_key', etc." ) raise ValueError(msg) return credentials_dict def create_google_drive_service(service_account_key: str, scopes: list[str], *, return_credentials: bool = False): """Create and return a configured Google Drive API service. Args: service_account_key: Service account JSON key as string scopes: List of Google API scopes to request return_credentials: If True, return both service and credentials as tuple Returns: Google Drive API service instance, or tuple of (service, credentials) if return_credentials=True Raises: ImportError: If Google API client libraries are not installed ValueError: If credentials cannot be parsed """ try: from google.oauth2 import service_account from googleapiclient.discovery import build except ImportError as e: msg = "Google API client libraries are not installed. Please install them." raise ImportError(msg) from e credentials_dict = parse_google_service_account_key(service_account_key) credentials = service_account.Credentials.from_service_account_info(credentials_dict, scopes=scopes) service = build("drive", "v3", credentials=credentials) if return_credentials: return service, credentials return service
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/src/lfx/base/data/cloud_storage_utils.py", "license": "MIT License", "lines": 121, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
langflow-ai/langflow:src/backend/tests/unit/test_strip_dynamic_fields.py
"""Tests for the _strip_dynamic_fields function in build_component_index.py.""" import sys from pathlib import Path import pytest @pytest.fixture(scope="session") def strip_dynamic_fields_func(): """Fixture to import and provide the _strip_dynamic_fields function.""" script_path = Path(__file__).parent.parent.parent.parent.parent / "scripts" / "build_component_index.py" if not script_path.exists(): pytest.skip(f"build_component_index.py not found at {script_path}") import importlib.util spec = importlib.util.spec_from_file_location("build_component_index", script_path) build_module = importlib.util.module_from_spec(spec) sys.modules["build_component_index"] = build_module spec.loader.exec_module(build_module) return build_module._strip_dynamic_fields class TestStripDynamicFields: """Test cases for _strip_dynamic_fields function.""" def test_removes_timestamp_from_dict(self, strip_dynamic_fields_func): """Test that timestamp field is removed from a dictionary.""" data = {"name": "test", "timestamp": "2025-12-18 10:00:00", "value": 42} result = strip_dynamic_fields_func(data) assert "timestamp" not in result assert result["name"] == "test" assert result["value"] == 42 def test_preserves_non_dynamic_fields(self, strip_dynamic_fields_func): """Test that non-dynamic fields are preserved.""" data = {"name": "component", "version": "1.0.0", "metadata": {"key": "value"}, "options": ["a", "b"]} result = strip_dynamic_fields_func(data) assert result["name"] == "component" assert result["version"] == "1.0.0" assert result["metadata"] == {"key": "value"} def test_removes_timestamp_from_nested_dict(self, strip_dynamic_fields_func): """Test that timestamp is removed from nested dictionaries.""" data = {"level1": {"level2": {"timestamp": "2025-12-18 10:00:00", "data": "important"}}} result = strip_dynamic_fields_func(data) assert "timestamp" not in result["level1"]["level2"] assert result["level1"]["level2"]["data"] == "important" def test_removes_timestamp_from_list_items(self, 
strip_dynamic_fields_func): """Test that timestamp is removed from items in a list.""" data = [ {"timestamp": "2025-12-18 10:00:00", "id": 1}, {"timestamp": "2025-12-18 10:00:01", "id": 2}, {"id": 3}, ] result = strip_dynamic_fields_func(data) assert all("timestamp" not in item for item in result) assert result[0]["id"] == 1 assert result[1]["id"] == 2 assert result[2]["id"] == 3 def test_handles_empty_dict(self, strip_dynamic_fields_func): """Test that empty dictionary is handled correctly.""" result = strip_dynamic_fields_func({}) assert result == {} def test_handles_empty_list(self, strip_dynamic_fields_func): """Test that empty list is handled correctly.""" result = strip_dynamic_fields_func([]) assert result == [] def test_handles_primitives(self, strip_dynamic_fields_func): """Test that primitive types are returned unchanged.""" assert strip_dynamic_fields_func("string") == "string" assert strip_dynamic_fields_func(42) == 42 assert strip_dynamic_fields_func(3.14) == 3.14 assert strip_dynamic_fields_func(None) is None def test_complex_nested_structure(self, strip_dynamic_fields_func): """Test with a complex nested structure similar to component metadata.""" data = { "version": "1.7.0", "metadata": {"num_modules": 95, "num_components": 355}, "entries": [ [ "Model", { "AstraAssistantManager": { "display_name": "Astra Assistant Manager", "template": { "model_name": { "value": {"data": {"timestamp": "2025-12-18 20:55:52 UTC"}}, "options": ["gpt-4", "gpt-3.5-turbo"], } }, } }, ] ], } result = strip_dynamic_fields_func(data) assert result["version"] == "1.7.0" assert result["metadata"]["num_modules"] == 95 model_value = result["entries"][0][1]["AstraAssistantManager"]["template"]["model_name"]["value"]["data"] assert "timestamp" not in model_value assert result["entries"][0][0] == "Model" def test_mixed_list_with_dicts_and_primitives(self, strip_dynamic_fields_func): """Test list containing both dictionaries and primitives.""" data = [ {"timestamp": "2025-12-18", 
"value": 1}, "string_item", 42, {"id": 2}, ] result = strip_dynamic_fields_func(data) assert "timestamp" not in result[0] assert result[0]["value"] == 1 assert result[1] == "string_item" assert result[2] == 42 assert result[3] == {"id": 2} def test_multiple_timestamps_in_structure(self, strip_dynamic_fields_func): """Test that all timestamp fields at all levels are removed.""" data = { "timestamp": "2025-12-18 10:00:00", "nested": { "timestamp": "2025-12-18 10:00:01", "deep": {"timestamp": "2025-12-18 10:00:02", "value": "keep_this"}, }, "items": [{"timestamp": "2025-12-18 10:00:03", "id": 1}], } result = strip_dynamic_fields_func(data) assert "timestamp" not in result assert "timestamp" not in result["nested"] assert "timestamp" not in result["nested"]["deep"] assert "timestamp" not in result["items"][0] assert result["nested"]["deep"]["value"] == "keep_this" assert result["items"][0]["id"] == 1 def test_preserves_field_order_in_dict(self, strip_dynamic_fields_func): """Test that dictionary key order is preserved (Python 3.7+).""" data = {"aaa": 1, "bbb": 2, "timestamp": "remove_me", "ccc": 3} result = strip_dynamic_fields_func(data) keys = list(result.keys()) assert "timestamp" not in keys assert keys == ["aaa", "bbb", "ccc"] def test_deeply_nested_list_of_dicts(self, strip_dynamic_fields_func): """Test deeply nested list containing dictionaries with timestamps.""" data = { "items": [ { "nested_items": [ {"timestamp": "2025-12-18", "value": 1}, {"timestamp": "2025-12-18", "value": 2}, ] } ] } result = strip_dynamic_fields_func(data) nested_dicts = result["items"][0]["nested_items"] assert all("timestamp" not in d for d in nested_dicts) assert nested_dicts[0]["value"] == 1 assert nested_dicts[1]["value"] == 2
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/test_strip_dynamic_fields.py", "license": "MIT License", "lines": 147, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/src/lfx/run/base.py
"""Core run functionality for executing Langflow graphs.""" import json import re import sys import time from io import StringIO from pathlib import Path from typing import TYPE_CHECKING from lfx.cli.script_loader import ( extract_structured_result, extract_text_from_result, find_graph_variable, load_graph_from_script, ) from lfx.cli.validation import validate_global_variables_for_env from lfx.log.logger import logger from lfx.schema.schema import InputValueRequest if TYPE_CHECKING: from lfx.events.event_manager import EventManager # Verbosity level constants VERBOSITY_DETAILED = 2 VERBOSITY_FULL = 3 class RunError(Exception): """Exception raised when run execution fails.""" def __init__(self, message: str, exception: Exception | None = None): super().__init__(message) self.original_exception = exception def output_error(error_message: str, *, verbose: bool, exception: Exception | None = None) -> dict: """Create error response dict and optionally print to stderr when verbose.""" if verbose: sys.stderr.write(f"{error_message}\n") error_response = { "success": False, "type": "error", } # Add clean exception data if available if exception: error_response["exception_type"] = type(exception).__name__ error_response["exception_message"] = str(exception) else: error_response["exception_message"] = error_message return error_response async def run_flow( script_path: Path | None = None, input_value: str | None = None, input_value_option: str | None = None, output_format: str = "json", flow_json: str | None = None, *, stdin: bool = False, check_variables: bool = True, verbose: bool = False, verbose_detailed: bool = False, verbose_full: bool = False, timing: bool = False, global_variables: dict[str, str] | None = None, user_id: str | None = None, session_id: str | None = None, event_manager: "EventManager | None" = None, ) -> dict: """Execute a Langflow graph script or JSON flow and return the result. 
This function analyzes and executes either a Python script containing a Langflow graph, a JSON flow file, inline JSON, or JSON from stdin, returning the result as a dict. Args: script_path: Path to the Python script (.py) or JSON flow (.json) containing a graph input_value: Input value to pass to the graph (positional argument) input_value_option: Input value to pass to the graph (alternative option) output_format: Format for output (json, text, message, or result) flow_json: Inline JSON flow content as a string stdin: Read JSON flow content from stdin check_variables: Check global variables for environment compatibility verbose: Show basic progress information verbose_detailed: Show detailed progress and debug information verbose_full: Show full debugging output including component logs timing: Include detailed timing information in output global_variables: Dict of global variables to inject into the graph context user_id: Optional user ID for tracking purposes session_id: Optional session ID for memory isolation event_manager: Optional EventManager for streaming token events Returns: dict: Result data containing the execution results, logs, and optionally timing info Raises: RunError: If execution fails at any stage """ # Configure logger based on verbosity level from lfx.log.logger import configure if verbose_full: configure(log_level="DEBUG", output_file=sys.stderr) verbosity = 3 elif verbose_detailed: configure(log_level="DEBUG", output_file=sys.stderr) verbosity = 2 elif verbose: configure(log_level="INFO", output_file=sys.stderr) verbosity = 1 else: configure(log_level="CRITICAL", output_file=sys.stderr) verbosity = 0 start_time = time.time() if timing else None # Use either positional input_value or --input-value option final_input_value = input_value or input_value_option # Validate input sources - exactly one must be provided input_sources = [script_path is not None, flow_json is not None, bool(stdin)] if sum(input_sources) != 1: if sum(input_sources) == 
0: error_msg = "No input source provided. Must provide either script_path, --flow-json, or --stdin" else: error_msg = ( "Multiple input sources provided. Cannot use script_path, --flow-json, and " "--stdin together. Choose exactly one." ) output_error(error_msg, verbose=verbose) raise RunError(error_msg, None) # Store parsed JSON dict for direct loading (avoids temp file round-trip) flow_dict: dict | None = None if flow_json is not None: if verbosity > 0: sys.stderr.write("Processing inline JSON content...\n") try: flow_dict = json.loads(flow_json) if verbosity > 0: sys.stderr.write("JSON content is valid\n") except json.JSONDecodeError as e: error_msg = f"Invalid JSON content: {e}" output_error(error_msg, verbose=verbose) raise RunError(error_msg, e) from e except Exception as e: error_msg = f"Error processing JSON content: {e}" output_error(error_msg, verbose=verbose) raise RunError(error_msg, e) from e elif stdin: if verbosity > 0: sys.stderr.write("Reading JSON content from stdin...\n") try: stdin_content = sys.stdin.read().strip() if not stdin_content: error_msg = "No content received from stdin" output_error(error_msg, verbose=verbose) raise RunError(error_msg, None) flow_dict = json.loads(stdin_content) if verbosity > 0: sys.stderr.write("JSON content from stdin is valid\n") except json.JSONDecodeError as e: error_msg = f"Invalid JSON content from stdin: {e}" output_error(error_msg, verbose=verbose) raise RunError(error_msg, e) from e except Exception as e: error_msg = f"Error reading from stdin: {e}" output_error(error_msg, verbose=verbose) raise RunError(error_msg, e) from e try: # Handle direct JSON dict (from stdin or --flow-json) if flow_dict is not None: if verbosity > 0: sys.stderr.write("Loading graph from JSON content...\n") from lfx.load import aload_flow_from_json graph = await aload_flow_from_json(flow_dict, disable_logs=not verbose) # Handle file path elif script_path is not None: if not script_path.exists(): error_msg = f"File '{script_path}' 
does not exist." raise ValueError(error_msg) if not script_path.is_file(): error_msg = f"'{script_path}' is not a file." raise ValueError(error_msg) file_extension = script_path.suffix.lower() if file_extension not in [".py", ".json"]: error_msg = f"'{script_path}' must be a .py or .json file." raise ValueError(error_msg) file_type = "Python script" if file_extension == ".py" else "JSON flow" if verbosity > 0: sys.stderr.write(f"Analyzing {file_type}: {script_path}\n") if file_extension == ".py": graph_info = find_graph_variable(script_path) if not graph_info: error_msg = ( "No 'graph' variable found in the script. " "Expected to find an assignment like: graph = Graph(...)" ) raise ValueError(error_msg) if verbosity > 0: sys.stderr.write(f"Found 'graph' variable at line {graph_info['line_number']}\n") sys.stderr.write(f"Type: {graph_info['type']}\n") sys.stderr.write(f"Source: {graph_info['source_line']}\n") sys.stderr.write("Loading and executing script...\n") graph = await load_graph_from_script(script_path) else: # .json file if verbosity > 0: sys.stderr.write("Valid JSON flow file detected\n") sys.stderr.write("Loading and executing JSON flow\n") from lfx.load import aload_flow_from_json graph = await aload_flow_from_json(script_path, disable_logs=not verbose) else: error_msg = "No input source provided" raise ValueError(error_msg) # Set user_id on graph if provided (required for some components like AgentComponent) if user_id: graph.user_id = user_id if verbosity > 0: logger.info(f"Set graph user_id: {user_id}") # Set session_id on graph if provided (isolates memory between requests) if session_id: graph.session_id = session_id if verbosity > 0: logger.info(f"Set graph session_id: {session_id}") # Inject global variables into graph context if global_variables: if "request_variables" not in graph.context: graph.context["request_variables"] = {} graph.context["request_variables"].update(global_variables) if verbosity > 0: # Log keys only to avoid leaking 
sensitive data logger.info(f"Injected global variables: {list(global_variables.keys())}") except Exception as e: error_type = type(e).__name__ logger.error(f"Graph loading failed with {error_type}") if verbosity > 0: # Try to identify common error patterns if "ModuleNotFoundError" in str(e) or "No module named" in str(e): logger.info("This appears to be a missing dependency issue") if "langchain" in str(e).lower(): match = re.search(r"langchain_(.*)", str(e).lower()) if match: module_name = match.group(1) logger.info( f"Missing LangChain dependency detected. Try: pip install langchain-{module_name}", ) elif "ImportError" in str(e): logger.info("This appears to be an import issue - check component dependencies") elif "AttributeError" in str(e): logger.info("This appears to be a component configuration issue") # Show full traceback in debug mode logger.exception("Failed to load graph.") error_msg = f"Failed to load graph. {e}" output_error(error_msg, verbose=verbose, exception=e) raise RunError(error_msg, e) from e inputs = InputValueRequest(input_value=final_input_value) if final_input_value else None # Mark end of loading phase if timing load_end_time = time.time() if timing else None if verbosity > 0: sys.stderr.write("Preparing graph for execution...\n") try: # Add detailed preparation steps if verbosity > 0: logger.debug(f"Graph contains {len(graph.vertices)} vertices") logger.debug(f"Graph contains {len(graph.edges)} edges") # Show component types being used component_types = set() for vertex in graph.vertices: if hasattr(vertex, "display_name"): component_types.add(vertex.display_name) logger.debug(f"Component types in graph: {', '.join(sorted(component_types))}") graph.prepare() logger.info("Graph preparation completed") # Validate global variables for environment compatibility if check_variables: logger.info("Validating global variables...") validation_errors = validate_global_variables_for_env(graph) if validation_errors: error_details = "Global variable 
validation failed: " + "; ".join(validation_errors) logger.info(f"Variable validation failed: {len(validation_errors)} errors") for error in validation_errors: logger.debug(f"Validation error: {error}") output_error(error_details, verbose=verbose) raise RunError(error_details, None) logger.info("Global variable validation passed") else: logger.info("Global variable validation skipped") except RunError: raise except Exception as e: error_type = type(e).__name__ logger.info(f"Graph preparation failed with {error_type}") if verbosity > 0: logger.debug(f"Preparation error: {e!s}") logger.exception("Failed to prepare graph - full traceback:") error_msg = f"Failed to prepare graph: {e}" output_error(error_msg, verbose=verbose, exception=e) raise RunError(error_msg, e) from e logger.info("Executing graph...") execution_start_time = time.time() if timing else None if verbose: logger.debug("Setting up execution environment") if inputs: logger.debug(f"Input provided: {inputs.input_value}") else: logger.debug("No input provided") captured_stdout = StringIO() captured_stderr = StringIO() original_stdout = sys.stdout original_stderr = sys.stderr # Track component timing if requested component_timings = [] if timing else None execution_step_start = execution_start_time if timing else None result_count = 0 try: sys.stdout = captured_stdout # Don't capture stderr at high verbosity levels to avoid duplication with direct logging if verbosity < VERBOSITY_FULL: sys.stderr = captured_stderr results = [] logger.info("Starting graph execution...", level="DEBUG") async for result in graph.async_start(inputs, event_manager=event_manager): result_count += 1 if verbosity > 0: logger.debug(f"Processing result #{result_count}") if hasattr(result, "vertex") and hasattr(result.vertex, "display_name"): logger.debug(f"Component: {result.vertex.display_name}") if timing: step_end_time = time.time() step_duration = step_end_time - execution_step_start # Extract component information if 
hasattr(result, "vertex"): component_name = getattr(result.vertex, "display_name", "Unknown") component_id = getattr(result.vertex, "id", "Unknown") component_timings.append( { "component": component_name, "component_id": component_id, "duration": step_duration, "cumulative_time": step_end_time - execution_start_time, } ) execution_step_start = step_end_time results.append(result) logger.info(f"Graph execution completed. Processed {result_count} results") except Exception as e: error_type = type(e).__name__ logger.info(f"Graph execution failed with {error_type}") if verbosity >= VERBOSITY_DETAILED: # Only show details at -vv and above logger.debug(f"Failed after processing {result_count} results") # Only show component output at maximum verbosity (-vvv) if verbosity >= VERBOSITY_FULL: # Capture any output that was generated before the error # Only show captured stdout since stderr logging is already shown directly in verbose mode captured_content = captured_stdout.getvalue() if captured_content.strip(): # Check if captured content contains the same error that will be displayed at the end error_text = str(e) captured_lines = captured_content.strip().split("\n") # Filter out lines that are duplicates of the final error message unique_lines = [ line for line in captured_lines if not any( error_part.strip() in line for error_part in error_text.split("\n") if error_part.strip() ) ] if unique_lines: logger.info("Component output before error:", level="DEBUG") for line in unique_lines: # Log each line directly using the logger to avoid nested formatting if verbosity > 0: # Remove any existing timestamp prefix to avoid duplication clean_line = line if "] " in line and line.startswith("2025-"): # Extract just the log message after the timestamp and level parts = line.split("] ", 1) if len(parts) > 1: clean_line = parts[1] logger.debug(clean_line) # Provide context about common execution errors if "list can't be used in 'await' expression" in str(e): logger.info("This 
appears to be an async/await mismatch in a component") logger.info("Check that async methods are properly awaited") elif "AttributeError" in error_type and "NoneType" in str(e): logger.info("This appears to be a null reference error") logger.info("A component may be receiving unexpected None values") elif "ConnectionError" in str(e) or "TimeoutError" in str(e): logger.info("This appears to be a network connectivity issue") logger.info("Check API keys and network connectivity") logger.exception("Failed to execute graph - full traceback:") sys.stdout = original_stdout sys.stderr = original_stderr error_msg = f"Failed to execute graph: {e}" output_error(error_msg, verbose=verbosity > 0, exception=e) raise RunError(error_msg, e) from e finally: sys.stdout = original_stdout sys.stderr = original_stderr execution_end_time = time.time() if timing else None captured_logs = captured_stdout.getvalue() + captured_stderr.getvalue() # Create timing metadata if requested timing_metadata = None if timing: load_duration = load_end_time - start_time execution_duration = execution_end_time - execution_start_time total_duration = execution_end_time - start_time timing_metadata = { "load_time": round(load_duration, 3), "execution_time": round(execution_duration, 3), "total_time": round(total_duration, 3), "component_timings": [ { "component": ct["component"], "component_id": ct["component_id"], "duration": round(ct["duration"], 3), "cumulative_time": round(ct["cumulative_time"], 3), } for ct in component_timings ], } # Build result based on output format if output_format == "json": result_data = extract_structured_result(results) result_data["logs"] = captured_logs if timing_metadata: result_data["timing"] = timing_metadata return result_data if output_format in {"text", "message"}: result_data = extract_structured_result(results) output_text = result_data.get("result", result_data.get("text", "")) return {"output": str(output_text), "format": output_format} if output_format == 
"result": return {"output": extract_text_from_result(results), "format": "result"} # Default case result_data = extract_structured_result(results) result_data["logs"] = captured_logs if timing_metadata: result_data["timing"] = timing_metadata return result_data
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/src/lfx/run/base.py", "license": "MIT License", "lines": 421, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
langflow-ai/langflow:src/lfx/tests/unit/run/test_base.py
"""Unit tests for the run.base module. This module demonstrates different testing approaches: 1. UNIT TESTS (with mocks): Test individual functions in isolation 2. INTEGRATION TESTS (with real components): Test with actual graphs and components 3. ENVIRONMENT-BASED TESTS: Test with real environment variable injection Strategies to reduce mocking: - Use real components for simple functionality - Create test-specific components that are predictable - Test actual graph execution for critical paths - Mock only external dependencies (file I/O, network calls, etc.) """ import json from io import StringIO from unittest.mock import MagicMock, patch import pytest from lfx.run.base import RunError, output_error, run_flow class TestRunError: """Tests for the RunError exception class.""" def test_run_error_with_message_only(self): """Test RunError with just a message.""" error = RunError("Test error message") assert str(error) == "Test error message" assert error.original_exception is None def test_run_error_with_original_exception(self): """Test RunError with an original exception.""" original = ValueError("Original error") error = RunError("Wrapper message", original) assert str(error) == "Wrapper message" assert error.original_exception is original assert isinstance(error.original_exception, ValueError) def test_run_error_inheritance(self): """Test that RunError inherits from Exception.""" error = RunError("Test") assert isinstance(error, Exception) class TestOutputError: """Tests for the output_error helper function.""" def test_output_error_returns_dict(self): """Test that output_error returns a proper dict structure.""" result = output_error("Test error", verbose=False) assert isinstance(result, dict) assert result["success"] is False assert result["type"] == "error" assert result["exception_message"] == "Test error" def test_output_error_with_exception(self): """Test output_error with an exception provided.""" exc = ValueError("Value error message") result = 
output_error("Test error", verbose=False, exception=exc) assert result["exception_type"] == "ValueError" assert result["exception_message"] == "Value error message" def test_output_error_verbose_writes_to_stderr(self, capsys): """Test that verbose mode writes to stderr.""" output_error("Test error", verbose=True) captured = capsys.readouterr() assert "Test error" in captured.err def test_output_error_non_verbose_silent(self, capsys): """Test that non-verbose mode doesn't write to stderr.""" output_error("Test error", verbose=False) captured = capsys.readouterr() assert captured.err == "" class TestRunFlowInputValidation: """Tests for run_flow input source validation.""" @pytest.mark.asyncio async def test_no_input_source_raises_error(self): """Test that providing no input source raises RunError.""" with pytest.raises(RunError) as exc_info: await run_flow( script_path=None, flow_json=None, stdin=False, ) assert "No input source provided" in str(exc_info.value) @pytest.mark.asyncio async def test_multiple_input_sources_raises_error(self, tmp_path): """Test that providing multiple input sources raises RunError.""" script = tmp_path / "test.py" script.write_text("graph = None") with pytest.raises(RunError) as exc_info: await run_flow( script_path=script, flow_json='{"data": {}}', stdin=False, ) assert "Multiple input sources provided" in str(exc_info.value) @pytest.mark.asyncio async def test_script_path_and_stdin_raises_error(self, tmp_path): """Test that script_path + stdin raises RunError.""" script = tmp_path / "test.py" script.write_text("graph = None") with pytest.raises(RunError) as exc_info: await run_flow( script_path=script, flow_json=None, stdin=True, ) assert "Multiple input sources provided" in str(exc_info.value) class TestRunFlowFileValidation: """Tests for run_flow file path validation.""" @pytest.mark.asyncio async def test_nonexistent_file_raises_error(self, tmp_path): """Test that a non-existent file raises RunError.""" nonexistent = tmp_path / 
"does_not_exist.py" with pytest.raises(RunError) as exc_info: await run_flow(script_path=nonexistent) assert "does not exist" in str(exc_info.value) @pytest.mark.asyncio async def test_directory_instead_of_file_raises_error(self, tmp_path): """Test that a directory raises RunError.""" directory = tmp_path / "test_dir" directory.mkdir() with pytest.raises(RunError) as exc_info: await run_flow(script_path=directory) assert "is not a file" in str(exc_info.value) @pytest.mark.asyncio async def test_invalid_extension_raises_error(self, tmp_path): """Test that an invalid file extension raises RunError.""" txt_file = tmp_path / "test.txt" txt_file.write_text("not a script") with pytest.raises(RunError) as exc_info: await run_flow(script_path=txt_file) assert "must be a .py or .json file" in str(exc_info.value) class TestRunFlowJsonInput: """Tests for run_flow with flow_json input.""" @pytest.mark.asyncio async def test_invalid_json_raises_error(self): """Test that invalid JSON raises RunError.""" with pytest.raises(RunError) as exc_info: await run_flow(flow_json='{"nodes": [invalid') assert "Invalid JSON content" in str(exc_info.value) @pytest.mark.asyncio async def test_valid_json_creates_temp_file_and_loads_graph(self): """Test that valid JSON creates a temporary file and loads the graph.""" valid_json = '{"data": {"nodes": [], "edges": []}}' # Mock the load functions to avoid actual execution with ( patch("lfx.load.aload_flow_from_json") as mock_load, patch("lfx.run.base.validate_global_variables_for_env") as mock_validate, patch("lfx.run.base.extract_structured_result") as mock_extract, ): mock_graph = MagicMock() mock_graph.context = {} mock_graph.vertices = [] mock_graph.edges = [] mock_graph.prepare = MagicMock() async def mock_async_start(_inputs, **_kwargs): yield mock_graph.async_start = mock_async_start mock_load.return_value = mock_graph mock_validate.return_value = [] mock_extract.return_value = {"success": True, "result": "test"} result = await 
run_flow(flow_json=valid_json) # The function should have loaded from JSON successfully mock_load.assert_called_once() assert result["success"] is True class TestRunFlowStdinInput: """Tests for run_flow with stdin input.""" @pytest.mark.asyncio async def test_empty_stdin_raises_error(self): """Test that empty stdin raises RunError.""" with patch("sys.stdin", StringIO("")): with pytest.raises(RunError) as exc_info: await run_flow(stdin=True) assert "No content received from stdin" in str(exc_info.value) @pytest.mark.asyncio async def test_invalid_stdin_json_raises_error(self): """Test that invalid JSON from stdin raises RunError.""" with patch("sys.stdin", StringIO('{"invalid": json')): with pytest.raises(RunError) as exc_info: await run_flow(stdin=True) assert "Invalid JSON content from stdin" in str(exc_info.value) class TestRunFlowPythonScript: """Tests for run_flow with Python script input.""" @pytest.fixture def valid_script(self, tmp_path): """Create a valid Python script with a graph variable.""" script_content = """ from lfx.components.input_output import ChatInput, ChatOutput from lfx.graph import Graph chat_input = ChatInput() chat_output = ChatOutput().set(input_value=chat_input.message_response) graph = Graph(chat_input, chat_output) """ script_path = tmp_path / "valid_script.py" script_path.write_text(script_content) return script_path @pytest.fixture def no_graph_script(self, tmp_path): """Create a script without a graph variable.""" script_content = """ from lfx.components.input_output import ChatInput chat_input = ChatInput() # No graph variable """ script_path = tmp_path / "no_graph.py" script_path.write_text(script_content) return script_path @pytest.mark.asyncio async def test_no_graph_variable_raises_error(self, no_graph_script): """Test that a script without graph variable raises RunError.""" with pytest.raises(RunError) as exc_info: await run_flow(script_path=no_graph_script) assert "No 'graph' variable found" in str(exc_info.value) class 
TestRunFlowGlobalVariables: """Tests for run_flow global variables injection.""" @pytest.mark.asyncio async def test_global_variables_none_does_not_inject(self, tmp_path): """Test that global_variables=None does not inject anything.""" script_path = tmp_path / "test.py" script_path.write_text("graph = None") mock_graph = MagicMock() mock_graph.context = {} mock_graph.vertices = [] mock_graph.edges = [] mock_graph.prepare = MagicMock() async def mock_async_start(_inputs, **_kwargs): yield mock_graph.async_start = mock_async_start with ( patch("lfx.run.base.find_graph_variable") as mock_find, patch("lfx.run.base.load_graph_from_script") as mock_load, patch("lfx.run.base.validate_global_variables_for_env") as mock_validate, patch("lfx.run.base.extract_structured_result") as mock_extract, ): mock_find.return_value = {"line_number": 1, "type": "Graph", "source_line": "graph = Graph(...)"} mock_load.return_value = mock_graph mock_validate.return_value = [] mock_extract.return_value = {"success": True, "result": "test"} await run_flow(script_path=script_path, global_variables=None) # Verify request_variables was not set in context assert "request_variables" not in mock_graph.context @pytest.mark.asyncio async def test_global_variables_injected_into_context(self, tmp_path): """Test that global variables are injected into graph context.""" script_content = """ from lfx.components.input_output import ChatInput, ChatOutput from lfx.graph import Graph chat_input = ChatInput() chat_output = ChatOutput().set(input_value=chat_input.message_response) graph = Graph(chat_input, chat_output) """ script_path = tmp_path / "test_script.py" script_path.write_text(script_content) mock_graph = MagicMock() mock_graph.context = {} mock_graph.vertices = [] mock_graph.edges = [] mock_graph.prepare = MagicMock() async def mock_async_start(_inputs, **_kwargs): yield mock_graph.async_start = mock_async_start with ( patch("lfx.run.base.find_graph_variable") as mock_find, 
patch("lfx.run.base.load_graph_from_script") as mock_load, patch("lfx.run.base.validate_global_variables_for_env") as mock_validate, patch("lfx.run.base.extract_structured_result") as mock_extract, ): mock_find.return_value = {"line_number": 1, "type": "Graph", "source_line": "graph = Graph(...)"} mock_load.return_value = mock_graph mock_validate.return_value = [] mock_extract.return_value = {"success": True, "result": "test"} global_vars = {"API_KEY": "secret123", "DEBUG": "true"} await run_flow( script_path=script_path, global_variables=global_vars, ) # Verify global variables were injected assert "request_variables" in mock_graph.context assert mock_graph.context["request_variables"]["API_KEY"] == "secret123" assert mock_graph.context["request_variables"]["DEBUG"] == "true" class TestRunFlowOutputFormats: """Tests for run_flow output format handling.""" @pytest.fixture def mock_successful_execution(self): """Set up mocks for successful graph execution.""" mock_graph = MagicMock() mock_graph.context = {} mock_graph.vertices = [] mock_graph.edges = [] mock_graph.prepare = MagicMock() async def mock_async_start(_inputs, **_kwargs): yield mock_graph.async_start = mock_async_start return mock_graph @pytest.mark.asyncio async def test_json_format_returns_structured_result(self, tmp_path, mock_successful_execution): """Test that JSON format returns structured result with logs.""" script_path = tmp_path / "test.py" script_path.write_text("graph = None") with ( patch("lfx.run.base.find_graph_variable") as mock_find, patch("lfx.run.base.load_graph_from_script") as mock_load, patch("lfx.run.base.validate_global_variables_for_env") as mock_validate, patch("lfx.run.base.extract_structured_result") as mock_extract, ): mock_find.return_value = {"line_number": 1, "type": "Graph", "source_line": "graph = ..."} mock_load.return_value = mock_successful_execution mock_validate.return_value = [] mock_extract.return_value = {"success": True, "result": "test output"} result = await 
run_flow(script_path=script_path, output_format="json") assert "logs" in result assert result["success"] is True @pytest.mark.asyncio async def test_text_format_returns_output_dict(self, tmp_path, mock_successful_execution): """Test that text format returns dict with output key.""" script_path = tmp_path / "test.py" script_path.write_text("graph = None") with ( patch("lfx.run.base.find_graph_variable") as mock_find, patch("lfx.run.base.load_graph_from_script") as mock_load, patch("lfx.run.base.validate_global_variables_for_env") as mock_validate, patch("lfx.run.base.extract_structured_result") as mock_extract, ): mock_find.return_value = {"line_number": 1, "type": "Graph", "source_line": "graph = ..."} mock_load.return_value = mock_successful_execution mock_validate.return_value = [] mock_extract.return_value = {"result": "test output"} result = await run_flow(script_path=script_path, output_format="text") assert "output" in result assert result["format"] == "text" @pytest.mark.asyncio async def test_message_format_returns_output_dict(self, tmp_path, mock_successful_execution): """Test that message format returns dict with output key.""" script_path = tmp_path / "test.py" script_path.write_text("graph = None") with ( patch("lfx.run.base.find_graph_variable") as mock_find, patch("lfx.run.base.load_graph_from_script") as mock_load, patch("lfx.run.base.validate_global_variables_for_env") as mock_validate, patch("lfx.run.base.extract_structured_result") as mock_extract, ): mock_find.return_value = {"line_number": 1, "type": "Graph", "source_line": "graph = ..."} mock_load.return_value = mock_successful_execution mock_validate.return_value = [] mock_extract.return_value = {"result": "test output"} result = await run_flow(script_path=script_path, output_format="message") assert "output" in result assert result["format"] == "message" @pytest.mark.asyncio async def test_result_format_extracts_text(self, tmp_path, mock_successful_execution): """Test that result format uses 
extract_text_from_result.""" script_path = tmp_path / "test.py" script_path.write_text("graph = None") with ( patch("lfx.run.base.find_graph_variable") as mock_find, patch("lfx.run.base.load_graph_from_script") as mock_load, patch("lfx.run.base.validate_global_variables_for_env") as mock_validate, patch("lfx.run.base.extract_text_from_result") as mock_extract_text, ): mock_find.return_value = {"line_number": 1, "type": "Graph", "source_line": "graph = ..."} mock_load.return_value = mock_successful_execution mock_validate.return_value = [] mock_extract_text.return_value = "extracted text" result = await run_flow(script_path=script_path, output_format="result") assert result["output"] == "extracted text" assert result["format"] == "result" class TestRunFlowTiming: """Tests for run_flow timing functionality.""" @pytest.fixture def mock_successful_execution(self): """Set up mocks for successful graph execution.""" mock_graph = MagicMock() mock_graph.context = {} mock_graph.vertices = [] mock_graph.edges = [] mock_graph.prepare = MagicMock() # Create mock results with vertex info mock_result = MagicMock() mock_result.vertex = MagicMock() mock_result.vertex.display_name = "TestComponent" mock_result.vertex.id = "test-id-123" async def mock_async_start(_inputs, **_kwargs): yield mock_result mock_graph.async_start = mock_async_start return mock_graph @pytest.mark.asyncio async def test_timing_includes_metadata(self, tmp_path, mock_successful_execution): """Test that timing=True includes timing metadata in result.""" script_path = tmp_path / "test.py" script_path.write_text("graph = None") with ( patch("lfx.run.base.find_graph_variable") as mock_find, patch("lfx.run.base.load_graph_from_script") as mock_load, patch("lfx.run.base.validate_global_variables_for_env") as mock_validate, patch("lfx.run.base.extract_structured_result") as mock_extract, ): mock_find.return_value = {"line_number": 1, "type": "Graph", "source_line": "graph = ..."} mock_load.return_value = 
mock_successful_execution mock_validate.return_value = [] mock_extract.return_value = {"success": True, "result": "test"} result = await run_flow(script_path=script_path, timing=True) assert "timing" in result assert "load_time" in result["timing"] assert "execution_time" in result["timing"] assert "total_time" in result["timing"] assert "component_timings" in result["timing"] @pytest.mark.asyncio async def test_timing_false_excludes_metadata(self, tmp_path, mock_successful_execution): """Test that timing=False excludes timing metadata.""" script_path = tmp_path / "test.py" script_path.write_text("graph = None") with ( patch("lfx.run.base.find_graph_variable") as mock_find, patch("lfx.run.base.load_graph_from_script") as mock_load, patch("lfx.run.base.validate_global_variables_for_env") as mock_validate, patch("lfx.run.base.extract_structured_result") as mock_extract, ): mock_find.return_value = {"line_number": 1, "type": "Graph", "source_line": "graph = ..."} mock_load.return_value = mock_successful_execution mock_validate.return_value = [] mock_extract.return_value = {"success": True, "result": "test"} result = await run_flow(script_path=script_path, timing=False) assert "timing" not in result class TestRunFlowVerbosity: """Tests for run_flow verbosity levels.""" @pytest.mark.asyncio async def test_verbose_false_configures_critical_logging(self, tmp_path): """Test that verbose=False configures CRITICAL log level.""" import sys script_path = tmp_path / "test.py" script_path.write_text("graph = None") # Get the actual module from sys.modules (not the instance exported by __init__.py) log_module = sys.modules["lfx.log.logger"] with ( patch.object(log_module, "configure") as mock_configure, patch("lfx.run.base.find_graph_variable") as mock_find, ): mock_find.return_value = None # This will cause an error, but we check configure was called with pytest.raises(RunError): await run_flow(script_path=script_path, verbose=False) mock_configure.assert_called() call_args = 
mock_configure.call_args assert call_args.kwargs.get("log_level") == "CRITICAL" @pytest.mark.asyncio async def test_verbose_true_configures_info_logging(self, tmp_path): """Test that verbose=True configures INFO log level.""" import sys script_path = tmp_path / "test.py" script_path.write_text("graph = None") log_module = sys.modules["lfx.log.logger"] with ( patch.object(log_module, "configure") as mock_configure, patch("lfx.run.base.find_graph_variable") as mock_find, ): mock_find.return_value = None with pytest.raises(RunError): await run_flow(script_path=script_path, verbose=True) mock_configure.assert_called() call_args = mock_configure.call_args assert call_args.kwargs.get("log_level") == "INFO" @pytest.mark.asyncio async def test_verbose_detailed_configures_debug_logging(self, tmp_path): """Test that verbose_detailed=True configures DEBUG log level.""" import sys script_path = tmp_path / "test.py" script_path.write_text("graph = None") log_module = sys.modules["lfx.log.logger"] with ( patch.object(log_module, "configure") as mock_configure, patch("lfx.run.base.find_graph_variable") as mock_find, ): mock_find.return_value = None with pytest.raises(RunError): await run_flow(script_path=script_path, verbose_detailed=True) mock_configure.assert_called() call_args = mock_configure.call_args assert call_args.kwargs.get("log_level") == "DEBUG" @pytest.mark.asyncio async def test_verbose_full_configures_debug_logging(self, tmp_path): """Test that verbose_full=True configures DEBUG log level.""" import sys script_path = tmp_path / "test.py" script_path.write_text("graph = None") log_module = sys.modules["lfx.log.logger"] with ( patch.object(log_module, "configure") as mock_configure, patch("lfx.run.base.find_graph_variable") as mock_find, ): mock_find.return_value = None with pytest.raises(RunError): await run_flow(script_path=script_path, verbose_full=True) mock_configure.assert_called() call_args = mock_configure.call_args assert call_args.kwargs.get("log_level") == 
"DEBUG" class TestRunFlowVariableValidation: """Tests for run_flow global variable validation.""" @pytest.fixture def mock_graph_with_validation_errors(self): """Set up mock graph that triggers validation errors.""" mock_graph = MagicMock() mock_graph.context = {} mock_graph.vertices = [] mock_graph.edges = [] mock_graph.prepare = MagicMock() return mock_graph @pytest.mark.asyncio async def test_validation_errors_raise_run_error(self, tmp_path, mock_graph_with_validation_errors): """Test that validation errors raise RunError.""" script_path = tmp_path / "test.py" script_path.write_text("graph = None") with ( patch("lfx.run.base.find_graph_variable") as mock_find, patch("lfx.run.base.load_graph_from_script") as mock_load, patch("lfx.run.base.validate_global_variables_for_env") as mock_validate, ): mock_find.return_value = {"line_number": 1, "type": "Graph", "source_line": "graph = ..."} mock_load.return_value = mock_graph_with_validation_errors mock_validate.return_value = ["Missing required variable: API_KEY"] with pytest.raises(RunError) as exc_info: await run_flow(script_path=script_path, check_variables=True) assert "Global variable validation failed" in str(exc_info.value) @pytest.mark.asyncio async def test_check_variables_false_skips_validation(self, tmp_path): """Test that check_variables=False skips validation.""" script_path = tmp_path / "test.py" script_path.write_text("graph = None") mock_graph = MagicMock() mock_graph.context = {} mock_graph.vertices = [] mock_graph.edges = [] mock_graph.prepare = MagicMock() async def mock_async_start(_inputs, **_kwargs): yield mock_graph.async_start = mock_async_start with ( patch("lfx.run.base.find_graph_variable") as mock_find, patch("lfx.run.base.load_graph_from_script") as mock_load, patch("lfx.run.base.validate_global_variables_for_env") as mock_validate, patch("lfx.run.base.extract_structured_result") as mock_extract, ): mock_find.return_value = {"line_number": 1, "type": "Graph", "source_line": "graph = ..."} 
mock_load.return_value = mock_graph mock_extract.return_value = {"success": True} await run_flow(script_path=script_path, check_variables=False) # validate_global_variables_for_env should not be called mock_validate.assert_not_called() class TestRunFlowInputValueHandling: """Tests for run_flow input value handling.""" @pytest.mark.asyncio async def test_input_value_takes_precedence(self, tmp_path): """Test that input_value takes precedence over input_value_option.""" script_path = tmp_path / "test.py" script_path.write_text("graph = None") mock_graph = MagicMock() mock_graph.context = {} mock_graph.vertices = [] mock_graph.edges = [] mock_graph.prepare = MagicMock() async def mock_async_start(_inputs, **_kwargs): yield mock_graph.async_start = mock_async_start with ( patch("lfx.run.base.find_graph_variable") as mock_find, patch("lfx.run.base.load_graph_from_script") as mock_load, patch("lfx.run.base.validate_global_variables_for_env") as mock_validate, patch("lfx.run.base.extract_structured_result") as mock_extract, patch("lfx.run.base.InputValueRequest") as mock_input_request, ): mock_find.return_value = {"line_number": 1, "type": "Graph", "source_line": "graph = ..."} mock_load.return_value = mock_graph mock_validate.return_value = [] mock_extract.return_value = {"success": True} await run_flow( script_path=script_path, input_value="positional", input_value_option="option", ) # InputValueRequest should be called with the positional value mock_input_request.assert_called_once_with(input_value="positional") @pytest.mark.asyncio async def test_input_value_option_used_when_no_positional(self, tmp_path): """Test that input_value_option is used when input_value is None.""" script_path = tmp_path / "test.py" script_path.write_text("graph = None") mock_graph = MagicMock() mock_graph.context = {} mock_graph.vertices = [] mock_graph.edges = [] mock_graph.prepare = MagicMock() async def mock_async_start(_inputs, **_kwargs): yield mock_graph.async_start = mock_async_start 
with ( patch("lfx.run.base.find_graph_variable") as mock_find, patch("lfx.run.base.load_graph_from_script") as mock_load, patch("lfx.run.base.validate_global_variables_for_env") as mock_validate, patch("lfx.run.base.extract_structured_result") as mock_extract, patch("lfx.run.base.InputValueRequest") as mock_input_request, ): mock_find.return_value = {"line_number": 1, "type": "Graph", "source_line": "graph = ..."} mock_load.return_value = mock_graph mock_validate.return_value = [] mock_extract.return_value = {"success": True} await run_flow( script_path=script_path, input_value=None, input_value_option="option_value", ) mock_input_request.assert_called_once_with(input_value="option_value") class TestRunFlowJsonFileExecution: """Tests for run_flow JSON file execution.""" @pytest.fixture def simple_json_flow(self, tmp_path): """Create a simple JSON flow file.""" flow_data = { "data": { "nodes": [ { "id": "ChatInput-1", "type": "ChatInput", "data": {"display_name": "Chat Input"}, }, ], "edges": [], } } json_path = tmp_path / "flow.json" json_path.write_text(json.dumps(flow_data)) return json_path @pytest.mark.asyncio async def test_json_file_calls_aload_flow_from_json(self, simple_json_flow): """Test that JSON file uses aload_flow_from_json.""" mock_graph = MagicMock() mock_graph.context = {} mock_graph.vertices = [] mock_graph.edges = [] mock_graph.prepare = MagicMock() async def mock_async_start(_inputs, **_kwargs): yield mock_graph.async_start = mock_async_start with ( patch("lfx.load.aload_flow_from_json") as mock_load_json, patch("lfx.run.base.validate_global_variables_for_env") as mock_validate, patch("lfx.run.base.extract_structured_result") as mock_extract, ): mock_load_json.return_value = mock_graph mock_validate.return_value = [] mock_extract.return_value = {"success": True} await run_flow(script_path=simple_json_flow) mock_load_json.assert_called_once() call_args = mock_load_json.call_args assert call_args[0][0] == simple_json_flow class 
TestRunFlowEnvironmentIntegration: """Integration tests for run_flow with environment variables and real components.""" @pytest.fixture def simple_env_script(self, tmp_path): """Create a simple script that uses environment variables.""" script_content = """ from lfx.components.input_output import ChatInput, ChatOutput from lfx.custom.custom_component.component import Component from lfx.template.field.base import Output, Input from lfx.schema.message import Message from lfx.graph import Graph class EnvReader(Component): inputs = [Input(name="trigger", input_types=["Message"], field_type="Message")] outputs = [Output(name="env_value", method="get_env_value", types=["Message"])] def get_env_value(self) -> Message: # Access request_variables from graph context request_variables = self.graph.context.get("request_variables", {}) # Get TEST_VAR value = request_variables.get("TEST_VAR", "Not Found") return Message(text=f"Value: {value}") chat_input = ChatInput() env_reader = EnvReader() env_reader.set(trigger=chat_input.message_response) chat_output = ChatOutput().set(input_value=env_reader.get_env_value) graph = Graph(chat_input, chat_output) """ script_path = tmp_path / "env_script.py" script_path.write_text(script_content) return script_path @pytest.mark.asyncio async def test_run_flow_with_env_vars_integration(self, simple_env_script): """Integration test that uses environment variables with real components.""" global_vars = {"TEST_VAR": "Hello World"} result = await run_flow( script_path=simple_env_script, global_variables=global_vars, verbose=False, check_variables=False, # Skip validation for this test ) assert result["success"] is True assert "Value: Hello World" in result["result"] @pytest.mark.asyncio async def test_run_flow_without_env_vars_integration(self, simple_env_script): """Integration test without environment variables.""" result = await run_flow( script_path=simple_env_script, global_variables=None, verbose=False, check_variables=False, # Skip 
validation for this test ) assert result["success"] is True assert "Value: Not Found" in result["result"] class TestRunFlowExecutionErrors: """Tests for run_flow execution error handling.""" @pytest.mark.asyncio async def test_graph_execution_error_raises_run_error(self, tmp_path): """Test that graph execution errors are wrapped in RunError.""" script_path = tmp_path / "test.py" script_path.write_text("graph = None") mock_graph = MagicMock() mock_graph.context = {} mock_graph.vertices = [] mock_graph.edges = [] mock_graph.prepare = MagicMock() async def failing_async_start(_inputs, **_kwargs): msg = "Execution failed" raise ValueError(msg) yield # Required to make it an async generator mock_graph.async_start = failing_async_start with ( patch("lfx.run.base.find_graph_variable") as mock_find, patch("lfx.run.base.load_graph_from_script") as mock_load, patch("lfx.run.base.validate_global_variables_for_env") as mock_validate, ): mock_find.return_value = {"line_number": 1, "type": "Graph", "source_line": "graph = ..."} mock_load.return_value = mock_graph mock_validate.return_value = [] with pytest.raises(RunError) as exc_info: await run_flow(script_path=script_path) assert "Failed to execute graph" in str(exc_info.value) assert exc_info.value.original_exception is not None @pytest.mark.asyncio async def test_graph_preparation_error_raises_run_error(self, tmp_path): """Test that graph preparation errors are wrapped in RunError.""" script_path = tmp_path / "test.py" script_path.write_text("graph = None") mock_graph = MagicMock() mock_graph.context = {} mock_graph.vertices = [] mock_graph.edges = [] mock_graph.prepare = MagicMock(side_effect=RuntimeError("Preparation failed")) with ( patch("lfx.run.base.find_graph_variable") as mock_find, patch("lfx.run.base.load_graph_from_script") as mock_load, ): mock_find.return_value = {"line_number": 1, "type": "Graph", "source_line": "graph = ..."} mock_load.return_value = mock_graph with pytest.raises(RunError) as exc_info: await 
run_flow(script_path=script_path) assert "Failed to prepare graph" in str(exc_info.value)
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/tests/unit/run/test_base.py", "license": "MIT License", "lines": 740, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/lfx/tests/unit/run/test_base_integration.py
"""Integration tests for run.base module with minimal mocking.

This file demonstrates how to test run_flow with real components and graphs,
reducing the need for extensive mocking while still maintaining test isolation.
"""

from pathlib import Path

import pytest

from lfx.run.base import RunError, run_flow


class TestRunFlowIntegrationMinimalMocking:
    """Integration tests that use real components with minimal mocking."""

    @pytest.fixture
    def simple_python_graph(self, tmp_path):
        """Create a simple Python graph script that can actually run."""
        # NOTE: the literal below is written to disk and executed by run_flow,
        # so its exact text (imports, assignments) is part of the test contract.
        script_content = """
from lfx.components.input_output import ChatInput, ChatOutput
from lfx.graph import Graph

# Create a simple pass-through graph
chat_input = ChatInput()
chat_output = ChatOutput().set(input_value=chat_input.message_response)

graph = Graph(chat_input, chat_output)
"""
        script_path = tmp_path / "simple_graph.py"
        script_path.write_text(script_content)
        return script_path

    @pytest.fixture
    def env_consuming_graph(self, tmp_path):
        """Create a graph that reads from environment variables."""
        # Single-quoted triple string because the embedded class uses a
        # double-quoted docstring of its own.
        script_content = '''
from lfx.components.input_output import ChatInput, ChatOutput
from lfx.custom.custom_component.component import Component
from lfx.template.field.base import Output, Input
from lfx.schema.message import Message
from lfx.graph import Graph

class EnvironmentReader(Component):
    """A simple component that reads environment variables."""

    inputs = [Input(name="trigger", input_types=["Message"], field_type="Message")]
    outputs = [Output(name="result", method="read_env", types=["Message"])]

    def read_env(self) -> Message:
        # Read from graph context (where env vars are injected)
        env_vars = self.graph.context.get("request_variables", {})
        api_key = env_vars.get("API_KEY", "no-key")
        return Message(text=f"API_KEY={api_key}")

chat_input = ChatInput()
env_reader = EnvironmentReader()
env_reader.set(trigger=chat_input.message_response)
chat_output = ChatOutput().set(input_value=env_reader.read_env)

graph = Graph(chat_input, chat_output)
'''
        script_path = tmp_path / "env_graph.py"
        script_path.write_text(script_content)
        return script_path

    @pytest.mark.asyncio
    async def test_run_flow_with_real_graph_no_env_vars(self, simple_python_graph):
        """Test run_flow with a real graph, no environment variables."""
        result = await run_flow(
            script_path=simple_python_graph,
            input_value="Hello World",
            global_variables=None,
            verbose=False,
            check_variables=False,  # Skip validation for speed
            timing=False,
        )

        assert result["success"] is True
        # Pass-through graph should echo the input back in the result text.
        assert "Hello World" in result["result"]

    @pytest.mark.asyncio
    async def test_run_flow_with_env_vars_real_graph(self, env_consuming_graph):
        """Test run_flow with environment variables using a real graph."""
        env_vars = {"API_KEY": "test-key-12345"}

        result = await run_flow(
            script_path=env_consuming_graph,
            input_value="test",
            global_variables=env_vars,
            verbose=False,
            check_variables=False,
            timing=False,
        )

        assert result["success"] is True
        assert "API_KEY=test-key-12345" in result["result"]

    @pytest.mark.asyncio
    async def test_run_flow_without_env_vars_shows_default(self, env_consuming_graph):
        """Test that missing env vars show default values."""
        result = await run_flow(
            script_path=env_consuming_graph,
            input_value="test",
            global_variables=None,  # No env vars provided
            verbose=False,
            check_variables=False,
            timing=False,
        )

        assert result["success"] is True
        # EnvironmentReader falls back to "no-key" when API_KEY is absent.
        assert "API_KEY=no-key" in result["result"]

    @pytest.mark.asyncio
    async def test_run_flow_json_inline_input(self):
        """Test run_flow with inline JSON (minimal external dependencies)."""
        # Create a simple JSON structure that represents a basic flow
        # Note: This would need to match the actual JSON format expected by the system
        # For now, we'll test the JSON processing part with inline JSON
        # that will be converted to a temp file
        simple_json = '{"test": "data"}'

        # This will fail at graph loading, but tests JSON processing
        with pytest.raises(RunError):
            # Will fail when trying to load the graph
            await run_flow(flow_json=simple_json, verbose=False, check_variables=False)

    @pytest.mark.asyncio
    async def test_run_flow_stdin_input(self):
        """Test run_flow with stdin input."""
        import sys
        from io import StringIO

        # Create mock stdin content
        stdin_content = '{"test": "stdin_data"}'

        # Mock stdin; restored in the finally block so other tests are unaffected.
        old_stdin = sys.stdin
        sys.stdin = StringIO(stdin_content)

        try:
            with pytest.raises(RunError):
                # Will fail at graph loading
                await run_flow(stdin=True, verbose=False, check_variables=False)
        finally:
            sys.stdin = old_stdin


class TestRunFlowErrorHandlingIntegration:
    """Integration tests for error handling with real components."""

    @pytest.mark.asyncio
    async def test_run_flow_invalid_script_path(self):
        """Test error handling with non-existent script."""
        nonexistent = Path("/definitely/does/not/exist.py")

        with pytest.raises(RunError):
            await run_flow(script_path=nonexistent)

    @pytest.mark.asyncio
    async def test_run_flow_invalid_file_extension(self, tmp_path):
        """Test error handling with invalid file extension."""
        invalid_file = tmp_path / "test.txt"
        invalid_file.write_text("not a script")

        with pytest.raises(RunError):
            await run_flow(script_path=invalid_file)


# Example of how to create a test utility for common graph patterns
def create_test_graph_with_env_reader(tmp_path, env_var_name="TEST_VAR", default_value="default"):
    """Utility function to create a test graph that reads environment variables."""
    # f-string template: literal braces in the generated script are doubled
    # ({{}} and {{value}}) so only env_var_name/default_value are interpolated here.
    script_content = f"""
from lfx.components.input_output import ChatInput, ChatOutput
from lfx.custom.custom_component.component import Component
from lfx.template.field.base import Output, Input
from lfx.schema.message import Message
from lfx.graph import Graph

class EnvReader(Component):
    inputs = [Input(name="trigger", input_types=["Message"], field_type="Message")]
    outputs = [Output(name="result", method="read_env", types=["Message"])]

    def read_env(self) -> Message:
        env_vars = self.graph.context.get("request_variables", {{}})
        value = env_vars.get("{env_var_name}", "{default_value}")
        return Message(text=f"{env_var_name}={{value}}")

chat_input = ChatInput()
env_reader = EnvReader()
env_reader.set(trigger=chat_input.message_response)
chat_output = ChatOutput().set(input_value=env_reader.read_env)

graph = Graph(chat_input, chat_output)
"""
    script_path = tmp_path / f"env_reader_{env_var_name.lower()}.py"
    script_path.write_text(script_content)
    return script_path


class TestRunFlowWithTestUtilities:
    """Tests using test utility functions for reduced code duplication."""

    @pytest.mark.asyncio
    async def test_env_reader_with_custom_var(self, tmp_path):
        """Test environment variable reading with custom variable name."""
        script_path = create_test_graph_with_env_reader(tmp_path, env_var_name="CUSTOM_VAR", default_value="not-set")

        result = await run_flow(
            script_path=script_path,
            global_variables={"CUSTOM_VAR": "custom-value"},
            verbose=False,
            check_variables=False,
        )

        assert result["success"] is True
        assert "CUSTOM_VAR=custom-value" in result["result"]

    @pytest.mark.asyncio
    async def test_env_reader_uses_default_when_missing(self, tmp_path):
        """Test that default values are used when env var is missing."""
        script_path = create_test_graph_with_env_reader(
            tmp_path, env_var_name="MISSING_VAR", default_value="default-used"
        )

        result = await run_flow(
            script_path=script_path,
            global_variables={},  # Empty env vars
            verbose=False,
            check_variables=False,
        )

        assert result["success"] is True
        assert "MISSING_VAR=default-used" in result["result"]
{ "repo_id": "langflow-ai/langflow", "file_path": "src/lfx/tests/unit/run/test_base_integration.py", "license": "MIT License", "lines": 183, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/api/v1/test_flows_path_validation.py
"""Unit tests for flow filesystem path validation security."""

from unittest.mock import MagicMock
from uuid import uuid4

import anyio
import pytest
from fastapi import HTTPException

from langflow.api.v1.flows import _get_safe_flow_path
from langflow.services.storage.service import StorageService


@pytest.fixture
def mock_storage_service(tmp_path):
    """Create a mock storage service with a temporary data directory."""
    service = MagicMock(spec=StorageService)
    service.data_dir = tmp_path
    return service


@pytest.fixture
def user_id():
    """Create a test user ID."""
    return uuid4()


class TestPathValidation:
    """Test cases for path validation security."""

    def test_rejects_absolute_path_outside_allowed_directory(self, mock_storage_service, user_id):
        """Test that absolute paths outside the allowed directory are rejected."""
        with pytest.raises(HTTPException) as exc_info:
            _get_safe_flow_path("/etc/passwd", user_id, mock_storage_service)

        assert exc_info.value.status_code == 400
        # The exact wording may vary; accept either phrasing of the boundary error.
        assert "within" in exc_info.value.detail.lower() or "outside" in exc_info.value.detail.lower()

    @pytest.mark.asyncio
    async def test_accepts_absolute_path_within_allowed_directory(self, mock_storage_service, user_id, tmp_path):
        """Test that absolute paths within the user's flows directory are accepted."""
        from pathlib import Path as StdlibPath

        import anyio

        mock_storage_service.data_dir = anyio.Path(tmp_path)
        base_dir = mock_storage_service.data_dir / "flows" / str(user_id)
        await base_dir.mkdir(parents=True, exist_ok=True)

        # Create an absolute path within the allowed directory (resolve to get actual absolute path)
        base_dir_stdlib = StdlibPath(str(base_dir)).resolve()
        allowed_absolute = str(base_dir_stdlib / "my_flow.json")

        path = _get_safe_flow_path(allowed_absolute, user_id, mock_storage_service)
        assert path is not None
        # Verify the returned path matches what we expect
        assert str(path) == allowed_absolute or str(path).endswith("my_flow.json")

    def test_rejects_directory_traversal(self, mock_storage_service, user_id):
        """Test that directory traversal sequences are rejected."""
        with pytest.raises(HTTPException) as exc_info:
            _get_safe_flow_path("../../etc/passwd", user_id, mock_storage_service)

        assert exc_info.value.status_code == 400
        assert "directory traversal" in exc_info.value.detail.lower()

    def test_rejects_backslash_directory_traversal(self, mock_storage_service, user_id):
        """Test that backslash directory traversal is caught after normalization."""
        # Backslash traversal should be normalized first, then caught
        with pytest.raises(HTTPException) as exc_info:
            _get_safe_flow_path("sub\\..\\etc\\passwd", user_id, mock_storage_service)

        assert exc_info.value.status_code == 400
        assert "directory traversal" in exc_info.value.detail.lower()

    def test_rejects_windows_backslash_traversal(self, mock_storage_service, user_id):
        """Test that Windows-style backslash traversal is rejected."""
        with pytest.raises(HTTPException) as exc_info:
            _get_safe_flow_path("folder\\..\\..\\etc\\passwd", user_id, mock_storage_service)

        assert exc_info.value.status_code == 400
        assert "directory traversal" in exc_info.value.detail.lower()

    def test_rejects_multiple_traversal(self, mock_storage_service, user_id):
        """Test that multiple directory traversals are rejected."""
        with pytest.raises(HTTPException) as exc_info:
            _get_safe_flow_path("../../../etc/passwd", user_id, mock_storage_service)

        assert exc_info.value.status_code == 400

    def test_rejects_traversal_in_subpath(self, mock_storage_service, user_id):
        """Test that traversal in subpaths is rejected."""
        with pytest.raises(HTTPException) as exc_info:
            _get_safe_flow_path("subfolder/../../etc/passwd", user_id, mock_storage_service)

        assert exc_info.value.status_code == 400

    def test_rejects_null_bytes(self, mock_storage_service, user_id):
        """Test that null bytes are rejected."""
        with pytest.raises(HTTPException) as exc_info:
            _get_safe_flow_path("file\x00name.json", user_id, mock_storage_service)

        assert exc_info.value.status_code == 400
        assert "null bytes" in exc_info.value.detail.lower()

    def test_rejects_empty_path(self, mock_storage_service, user_id):
        """Test that empty paths are rejected."""
        with pytest.raises(HTTPException) as exc_info:
            _get_safe_flow_path("", user_id, mock_storage_service)

        assert exc_info.value.status_code == 400

    def test_accepts_simple_relative_path(self, mock_storage_service, user_id):
        """Test that simple relative paths are accepted."""
        path = _get_safe_flow_path("my_flow.json", user_id, mock_storage_service)
        assert path is not None
        # Verify it's within the user's flows directory
        assert str(user_id) in str(path)
        assert "flows" in str(path)

    def test_accepts_nested_relative_path(self, mock_storage_service, user_id):
        """Test that nested relative paths are accepted."""
        path = _get_safe_flow_path("subfolder/my_flow.json", user_id, mock_storage_service)
        assert path is not None
        assert str(user_id) in str(path)
        assert "flows" in str(path)
        assert "subfolder" in str(path)

    def test_accepts_deeply_nested_path(self, mock_storage_service, user_id):
        """Test that deeply nested relative paths are accepted."""
        path = _get_safe_flow_path("a/b/c/d/e/flow.json", user_id, mock_storage_service)
        assert path is not None
        assert str(user_id) in str(path)

    def test_path_is_user_isolated(self, mock_storage_service, user_id):  # noqa: ARG002
        """Test that paths are isolated per user."""
        user1_id = uuid4()
        user2_id = uuid4()

        path1 = _get_safe_flow_path("flow.json", user1_id, mock_storage_service)
        path2 = _get_safe_flow_path("flow.json", user2_id, mock_storage_service)

        # Paths should be different and contain their respective user IDs
        assert str(path1) != str(path2)
        assert str(user1_id) in str(path1)
        assert str(user2_id) in str(path2)
        assert str(user1_id) not in str(path2)
        assert str(user2_id) not in str(path1)

    def test_handles_leading_slash_in_relative_path(self, mock_storage_service, user_id):
        """Test that leading slashes in relative paths are handled correctly."""
        _get_safe_flow_path("flow.json", user_id, mock_storage_service)

        # Leading slash makes it absolute, so it will be checked against base directory
        # For a simple "/flow.json", it's not within the base dir, so it should be rejected
        with pytest.raises(HTTPException):
            _get_safe_flow_path("/flow.json", user_id, mock_storage_service)

    def test_accepts_paths_with_double_slash(self, mock_storage_service, user_id):
        """Test that paths with double slashes are normalized (not rejected, but normalized by Path)."""
        # Double slashes are normalized by the Path library, so they're acceptable
        # The security concern is directory traversal, not double slashes
        path = _get_safe_flow_path("sub//folder/file.json", user_id, mock_storage_service)
        assert path is not None

    def test_accepts_valid_filename_characters(self, mock_storage_service, user_id):
        """Test that valid filename characters are accepted."""
        valid_paths = [
            "flow.json",
            "my-flow.json",
            "flow_123.json",
            "flow.name.json",
            "subfolder/flow.json",
            "flow (1).json",
        ]

        for valid_path in valid_paths:
            path = _get_safe_flow_path(valid_path, user_id, mock_storage_service)
            assert path is not None

    @pytest.mark.asyncio
    async def test_path_resolves_within_base_directory(self, mock_storage_service, user_id, tmp_path):
        """Test that resolved paths stay within the base directory."""
        from pathlib import Path as StdlibPath

        # Create a real path structure to test resolution
        mock_storage_service.data_dir = anyio.Path(tmp_path)

        path = _get_safe_flow_path("flow.json", user_id, mock_storage_service)

        # Use stdlib Path for synchronous resolution check
        resolved = StdlibPath(str(path)).resolve()
        base_dir_str = str(mock_storage_service.data_dir / "flows" / str(user_id))
        resolved_base = StdlibPath(base_dir_str).resolve()

        # Resolved path should start with resolved base
        assert str(resolved).startswith(str(resolved_base))
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/api/v1/test_flows_path_validation.py", "license": "MIT License", "lines": 151, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/base/langflow/utils/mcp_cleanup.py
"""MCP subprocess cleanup utilities for graceful shutdown.

This module provides functions to properly terminate MCP server subprocesses
spawned by stdio_client during Langflow shutdown.

Works on macOS and Linux only.
"""

from __future__ import annotations

import contextlib
import sys
from typing import TYPE_CHECKING

from lfx.log.logger import logger

if TYPE_CHECKING:
    # Imported only for annotations; at runtime psutil is imported lazily
    # inside _kill_mcp_processes and passed down as a parameter.
    import psutil as psutil_type


async def cleanup_mcp_sessions() -> None:
    """Cleanup all MCP sessions to ensure subprocesses are properly terminated.

    This function should be called at the very beginning of the shutdown sequence
    to ensure MCP subprocesses are killed even if shutdown is interrupted.
    """
    # Best-effort: any failure here (missing services, cache errors) must not
    # abort the shutdown sequence, hence the broad suppress.
    with contextlib.suppress(Exception):
        from lfx.base.mcp.util import MCPSessionManager
        from lfx.services.cache.utils import CACHE_MISS

        from langflow.services.deps import get_shared_component_cache_service

        cache_service = get_shared_component_cache_service()
        session_manager = cache_service.get("mcp_session_manager")
        # The cache may return a CACHE_MISS sentinel or an unrelated object;
        # only call cleanup_all on a real MCPSessionManager.
        if session_manager is not CACHE_MISS and isinstance(session_manager, MCPSessionManager):
            await session_manager.cleanup_all()

    # Fallback: Kill any MCP server processes (Unix only)
    with contextlib.suppress(Exception):
        await _kill_mcp_processes()


async def _kill_mcp_processes() -> None:
    """Kill MCP server subprocesses spawned by this Langflow process.

    This is a fallback for when the normal cleanup doesn't properly terminate
    subprocesses spawned by stdio_client.

    Works on macOS and Linux only.
    """
    # Windows is explicitly unsupported (orphan detection relies on ppid=1).
    if sys.platform == "win32":
        return

    try:
        import psutil
    except ImportError:
        # psutil is an optional dependency here; without it we simply skip
        # the fallback rather than fail shutdown.
        return

    with contextlib.suppress(Exception):
        killed_count = await _terminate_child_mcp_processes(psutil)
        killed_count += await _terminate_orphaned_mcp_processes(psutil)

        if killed_count > 0:
            await logger.ainfo(f"Killed {killed_count} MCP processes")


async def _terminate_child_mcp_processes(psutil: psutil_type) -> int:
    """Terminate MCP processes that are children of this process.

    Returns the number of processes terminated.
    """
    killed_count = 0
    try:
        current_process = psutil.Process()
        children = current_process.children(recursive=True)
    except psutil.NoSuchProcess:
        # Our own process entry disappeared (unlikely mid-shutdown); nothing to do.
        return 0

    for proc in children:
        if await _try_terminate_mcp_process(proc, psutil):
            killed_count += 1
    return killed_count


async def _terminate_orphaned_mcp_processes(psutil: psutil_type) -> int:
    """Terminate orphaned MCP processes (ppid=1) on Unix systems.

    Returns the number of processes terminated.

    NOTE(review): this scans ALL system processes re-parented to init (ppid=1),
    so it may also match MCP servers orphaned by other Langflow instances on
    the same host — presumably intentional for a cleanup fallback, but worth
    confirming.
    """
    killed_count = 0
    for proc in psutil.process_iter(["pid", "ppid", "cmdline"]):
        try:
            info = proc.info
            # Only consider orphans: processes whose parent is init/launchd (pid 1).
            if info.get("ppid", 0) != 1:
                continue
            if await _try_terminate_mcp_process(proc, psutil):
                killed_count += 1
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            # Process vanished or is inaccessible between iteration and inspection.
            continue
    return killed_count


async def _try_terminate_mcp_process(proc: psutil_type.Process, psutil: psutil_type) -> bool:
    """Try to terminate a process if it's an MCP server process.

    Returns True if the process was terminated, False otherwise.
    """
    try:
        cmdline = proc.cmdline()
        cmdline_str = " ".join(cmdline) if cmdline else ""
        # Identify MCP processes by command-line substring match.
        if "mcp-server" not in cmdline_str and "mcp-proxy" not in cmdline_str:
            return False
        # Graceful first: SIGTERM, then escalate to SIGKILL after 2 seconds.
        proc.terminate()
        try:
            proc.wait(timeout=2)
        except psutil.TimeoutExpired:
            proc.kill()
    except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
        return False
    else:
        # Reached only when terminate/kill succeeded without raising.
        return True
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/utils/mcp_cleanup.py", "license": "MIT License", "lines": 88, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
langflow-ai/langflow:src/backend/tests/unit/utils/test_mcp_cleanup.py
"""Tests for MCP cleanup utilities.""" import sys from unittest.mock import AsyncMock, MagicMock, patch import pytest from langflow.utils.mcp_cleanup import ( _kill_mcp_processes, _terminate_child_mcp_processes, _terminate_orphaned_mcp_processes, _try_terminate_mcp_process, cleanup_mcp_sessions, ) pytestmark = pytest.mark.asyncio class TestCleanupMcpSessions: """Tests for cleanup_mcp_sessions function.""" async def test_cleanup_with_valid_session_manager(self): """Test cleanup when a valid MCPSessionManager exists in cache.""" mock_session_manager = MagicMock() mock_session_manager.cleanup_all = AsyncMock() mock_cache_service = MagicMock() mock_cache_service.get.return_value = mock_session_manager with ( patch( "langflow.services.deps.get_shared_component_cache_service", return_value=mock_cache_service, ), patch("langflow.utils.mcp_cleanup._kill_mcp_processes", new_callable=AsyncMock) as mock_kill, patch("lfx.base.mcp.util.MCPSessionManager", new=type(mock_session_manager)), ): await cleanup_mcp_sessions() mock_session_manager.cleanup_all.assert_called_once() mock_kill.assert_called_once() async def test_cleanup_with_cache_miss(self): """Test cleanup when no session manager exists (cache miss).""" mock_cache_service = MagicMock() cache_miss_sentinel = object() with ( patch("lfx.services.cache.utils.CACHE_MISS", cache_miss_sentinel), patch( "langflow.services.deps.get_shared_component_cache_service", return_value=mock_cache_service, ), patch("langflow.utils.mcp_cleanup._kill_mcp_processes", new_callable=AsyncMock) as mock_kill, ): mock_cache_service.get.return_value = cache_miss_sentinel await cleanup_mcp_sessions() # Should still call the fallback kill function mock_kill.assert_called_once() async def test_cleanup_handles_import_error(self): """Test cleanup handles import errors gracefully.""" with ( patch.dict("sys.modules", {"lfx.base.mcp.util": None}), patch("langflow.utils.mcp_cleanup._kill_mcp_processes", new_callable=AsyncMock) as mock_kill, ): # Should not 
raise, should silently continue await cleanup_mcp_sessions() mock_kill.assert_called_once() async def test_cleanup_handles_exception_in_session_manager(self): """Test cleanup handles exceptions from session manager gracefully.""" mock_session_manager = MagicMock() mock_session_manager.cleanup_all = AsyncMock(side_effect=Exception("Test error")) mock_cache_service = MagicMock() mock_cache_service.get.return_value = mock_session_manager with ( patch( "langflow.services.deps.get_shared_component_cache_service", return_value=mock_cache_service, ), patch("langflow.utils.mcp_cleanup._kill_mcp_processes", new_callable=AsyncMock) as mock_kill, patch("lfx.base.mcp.util.MCPSessionManager", new=type(mock_session_manager)), ): # Should not raise await cleanup_mcp_sessions() # Fallback should still be called mock_kill.assert_called_once() class TestKillMcpProcesses: """Tests for _kill_mcp_processes function.""" async def test_skips_on_windows(self): """Test that the function skips on Windows.""" with patch.object(sys, "platform", "win32"): # Should return immediately without doing anything await _kill_mcp_processes() async def test_skips_when_psutil_not_available(self): """Test that the function handles missing psutil gracefully.""" with ( patch.object(sys, "platform", "darwin"), patch.dict("sys.modules", {"psutil": None}), ): # Should not raise await _kill_mcp_processes() async def test_kills_child_and_orphaned_processes(self): """Test that both child and orphaned processes are terminated.""" mock_psutil = MagicMock() with ( patch.object(sys, "platform", "darwin"), patch.dict("sys.modules", {"psutil": mock_psutil}), patch( "langflow.utils.mcp_cleanup._terminate_child_mcp_processes", new_callable=AsyncMock, return_value=2, ) as mock_child, patch( "langflow.utils.mcp_cleanup._terminate_orphaned_mcp_processes", new_callable=AsyncMock, return_value=1, ) as mock_orphan, ): await _kill_mcp_processes() mock_child.assert_called_once() mock_orphan.assert_called_once() async def 
test_logs_killed_count(self): """Test that killed process count is logged.""" mock_psutil = MagicMock() with ( patch.object(sys, "platform", "darwin"), patch.dict("sys.modules", {"psutil": mock_psutil}), patch( "langflow.utils.mcp_cleanup._terminate_child_mcp_processes", new_callable=AsyncMock, return_value=3, ), patch( "langflow.utils.mcp_cleanup._terminate_orphaned_mcp_processes", new_callable=AsyncMock, return_value=2, ), patch("langflow.utils.mcp_cleanup.logger") as mock_logger, ): mock_logger.ainfo = AsyncMock() await _kill_mcp_processes() mock_logger.ainfo.assert_called_once() call_args = mock_logger.ainfo.call_args[0][0] assert "5" in call_args # 3 + 2 = 5 async def test_does_not_log_when_no_processes_killed(self): """Test that no log is made when no processes are killed.""" mock_psutil = MagicMock() with ( patch.object(sys, "platform", "darwin"), patch.dict("sys.modules", {"psutil": mock_psutil}), patch( "langflow.utils.mcp_cleanup._terminate_child_mcp_processes", new_callable=AsyncMock, return_value=0, ), patch( "langflow.utils.mcp_cleanup._terminate_orphaned_mcp_processes", new_callable=AsyncMock, return_value=0, ), patch("langflow.utils.mcp_cleanup.logger") as mock_logger, ): mock_logger.ainfo = AsyncMock() await _kill_mcp_processes() mock_logger.ainfo.assert_not_called() class TestTerminateChildMcpProcesses: """Tests for _terminate_child_mcp_processes function.""" async def test_terminates_mcp_child_processes(self): """Test that MCP child processes are terminated.""" mock_psutil = MagicMock() mock_mcp_proc = MagicMock() mock_mcp_proc.cmdline.return_value = ["python", "mcp-server-filesystem", "/tmp"] # noqa: S108 mock_mcp_proc.terminate = MagicMock() mock_mcp_proc.wait = MagicMock() mock_other_proc = MagicMock() mock_other_proc.cmdline.return_value = ["python", "other_script.py"] mock_current = MagicMock() mock_current.children.return_value = [mock_mcp_proc, mock_other_proc] mock_psutil.Process.return_value = mock_current mock_psutil.NoSuchProcess = 
Exception mock_psutil.AccessDenied = Exception mock_psutil.ZombieProcess = Exception mock_psutil.TimeoutExpired = Exception with patch( "langflow.utils.mcp_cleanup._try_terminate_mcp_process", new_callable=AsyncMock, side_effect=[True, False], ): count = await _terminate_child_mcp_processes(mock_psutil) assert count == 1 async def test_handles_no_such_process_on_children(self): """Test handling when current process doesn't exist.""" mock_psutil = MagicMock() mock_current = MagicMock() mock_current.children.side_effect = mock_psutil.NoSuchProcess mock_psutil.Process.return_value = mock_current mock_psutil.NoSuchProcess = Exception count = await _terminate_child_mcp_processes(mock_psutil) assert count == 0 class TestTerminateOrphanedMcpProcesses: """Tests for _terminate_orphaned_mcp_processes function.""" async def test_terminates_orphaned_mcp_processes(self): """Test that orphaned MCP processes (ppid=1) are terminated.""" mock_psutil = MagicMock() # Orphaned MCP process (ppid=1) mock_orphan_mcp = MagicMock() mock_orphan_mcp.info = { "pid": 12345, "ppid": 1, "cmdline": ["python", "mcp-server-filesystem", "/tmp"], # noqa: S108 } # Non-orphaned MCP process mock_non_orphan = MagicMock() mock_non_orphan.info = { "pid": 12346, "ppid": 1000, "cmdline": ["python", "mcp-server-filesystem", "/tmp"], # noqa: S108 } # Orphaned non-MCP process mock_orphan_other = MagicMock() mock_orphan_other.info = { "pid": 12347, "ppid": 1, "cmdline": ["python", "other_script.py"], } mock_psutil.process_iter.return_value = [mock_orphan_mcp, mock_non_orphan, mock_orphan_other] mock_psutil.NoSuchProcess = Exception mock_psutil.AccessDenied = Exception mock_psutil.ZombieProcess = Exception with patch( "langflow.utils.mcp_cleanup._try_terminate_mcp_process", new_callable=AsyncMock, side_effect=[True, False], # Only first call (orphan_mcp) returns True ): count = await _terminate_orphaned_mcp_processes(mock_psutil) # Only the orphaned MCP process should be attempted (ppid=1) # The non-orphan 
(ppid=1000) should be skipped before _try_terminate is called assert count == 1 async def test_skips_non_orphaned_processes(self): """Test that non-orphaned processes are skipped.""" mock_psutil = MagicMock() mock_proc = MagicMock() mock_proc.info = { "pid": 12345, "ppid": 1000, # Not orphaned "cmdline": ["python", "mcp-server-filesystem"], } mock_psutil.process_iter.return_value = [mock_proc] mock_psutil.NoSuchProcess = Exception mock_psutil.AccessDenied = Exception mock_psutil.ZombieProcess = Exception with patch( "langflow.utils.mcp_cleanup._try_terminate_mcp_process", new_callable=AsyncMock, ) as mock_terminate: count = await _terminate_orphaned_mcp_processes(mock_psutil) assert count == 0 mock_terminate.assert_not_called() async def test_handles_access_denied(self): """Test handling AccessDenied exception during iteration.""" mock_psutil = MagicMock() mock_proc = MagicMock() mock_proc.info = property(lambda _: (_ for _ in ()).throw(mock_psutil.AccessDenied)) # Make info raise AccessDenied type(mock_proc).info = property(lambda _: (_ for _ in ()).throw(mock_psutil.AccessDenied)) mock_psutil.process_iter.return_value = [mock_proc] mock_psutil.NoSuchProcess = type("NoSuchProcess", (Exception,), {}) mock_psutil.AccessDenied = type("AccessDenied", (Exception,), {}) mock_psutil.ZombieProcess = type("ZombieProcess", (Exception,), {}) # Should not raise count = await _terminate_orphaned_mcp_processes(mock_psutil) assert count == 0 class TestTryTerminateMcpProcess: """Tests for _try_terminate_mcp_process function.""" async def test_terminates_mcp_server_process(self): """Test termination of mcp-server process.""" mock_psutil = MagicMock() mock_psutil.NoSuchProcess = Exception mock_psutil.AccessDenied = Exception mock_psutil.ZombieProcess = Exception mock_psutil.TimeoutExpired = Exception mock_proc = MagicMock() mock_proc.cmdline.return_value = ["python", "mcp-server-filesystem", "/tmp"] # noqa: S108 mock_proc.terminate = MagicMock() mock_proc.wait = MagicMock() result 
= await _try_terminate_mcp_process(mock_proc, mock_psutil) assert result is True mock_proc.terminate.assert_called_once() mock_proc.wait.assert_called_once_with(timeout=2) async def test_terminates_mcp_proxy_process(self): """Test termination of mcp-proxy process.""" mock_psutil = MagicMock() mock_psutil.NoSuchProcess = Exception mock_psutil.AccessDenied = Exception mock_psutil.ZombieProcess = Exception mock_psutil.TimeoutExpired = Exception mock_proc = MagicMock() mock_proc.cmdline.return_value = ["mcp-proxy", "--port", "8080"] mock_proc.terminate = MagicMock() mock_proc.wait = MagicMock() result = await _try_terminate_mcp_process(mock_proc, mock_psutil) assert result is True mock_proc.terminate.assert_called_once() async def test_skips_non_mcp_process(self): """Test that non-MCP processes are skipped.""" mock_psutil = MagicMock() mock_psutil.NoSuchProcess = Exception mock_psutil.AccessDenied = Exception mock_psutil.ZombieProcess = Exception mock_proc = MagicMock() mock_proc.cmdline.return_value = ["python", "some_other_script.py"] result = await _try_terminate_mcp_process(mock_proc, mock_psutil) assert result is False mock_proc.terminate.assert_not_called() async def test_kills_process_on_timeout(self): """Test that process is killed when terminate times out.""" mock_psutil = MagicMock() mock_psutil.NoSuchProcess = Exception mock_psutil.AccessDenied = Exception mock_psutil.ZombieProcess = Exception mock_psutil.TimeoutExpired = type("TimeoutExpired", (Exception,), {}) mock_proc = MagicMock() mock_proc.cmdline.return_value = ["python", "mcp-server-test"] mock_proc.terminate = MagicMock() mock_proc.wait = MagicMock(side_effect=mock_psutil.TimeoutExpired) mock_proc.kill = MagicMock() result = await _try_terminate_mcp_process(mock_proc, mock_psutil) assert result is True mock_proc.terminate.assert_called_once() mock_proc.kill.assert_called_once() async def test_handles_no_such_process(self): """Test handling when process no longer exists.""" mock_psutil = MagicMock() 
mock_psutil.NoSuchProcess = type("NoSuchProcess", (Exception,), {}) mock_psutil.AccessDenied = Exception mock_psutil.ZombieProcess = Exception mock_proc = MagicMock() mock_proc.cmdline.side_effect = mock_psutil.NoSuchProcess result = await _try_terminate_mcp_process(mock_proc, mock_psutil) assert result is False async def test_handles_access_denied(self): """Test handling when access is denied.""" mock_psutil = MagicMock() mock_psutil.NoSuchProcess = Exception mock_psutil.AccessDenied = type("AccessDenied", (Exception,), {}) mock_psutil.ZombieProcess = Exception mock_proc = MagicMock() mock_proc.cmdline.side_effect = mock_psutil.AccessDenied result = await _try_terminate_mcp_process(mock_proc, mock_psutil) assert result is False async def test_handles_zombie_process(self): """Test handling zombie processes.""" mock_psutil = MagicMock() mock_psutil.NoSuchProcess = Exception mock_psutil.AccessDenied = Exception mock_psutil.ZombieProcess = type("ZombieProcess", (Exception,), {}) mock_proc = MagicMock() mock_proc.cmdline.side_effect = mock_psutil.ZombieProcess result = await _try_terminate_mcp_process(mock_proc, mock_psutil) assert result is False async def test_handles_empty_cmdline(self): """Test handling when cmdline returns empty list.""" mock_psutil = MagicMock() mock_psutil.NoSuchProcess = Exception mock_psutil.AccessDenied = Exception mock_psutil.ZombieProcess = Exception mock_proc = MagicMock() mock_proc.cmdline.return_value = [] result = await _try_terminate_mcp_process(mock_proc, mock_psutil) assert result is False async def test_handles_none_cmdline(self): """Test handling when cmdline returns None.""" mock_psutil = MagicMock() mock_psutil.NoSuchProcess = Exception mock_psutil.AccessDenied = Exception mock_psutil.ZombieProcess = Exception mock_proc = MagicMock() mock_proc.cmdline.return_value = None result = await _try_terminate_mcp_process(mock_proc, mock_psutil) assert result is False class TestMcpCleanupIntegration: """Integration tests for MCP 
cleanup.""" async def test_full_cleanup_flow_success(self): """Test the complete cleanup flow when everything works.""" mock_session_manager = MagicMock() mock_session_manager.cleanup_all = AsyncMock() mock_cache_service = MagicMock() mock_cache_service.get.return_value = mock_session_manager with ( patch( "langflow.services.deps.get_shared_component_cache_service", return_value=mock_cache_service, ), patch("langflow.utils.mcp_cleanup._kill_mcp_processes", new_callable=AsyncMock), patch("lfx.base.mcp.util.MCPSessionManager", new=type(mock_session_manager)), ): # Should complete without raising await cleanup_mcp_sessions() async def test_full_cleanup_flow_with_all_errors(self): """Test that cleanup continues even when everything fails.""" mock_cache_service = MagicMock() mock_cache_service.get.side_effect = Exception("Cache error") with ( patch( "langflow.services.deps.get_shared_component_cache_service", return_value=mock_cache_service, ), patch( "langflow.utils.mcp_cleanup._kill_mcp_processes", new_callable=AsyncMock, side_effect=Exception("Kill error"), ), ): # Should not raise even with all errors await cleanup_mcp_sessions() async def test_cleanup_is_silent_on_errors(self): """Test that cleanup doesn't log errors (silent failure during shutdown).""" mock_cache_service = MagicMock() mock_cache_service.get.side_effect = Exception("Some error") with ( patch( "langflow.services.deps.get_shared_component_cache_service", return_value=mock_cache_service, ), patch("langflow.utils.mcp_cleanup._kill_mcp_processes", new_callable=AsyncMock), patch("langflow.utils.mcp_cleanup.logger") as mock_logger, ): mock_logger.awarning = AsyncMock() mock_logger.aerror = AsyncMock() await cleanup_mcp_sessions() # Should not log errors during cleanup (silent failure) mock_logger.awarning.assert_not_called() mock_logger.aerror.assert_not_called()
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/utils/test_mcp_cleanup.py", "license": "MIT License", "lines": 421, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/base/langflow/tests/api/v1/test_openai_responses_error.py
"""Test OpenAI Responses Error Handling.""" import json from unittest.mock import MagicMock, patch import pytest from fastapi.testclient import TestClient from langflow.main import create_app @pytest.fixture def client(): app = create_app() return TestClient(app) @pytest.mark.asyncio async def test_openai_response_stream_error_handling(client): """Test that errors during streaming are correctly propagated to the client. Ensure errors are propagated as OpenAI-compatible error responses. """ # Mock api_key_security dependency from langflow.services.auth.utils import api_key_security from langflow.services.database.models.user.model import UserRead async def mock_api_key_security(): from datetime import datetime, timezone now = datetime.now(timezone.utc) return UserRead( id="00000000-0000-0000-0000-000000000000", username="testuser", is_active=True, is_superuser=False, create_at=now, updated_at=now, profile_image=None, store_api_key=None, last_login_at=None, optins=None, ) client.app.dependency_overrides[api_key_security] = mock_api_key_security # Mock the flow execution to simulate an error during streaming with ( patch("langflow.api.v1.openai_responses.get_flow_by_id_or_endpoint_name") as mock_get_flow, patch("langflow.api.v1.openai_responses.run_flow_generator") as _, patch("langflow.api.v1.openai_responses.consume_and_yield") as mock_consume, ): # Setup mock flow mock_flow = MagicMock() mock_flow.data = {"nodes": [{"data": {"type": "ChatInput"}}, {"data": {"type": "ChatOutput"}}]} mock_get_flow.return_value = mock_flow # We need to simulate the event manager queue behavior # The run_flow_generator in the actual code puts events into the event_manager # which puts them into the queue. 
# Instead of mocking the complex event manager interaction, we can mock # consume_and_yield to yield our simulated error event # Simulate an error event from the queue error_event = json.dumps({"event": "error", "data": {"error": "Simulated streaming error"}}).encode("utf-8") # Yield error event then None to end stream async def event_generator(*_, **__): yield error_event yield None mock_consume.side_effect = event_generator # Make the request response = client.post( "/api/v1/responses", json={"model": "test-flow-id", "input": "test input", "stream": True}, headers={"Authorization": "Bearer test-key"}, ) # Check response assert response.status_code == 200 content = response.content.decode("utf-8") # Verify we got the error event in the stream assert ( "event: error" not in content ) # OpenAI format doesn't use event: error for the data payload itself usually, but let's check the data # We expect a data line with the error JSON # The fix implementation: yield f"data: {json.dumps(error_response)}\n\n" expected_error_part = '"message": "Simulated streaming error"' assert expected_error_part in content assert '"type": "processing_error"' in content # Clean up overrides client.app.dependency_overrides = {}
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/tests/api/v1/test_openai_responses_error.py", "license": "MIT License", "lines": 76, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/test_starter_projects.py
"""Test suite for starter project JSON files. Verifies that starter project JSON files are properly structured and that: - noteNode types have width/height at the root level - Other node types have width/height removed from root level """ import json from pathlib import Path import pytest STARTER_PROJECTS_DIR = Path(__file__).parent.parent / "base" / "langflow" / "initial_setup" / "starter_projects" def get_starter_project_files() -> list[Path]: """Get all starter project JSON files.""" if not STARTER_PROJECTS_DIR.exists(): msg = f"Starter projects directory not found: {STARTER_PROJECTS_DIR}" raise FileNotFoundError(msg) from None json_files = sorted(STARTER_PROJECTS_DIR.glob("*.json")) if not json_files: msg = f"No JSON files found in {STARTER_PROJECTS_DIR}" raise FileNotFoundError(msg) from None return json_files def load_json_file(json_file: Path) -> dict: """Load and parse a JSON file.""" try: with json_file.open(encoding="utf-8") as f: return json.load(f) except json.JSONDecodeError as e: msg = f"Invalid JSON in {json_file.name}: {e}" raise ValueError(msg) from e except Exception as e: msg = f"Error reading {json_file.name}: {e}" raise OSError(msg) from e @pytest.mark.parametrize("json_file", get_starter_project_files(), ids=lambda f: f.name) class TestStarterProjects: """Test suite for all starter project JSON files.""" def test_json_validity(self, json_file: Path): """Test that JSON file is valid and can be parsed.""" data = load_json_file(json_file) assert isinstance(data, dict), f"{json_file.name} should be a valid JSON object" def test_width_height_at_node_level(self, json_file: Path): """Test that width/height are removed from node root level for all node types EXCEPT noteNode. noteNode type SHOULD have width/height at root level. Other node types should NOT have width/height at root level. 
""" data = load_json_file(json_file) nodes = data["data"]["nodes"] issues = [] for node_idx, node in enumerate(nodes): node_type = node.get("type", "unknown") node_id = node.get("id", "UNKNOWN") # noteNode SHOULD have width/height at root level - skip checking these if node_type == "noteNode": continue # For non-noteNode types, width/height should NOT exist at node level if "width" in node: issues.append( f"Node {node_idx} (ID: {node_id}, type: {node_type}): " f"'width' found at node root level (value: {node['width']}) - " f"should be removed for non-noteNode types" ) if "height" in node: issues.append( f"Node {node_idx} (ID: {node_id}, type: {node_type}): " f"'height' found at node root level (value: {node['height']}) - " f"should be removed for non-noteNode types" ) assert not issues, f"{json_file.name}: Width/height issues found:\n" + "\n".join(issues)
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/test_starter_projects.py", "license": "MIT License", "lines": 65, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/components/models_and_agents/test_mcp_component_output.py
"""Tests for MCP component output processing.""" from unittest.mock import AsyncMock, MagicMock import pytest from lfx.components.models_and_agents.mcp_component import MCPToolsComponent from lfx.schema.dataframe import DataFrame class TestMCPComponentOutputProcessing: """Test MCP component output processing, particularly for DataFrame compatibility.""" @pytest.fixture def component(self): """Create an MCP component for testing.""" return MCPToolsComponent() def test_process_output_item_with_dict_json(self, component): """Test that process_output_item handles dict JSON correctly.""" item_dict = {"type": "text", "text": '{"key": "value", "number": 42}'} result = component.process_output_item(item_dict) assert isinstance(result, dict) assert result == {"key": "value", "number": 42} def test_process_output_item_with_string_json(self, component): """Test that process_output_item wraps string JSON values in dict.""" item_dict = {"type": "text", "text": '"hello world"'} result = component.process_output_item(item_dict) assert isinstance(result, dict) assert result["text"] == '"hello world"' assert result["parsed_value"] == "hello world" assert result["type"] == "text" def test_process_output_item_with_number_json(self, component): """Test that process_output_item wraps number JSON values in dict.""" item_dict = {"type": "text", "text": "42"} result = component.process_output_item(item_dict) assert isinstance(result, dict) assert result["text"] == "42" assert result["parsed_value"] == 42 assert result["type"] == "text" def test_process_output_item_with_array_json(self, component): """Test that process_output_item wraps array JSON values in dict.""" item_dict = {"type": "text", "text": '["item1", "item2", "item3"]'} result = component.process_output_item(item_dict) assert isinstance(result, dict) assert result["text"] == '["item1", "item2", "item3"]' assert result["parsed_value"] == ["item1", "item2", "item3"] assert result["type"] == "text" def 
test_process_output_item_with_invalid_json(self, component): """Test that process_output_item handles invalid JSON gracefully.""" item_dict = {"type": "text", "text": "not valid json {"} result = component.process_output_item(item_dict) assert isinstance(result, dict) assert result == item_dict def test_process_output_item_non_text_type(self, component): """Test that process_output_item returns non-text items unchanged.""" item_dict = {"type": "image", "url": "https://example.com/image.png"} result = component.process_output_item(item_dict) assert result == item_dict @pytest.mark.asyncio async def test_build_output_creates_valid_dataframe(self, component): """Test that build_output creates a valid DataFrame with mixed JSON types.""" # Setup component with mocked tools and cache component.tool = "test_tool" component.tools = [] # Mock the tool cache mock_tool = MagicMock() mock_result = MagicMock() # Create mock output with various JSON types mock_content_item1 = MagicMock() mock_content_item1.model_dump.return_value = {"type": "text", "text": '{"status": "success"}'} mock_content_item2 = MagicMock() mock_content_item2.model_dump.return_value = {"type": "text", "text": '"just a string"'} mock_content_item3 = MagicMock() mock_content_item3.model_dump.return_value = {"type": "text", "text": "42"} mock_result.content = [mock_content_item1, mock_content_item2, mock_content_item3] mock_tool.coroutine = AsyncMock(return_value=mock_result) component._tool_cache = {"test_tool": mock_tool} # Mock update_tool_list component.update_tool_list = AsyncMock(return_value=([], None)) # Mock get_inputs_for_all_tools to return empty list component.get_inputs_for_all_tools = MagicMock(return_value={"test_tool": []}) # Execute build_output result = await component.build_output() # Verify result is a DataFrame assert isinstance(result, DataFrame) # Verify all items in DataFrame are dictionaries for _idx, row in result.iterrows(): # Each row should be a valid Series (which can be 
converted to dict) assert row is not None # Verify the DataFrame has the expected number of rows assert len(result) == 3 # Verify first row is the original dict assert result.iloc[0]["status"] == "success" # Verify second row is wrapped string assert result.iloc[1]["parsed_value"] == "just a string" assert result.iloc[1]["type"] == "text" # Verify third row is wrapped number assert result.iloc[2]["parsed_value"] == 42 assert result.iloc[2]["type"] == "text" @pytest.mark.asyncio async def test_build_output_with_no_tool_selected(self, component): """Test that build_output returns error DataFrame when no tool is selected.""" component.tool = "" component.update_tool_list = AsyncMock(return_value=([], None)) result = await component.build_output() assert isinstance(result, DataFrame) assert len(result) == 1 assert result.iloc[0]["error"] == "You must select a tool"
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/components/models_and_agents/test_mcp_component_output.py", "license": "MIT License", "lines": 102, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/base/langflow/api/v1/model_options.py
from fastapi import APIRouter from lfx.base.models.unified_models import get_embedding_model_options, get_language_model_options from langflow.api.utils import CurrentActiveUser router = APIRouter(prefix="/model_options", tags=["Model Options"], include_in_schema=False) @router.get("/language", status_code=200) async def get_language_model_options_endpoint( current_user: CurrentActiveUser, ): """Get language model options filtered by user's enabled providers and models.""" return get_language_model_options(user_id=current_user.id) @router.get("/embedding", status_code=200) async def get_embedding_model_options_endpoint( current_user: CurrentActiveUser, ): """Get embedding model options filtered by user's enabled providers and models.""" return get_embedding_model_options(user_id=current_user.id)
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/api/v1/model_options.py", "license": "MIT License", "lines": 16, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
langflow-ai/langflow:src/backend/base/langflow/api/v1/models.py
from __future__ import annotations import json from typing import Annotated from fastapi import APIRouter, Depends, HTTPException, Query from lfx.base.models.model_utils import replace_with_live_models from lfx.base.models.unified_models import ( get_model_provider_metadata, get_model_provider_variable_mapping, get_model_providers, get_provider_all_variables, get_unified_models_detailed, ) from loguru import logger from pydantic import BaseModel, field_validator from langflow.api.utils import CurrentActiveUser, DbSession from langflow.services.auth.utils import get_current_active_user from langflow.services.deps import get_variable_service from langflow.services.variable.constants import GENERIC_TYPE from langflow.services.variable.service import DatabaseVariableService router = APIRouter(prefix="/models", tags=["Models"], include_in_schema=False) # Variable names for storing disabled models and default models DISABLED_MODELS_VAR = "__disabled_models__" ENABLED_MODELS_VAR = "__enabled_models__" DEFAULT_LANGUAGE_MODEL_VAR = "__default_language_model__" DEFAULT_EMBEDDING_MODEL_VAR = "__default_embedding_model__" # Security limits MAX_STRING_LENGTH = 200 # Maximum length for model IDs and provider names MAX_BATCH_UPDATE_SIZE = 100 # Maximum number of models that can be updated at once def get_provider_from_variable_name(variable_name: str) -> str | None: """Get provider name from a model provider variable name. Args: variable_name: The variable name (e.g., "OPENAI_API_KEY") Returns: The provider name (e.g., "OpenAI") or None if not a model provider variable """ provider_mapping = get_model_provider_variable_mapping() # Reverse the mapping to get provider from variable name for provider, var_name in provider_mapping.items(): if var_name == variable_name: return provider return None def get_model_names_for_provider(provider: str) -> set[str]: """Get all model names for a given provider. 
Args: provider: The provider name (e.g., "OpenAI") Returns: A set of model names for that provider """ models_by_provider = get_unified_models_detailed( providers=[provider], include_unsupported=True, include_deprecated=True, ) model_names = set() for provider_dict in models_by_provider: if provider_dict.get("provider") == provider: for model in provider_dict.get("models", []): model_names.add(model.get("model_name")) return model_names class ModelStatusUpdate(BaseModel): """Request model for updating model enabled status.""" provider: str model_id: str enabled: bool @field_validator("model_id", "provider") @classmethod def validate_non_empty_string(cls, v: str) -> str: """Ensure strings are non-empty and reasonable length.""" if not v or not v.strip(): msg = "Field cannot be empty" raise ValueError(msg) if len(v) > MAX_STRING_LENGTH: msg = f"Field exceeds maximum length of {MAX_STRING_LENGTH} characters" raise ValueError(msg) return v.strip() class ValidateProviderRequest(BaseModel): """Request model for validating provider credentials.""" provider: str variables: dict[str, str] # {variable_key: value} @field_validator("provider") @classmethod def validate_provider(cls, v: str) -> str: """Ensure provider name is valid.""" if not v or not v.strip(): msg = "Provider cannot be empty" raise ValueError(msg) if len(v) > MAX_STRING_LENGTH: msg = f"Provider exceeds maximum length of {MAX_STRING_LENGTH} characters" raise ValueError(msg) return v.strip() class ValidateProviderResponse(BaseModel): """Response model for provider validation.""" valid: bool error: str | None = None @router.get("/providers", status_code=200, dependencies=[Depends(get_current_active_user)]) async def list_model_providers() -> list[str]: """Return available model providers.""" return get_model_providers() @router.get("", status_code=200) async def list_models( *, provider: Annotated[list[str] | None, Query(description="Repeat to include multiple providers")] = None, model_name: str | None = None, 
model_type: str | None = None, include_unsupported: bool = False, include_deprecated: bool = False, # common metadata filters tool_calling: bool | None = None, reasoning: bool | None = None, search: bool | None = None, preview: bool | None = None, deprecated: bool | None = None, not_supported: bool | None = None, session: DbSession, current_user: CurrentActiveUser, ): """Return model catalog filtered by query parameters. Pass providers as repeated query params, e.g. `?provider=OpenAI&provider=Anthropic`. """ selected_providers: list[str] | None = provider metadata_filters = { k: v for k, v in { "tool_calling": tool_calling, "reasoning": reasoning, "search": search, "preview": preview, "deprecated": deprecated, "not_supported": not_supported, }.items() if v is not None } # Get enabled providers status (now just checks if variables exist) enabled_providers_result = await get_enabled_providers(session=session, current_user=current_user) provider_configured_status = enabled_providers_result.get("provider_status", {}) # Get enabled models map for current user to determine "active" providers enabled_models_result = await get_enabled_models(session=session, current_user=current_user) enabled_models_map = enabled_models_result.get("enabled_models", {}) # Get default model if model_type is specified default_provider = None if model_type: try: default_model_result = await get_default_model( session=session, current_user=current_user, model_type=model_type ) if default_model_result.get("default_model"): default_provider = default_model_result["default_model"].get("provider") except Exception: # noqa: BLE001 # Default model fetch failed, continue without it # This is not critical for the main operation - we suppress to avoid breaking the list logger.debug("Failed to fetch default model, continuing without it", exc_info=True) # Get filtered models - pass providers directly to avoid filtering after filtered_models = get_unified_models_detailed( providers=selected_providers, 
model_name=model_name, include_unsupported=include_unsupported, include_deprecated=include_deprecated, model_type=model_type, **metadata_filters, ) # Add configured and enabled status to each provider for provider_dict in filtered_models: prov_name = provider_dict.get("provider") provider_dict["is_configured"] = provider_configured_status.get(prov_name, False) # Provider is "enabled" (active) if it has at least one enabled model prov_models_status = enabled_models_map.get(prov_name, {}) has_active_model = any(prov_models_status.values()) provider_dict["is_enabled"] = has_active_model # Replace static models with live models for providers that support it configured_providers = {p for p, configured in provider_configured_status.items() if configured} replace_with_live_models(filtered_models, current_user.id, configured_providers, model_type) # Sort providers: # 1. Provider with default model first # 2. Configured providers next # 3. Alphabetically after that def sort_key(provider_dict): provider_name = provider_dict.get("provider", "") # Use is_configured for sorting priority (so they appear at top when ready) is_configured = provider_dict.get("is_configured", False) is_default = provider_name == default_provider # Return tuple for sorting: (not is_default, not is_configured, provider_name) # This way default comes first (False < True), then configured, then alphabetical return (not is_default, not is_configured, provider_name) filtered_models.sort(key=sort_key) return filtered_models @router.get("/provider-variable-mapping", status_code=200) async def get_model_provider_mapping() -> dict[str, list[dict]]: """Return provider variables mapping with full variable info. 
Each provider maps to a list of variable objects containing: - variable_name: Display name shown to user - variable_key: Environment variable key - description: Help text for the variable - required: Whether the variable is required - is_secret: Whether to treat as credential - is_list: Whether it accepts multiple values - options: Predefined options for dropdowns """ metadata = get_model_provider_metadata() return {provider: meta.get("variables", []) for provider, meta in metadata.items()} @router.get("/enabled_providers", status_code=200) async def get_enabled_providers( *, session: DbSession, current_user: CurrentActiveUser, providers: Annotated[list[str] | None, Query()] = None, ): """Get enabled providers for the current user. Providers are considered enabled if they have a credential variable stored. API key validation is performed when credentials are saved, not on every read, to avoid latency from external API calls. """ variable_service = get_variable_service() try: if not isinstance(variable_service, DatabaseVariableService): raise HTTPException( status_code=500, detail="Variable service is not an instance of DatabaseVariableService", ) # Get all variables (VariableRead objects) all_variables = await variable_service.get_all(user_id=current_user.id, session=session) # Build a set of all variable names we have all_variable_names = {var.name for var in all_variables} # Get the provider-variable mapping provider_variable_map = get_model_provider_variable_mapping() # Check which providers have all required variables saved enabled_providers = [] provider_status = {} for provider in provider_variable_map: # Get ALL variables for this provider provider_vars = get_provider_all_variables(provider) # Check if all REQUIRED variables are present required_vars = [v for v in provider_vars if v.get("required", False)] all_required_present = all(v.get("variable_key") in all_variable_names for v in required_vars) provider_status[provider] = all_required_present if 
all_required_present: enabled_providers.append(provider) result = { "enabled_providers": enabled_providers, "provider_status": provider_status, } if providers: # Filter enabled_providers and provider_status by requested providers filtered_enabled = [p for p in result["enabled_providers"] if p in providers] provider_status_dict = result.get("provider_status", {}) if not isinstance(provider_status_dict, dict): provider_status_dict = {} filtered_status = {p: v for p, v in provider_status_dict.items() if p in providers} return { "enabled_providers": filtered_enabled, "provider_status": filtered_status, } except HTTPException: raise except Exception as e: logger.exception("Failed to get enabled providers for user %s", current_user.id) raise HTTPException( status_code=500, detail="Failed to retrieve enabled providers. Please try again later.", ) from e else: return result @router.post("/validate-provider", status_code=200, response_model=ValidateProviderResponse) async def validate_provider( request: ValidateProviderRequest, current_user: CurrentActiveUser, # noqa: ARG001 ) -> ValidateProviderResponse: """Validate provider credentials before saving. This endpoint checks if the provided credentials are valid by attempting to connect to the provider. Use this for real-time validation in the UI. 
""" from lfx.base.models.unified_models import validate_model_provider_key try: # Validate the credentials validate_model_provider_key(request.provider, request.variables) return ValidateProviderResponse(valid=True, error=None) except ValueError as e: return ValidateProviderResponse(valid=False, error=str(e)) except (ConnectionError, TimeoutError, RuntimeError, KeyError, AttributeError, TypeError) as e: logger.exception("Unexpected error validating provider %s", request.provider) return ValidateProviderResponse(valid=False, error=f"Validation failed: {e}") async def _get_disabled_models(session: DbSession, current_user: CurrentActiveUser) -> set[str]: """Helper function to get the set of disabled model IDs.""" variable_service = get_variable_service() if not isinstance(variable_service, DatabaseVariableService): return set() try: var = await variable_service.get_variable_object( user_id=current_user.id, name=DISABLED_MODELS_VAR, session=session ) if var.value: # This checks for both None and empty string try: parsed_value = json.loads(var.value) # Validate it's a list of strings if not isinstance(parsed_value, list): logger.warning("Invalid disabled models format for user %s: not a list", current_user.id) return set() # Ensure all items are strings return {str(item) for item in parsed_value if isinstance(item, str)} except (json.JSONDecodeError, TypeError): logger.warning("Failed to parse disabled models for user %s", current_user.id, exc_info=True) return set() except ValueError: # Variable not found, return empty set pass return set() async def _get_enabled_models(session: DbSession, current_user: CurrentActiveUser) -> set[str]: """Helper function to get the set of explicitly enabled model IDs. These are models that were NOT default but were explicitly enabled by the user. 
""" variable_service = get_variable_service() if not isinstance(variable_service, DatabaseVariableService): return set() try: var = await variable_service.get_variable_object( user_id=current_user.id, name=ENABLED_MODELS_VAR, session=session ) # Strip whitespace and check if value is non-empty if var.value and (value_stripped := var.value.strip()): try: parsed_value = json.loads(value_stripped) # Validate it's a list of strings if not isinstance(parsed_value, list): logger.warning("Invalid enabled models format for user %s: not a list", current_user.id) return set() # Ensure all items are strings return {str(item) for item in parsed_value if isinstance(item, str)} except (json.JSONDecodeError, TypeError): # Log at debug level to avoid flooding logs with expected edge cases logger.debug("Failed to parse enabled models for user %s: %s", current_user.id, var.value) return set() except ValueError: # Variable not found, return empty set pass return set() def _build_model_default_flags() -> dict[str, bool]: """Build a map of model names to their default flag status. Returns: Dictionary mapping model names to whether they are default models """ all_models_by_provider = get_unified_models_detailed( include_unsupported=True, include_deprecated=True, ) is_default_model = {} for provider_dict in all_models_by_provider: for model in provider_dict.get("models", []): model_name = model.get("model_name") is_default = model.get("metadata", {}).get("default", False) is_default_model[model_name] = is_default return is_default_model def _update_model_sets( updates: list[ModelStatusUpdate], disabled_models: set[str], explicitly_enabled_models: set[str], is_default_model: dict[str, bool], ) -> None: """Update disabled and enabled model sets based on user requests. 
Args: updates: List of model status updates from user disabled_models: Set of disabled model IDs (modified in place) explicitly_enabled_models: Set of explicitly enabled model IDs (modified in place) is_default_model: Map of model names to their default flag status """ for update in updates: model_is_default = is_default_model.get(update.model_id, False) if update.enabled: # User wants to enable the model disabled_models.discard(update.model_id) # If it's not a default model, add to explicitly enabled list if not model_is_default: explicitly_enabled_models.add(update.model_id) else: # User wants to disable the model disabled_models.add(update.model_id) explicitly_enabled_models.discard(update.model_id) async def _save_model_list_variable( variable_service: DatabaseVariableService, session: DbSession, current_user: CurrentActiveUser, var_name: str, model_set: set[str], ) -> None: """Save or update a model list variable. Args: variable_service: The database variable service session: Database session current_user: Current active user var_name: Name of the variable to save model_set: Set of model names to save Raises: HTTPException: If there's an error saving the variable """ from langflow.services.database.models.variable.model import VariableUpdate models_json = json.dumps(list(model_set)) try: existing_var = await variable_service.get_variable_object( user_id=current_user.id, name=var_name, session=session ) if existing_var is None or existing_var.id is None: msg = f"Variable {var_name} not found" raise ValueError(msg) # Update or delete based on whether there are models if model_set or var_name == DISABLED_MODELS_VAR: # Always update disabled models, even if empty # Only update enabled models if non-empty await variable_service.update_variable_fields( user_id=current_user.id, variable_id=existing_var.id, variable=VariableUpdate(id=existing_var.id, name=var_name, value=models_json, type=GENERIC_TYPE), session=session, ) else: # No explicitly enabled models, delete 
the variable await variable_service.delete_variable(user_id=current_user.id, name=var_name, session=session) except ValueError: # Variable not found, create new one if there are models if model_set: await variable_service.create_variable( user_id=current_user.id, name=var_name, value=models_json, type_=GENERIC_TYPE, session=session, ) except HTTPException: raise except Exception as e: logger.exception( "Failed to save model list variable %s for user %s", var_name, current_user.id, ) raise HTTPException( status_code=500, detail="Failed to save model configuration. Please try again later.", ) from e @router.get("/enabled_models", status_code=200) async def get_enabled_models( *, session: DbSession, current_user: CurrentActiveUser, model_names: Annotated[list[str] | None, Query()] = None, ): """Get enabled models for the current user.""" # Get all models - this returns a list of provider dicts with nested models all_models_by_provider = get_unified_models_detailed( include_unsupported=True, include_deprecated=True, ) # Get enabled providers status enabled_providers_result = await get_enabled_providers(session=session, current_user=current_user) provider_status = enabled_providers_result.get("provider_status", {}) # Replace static models with live models for providers that support it configured_providers = {p for p, configured in provider_status.items() if configured} replace_with_live_models(all_models_by_provider, current_user.id, configured_providers) # Get disabled and explicitly enabled models lists disabled_models = await _get_disabled_models(session=session, current_user=current_user) explicitly_enabled_models = await _get_enabled_models(session=session, current_user=current_user) # Build model status based on provider enablement enabled_models: dict[str, dict[str, bool]] = {} # Iterate through providers and their models for provider_dict in all_models_by_provider: provider = provider_dict.get("provider") models = provider_dict.get("models", []) # Initialize 
provider dict if not exists if provider not in enabled_models: enabled_models[provider] = {} for model in models: model_name = model.get("model_name") metadata = model.get("metadata", {}) # Check if model is deprecated or not supported is_deprecated = metadata.get("deprecated", False) is_not_supported = metadata.get("not_supported", False) is_default = metadata.get("default", False) # Model is enabled if: # 1. Provider is enabled # 2. Model is not deprecated/unsupported # 3. Model is either: # - Marked as default (default=True), OR # - Explicitly enabled by user (in explicitly_enabled_models), AND # - NOT explicitly disabled by user (not in disabled_models) is_enabled = ( provider_status.get(provider, False) and not is_deprecated and not is_not_supported and (is_default or model_name in explicitly_enabled_models) and model_name not in disabled_models ) # Store model status per provider (true/false) enabled_models[provider][model_name] = is_enabled result = { "enabled_models": enabled_models, } if model_names: # Filter enabled_models by requested models filtered_enabled: dict[str, dict[str, bool]] = {} for provider, models_dict in enabled_models.items(): filtered_models = {m: v for m, v in models_dict.items() if m in model_names} if filtered_models: filtered_enabled[provider] = filtered_models return { "enabled_models": filtered_enabled, } return result @router.post("/enabled_models", status_code=200) async def update_enabled_models( *, session: DbSession, current_user: CurrentActiveUser, updates: list[ModelStatusUpdate], ): """Update enabled status for specific models. Accepts a list of model IDs with their desired enabled status. This only affects model-level enablement - provider credentials must still be configured. 
""" variable_service = get_variable_service() if not isinstance(variable_service, DatabaseVariableService): raise HTTPException( status_code=500, detail="Variable service is not an instance of DatabaseVariableService", ) # Limit batch size to prevent abuse if len(updates) > MAX_BATCH_UPDATE_SIZE: raise HTTPException( status_code=400, detail=f"Cannot update more than {MAX_BATCH_UPDATE_SIZE} models at once", ) # Get current disabled and explicitly enabled models disabled_models = await _get_disabled_models(session=session, current_user=current_user) explicitly_enabled_models = await _get_enabled_models(session=session, current_user=current_user) # Build map of model names to their default flag is_default_model = _build_model_default_flags() # Update model sets based on user requests # For any model being enabled, validate the provider credentials for update in updates: if update.enabled: from lfx.base.models.unified_models import get_all_variables_for_provider, validate_model_provider_key # Get variables from DB or environment variables = get_all_variables_for_provider(current_user.id, update.provider) try: # Validate the credentials validate_model_provider_key(update.provider, variables, model_name=update.model_id) except ValueError as e: # Validation failed - return 400 with error message raise HTTPException( status_code=400, detail=f"Validation failed for {update.provider}: {e}", ) from e except Exception as e: logger.exception("Unexpected error validating provider %s", update.provider) raise HTTPException( status_code=400, detail=f"Validation failed for {update.provider}: {e}", ) from e _update_model_sets(updates, disabled_models, explicitly_enabled_models, is_default_model) # Log the operation for audit trail logger.info( "User %s updated model status: %d models affected", current_user.id, len(updates), ) # Save updated model lists await _save_model_list_variable(variable_service, session, current_user, DISABLED_MODELS_VAR, disabled_models) await 
_save_model_list_variable( variable_service, session, current_user, ENABLED_MODELS_VAR, explicitly_enabled_models ) # Return the updated model status return { "disabled_models": list(disabled_models), "enabled_models": list(explicitly_enabled_models), } class DefaultModelRequest(BaseModel): """Request model for setting default model.""" model_name: str provider: str model_type: str # 'language' or 'embedding' @field_validator("model_name", "provider") @classmethod def validate_non_empty_string(cls, v: str) -> str: """Ensure strings are non-empty and reasonable length.""" if not v or not v.strip(): msg = "Field cannot be empty" raise ValueError(msg) if len(v) > MAX_STRING_LENGTH: msg = f"Field exceeds maximum length of {MAX_STRING_LENGTH} characters" raise ValueError(msg) return v.strip() @field_validator("model_type") @classmethod def validate_model_type(cls, v: str) -> str: """Ensure model_type is valid.""" if v not in ("language", "embedding"): msg = "model_type must be 'language' or 'embedding'" raise ValueError(msg) return v @router.get("/default_model", status_code=200) async def get_default_model( *, session: DbSession, current_user: CurrentActiveUser, model_type: Annotated[str, Query(description="Type of model: 'language' or 'embedding'")] = "language", ): """Get the default model for the current user.""" variable_service = get_variable_service() if not isinstance(variable_service, DatabaseVariableService): return {"default_model": None} var_name = DEFAULT_LANGUAGE_MODEL_VAR if model_type == "language" else DEFAULT_EMBEDDING_MODEL_VAR try: var = await variable_service.get_variable_object(user_id=current_user.id, name=var_name, session=session) if var.value: try: parsed_value = json.loads(var.value) except (json.JSONDecodeError, TypeError): logger.warning("Failed to parse default model for user %s", current_user.id, exc_info=True) return {"default_model": None} else: # Validate structure if not isinstance(parsed_value, dict) or not all( k in parsed_value for 
k in ("model_name", "provider", "model_type") ): logger.warning("Invalid default model format for user %s", current_user.id) return {"default_model": None} return {"default_model": parsed_value} except ValueError: # Variable not found pass return {"default_model": None} @router.post("/default_model", status_code=200) async def set_default_model( *, session: DbSession, current_user: CurrentActiveUser, request: DefaultModelRequest, ): """Set the default model for the current user.""" variable_service = get_variable_service() if not isinstance(variable_service, DatabaseVariableService): raise HTTPException( status_code=500, detail="Variable service is not an instance of DatabaseVariableService", ) var_name = DEFAULT_LANGUAGE_MODEL_VAR if request.model_type == "language" else DEFAULT_EMBEDDING_MODEL_VAR # Log the operation for audit trail logger.info( "User %s setting default %s model to %s (%s)", current_user.id, request.model_type, request.model_name, request.provider, ) # Prepare the model data model_data = { "model_name": request.model_name, "provider": request.provider, "model_type": request.model_type, } model_json = json.dumps(model_data) # Check if the variable already exists try: existing_var = await variable_service.get_variable_object( user_id=current_user.id, name=var_name, session=session ) if existing_var is None or existing_var.id is None: msg = f"Variable {DISABLED_MODELS_VAR} not found" raise ValueError(msg) # Update existing variable from langflow.services.database.models.variable.model import VariableUpdate await variable_service.update_variable_fields( user_id=current_user.id, variable_id=existing_var.id, variable=VariableUpdate(id=existing_var.id, name=var_name, value=model_json, type=GENERIC_TYPE), session=session, ) except ValueError: # Variable not found, create new one await variable_service.create_variable( user_id=current_user.id, name=var_name, value=model_json, type_=GENERIC_TYPE, session=session, ) except HTTPException: raise except 
Exception as e: logger.exception( "Failed to set default model for user %s", current_user.id, ) raise HTTPException( status_code=500, detail="Failed to set default model. Please try again later.", ) from e return {"default_model": model_data} @router.delete("/default_model", status_code=200) async def clear_default_model( *, session: DbSession, current_user: CurrentActiveUser, model_type: Annotated[str, Query(description="Type of model: 'language' or 'embedding'")] = "language", ): """Clear the default model for the current user.""" variable_service = get_variable_service() if not isinstance(variable_service, DatabaseVariableService): raise HTTPException( status_code=500, detail="Variable service is not an instance of DatabaseVariableService", ) var_name = DEFAULT_LANGUAGE_MODEL_VAR if model_type == "language" else DEFAULT_EMBEDDING_MODEL_VAR # Log the operation for audit trail logger.info( "User %s clearing default %s model", current_user.id, model_type, ) # Check if the variable exists and delete it try: existing_var = await variable_service.get_variable_object( user_id=current_user.id, name=var_name, session=session ) await variable_service.delete_variable(user_id=current_user.id, name=existing_var.name, session=session) except ValueError: # Variable not found, nothing to delete pass except HTTPException: raise except Exception as e: logger.exception( "Failed to clear default model for user %s", current_user.id, ) raise HTTPException( status_code=500, detail="Failed to clear default model. Please try again later.", ) from e return {"default_model": None}
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/base/langflow/api/v1/models.py", "license": "MIT License", "lines": 736, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
langflow-ai/langflow:src/backend/tests/unit/api/v1/test_models_enabled_providers.py
"""Tests for model provider enabled_providers endpoint and credential redaction.""" from unittest import mock import pytest from fastapi import status from httpx import AsyncClient from langflow.services.variable.constants import CREDENTIAL_TYPE from lfx.base.models.unified_models import get_model_provider_variable_mapping # Get provider to variable name mapping _provider_variable_mapping = get_model_provider_variable_mapping() def _create_variable_payload(provider: str, value: str) -> dict: """Helper to create variable payload for a model provider credential.""" variable_name = _provider_variable_mapping.get(provider) if not variable_name: msg = f"Unknown provider: {provider}" raise ValueError(msg) return { "name": variable_name, "value": value, "type": CREDENTIAL_TYPE, "default_fields": [provider, "api_key"], } @pytest.fixture def openai_credential(): """OpenAI credential fixture.""" return { "name": "API Key", "provider": "OpenAI", "value": "sk-test-openai-key-123456789", "description": "OpenAI API key for GPT models", } @pytest.fixture def anthropic_credential(): """Anthropic credential fixture.""" return { "name": "API Key", "provider": "Anthropic", "value": "sk-ant-test-anthropic-key-123456789", "description": "Anthropic API key for Claude models", } @pytest.fixture def google_credential(): """Google credential fixture.""" return { "name": "API Key", "provider": "Google Generative AI", "value": "AIzaSyTest-google-key-123456789", "description": "Google API key for Gemini models", } @pytest.mark.usefixtures("active_user") async def test_enabled_providers_empty_initially(client: AsyncClient, logged_in_headers): """Test that enabled_providers returns empty status when no credentials exist.""" response = await client.get("api/v1/models/enabled_providers", headers=logged_in_headers) result = response.json() assert response.status_code == status.HTTP_200_OK assert "enabled_providers" in result assert "provider_status" in result assert 
isinstance(result["enabled_providers"], list) assert isinstance(result["provider_status"], dict) @pytest.mark.usefixtures("active_user") async def test_enabled_providers_after_credential_creation(client: AsyncClient, openai_credential, logged_in_headers): """Test that provider status changes after credential creation.""" # Clean up any existing OPENAI_API_KEY variables all_vars = await client.get("api/v1/variables/", headers=logged_in_headers) openai_var_name = _provider_variable_mapping.get("OpenAI") for var in all_vars.json(): if var.get("name") == openai_var_name: await client.delete(f"api/v1/variables/{var['id']}", headers=logged_in_headers) # Check initial status initial_response = await client.get("api/v1/models/enabled_providers", headers=logged_in_headers) initial_result = initial_response.json() assert initial_response.status_code == status.HTTP_200_OK openai_initially_enabled = initial_result.get("provider_status", {}).get("OpenAI", False) # Create OpenAI credential using variables endpoint variable_payload = _create_variable_payload(openai_credential["provider"], openai_credential["value"]) # Mock API validation - mock where it's used (in the variable endpoint) with mock.patch("langflow.api.v1.variable.validate_model_provider_key") as mock_validate: mock_validate.return_value = None # validate_model_provider_key returns None on success create_response = await client.post("api/v1/variables/", json=variable_payload, headers=logged_in_headers) assert create_response.status_code == status.HTTP_201_CREATED # Check status after credential creation # Mock validation for enabled_providers endpoint as well with mock.patch("lfx.base.models.unified_models.validate_model_provider_key") as mock_validate: mock_validate.return_value = None after_response = await client.get("api/v1/models/enabled_providers", headers=logged_in_headers) after_result = after_response.json() assert after_response.status_code == status.HTTP_200_OK assert "OpenAI" in 
after_result["enabled_providers"] assert after_result["provider_status"]["OpenAI"] is True # Verify the status changed assert after_result["provider_status"]["OpenAI"] != openai_initially_enabled or openai_initially_enabled is True @pytest.mark.usefixtures("active_user") async def test_enabled_providers_multiple_credentials( client: AsyncClient, openai_credential, anthropic_credential, google_credential, logged_in_headers ): """Test provider status with multiple credentials.""" # Clean up any existing variables all_vars = await client.get("api/v1/variables/", headers=logged_in_headers) var_names = { _provider_variable_mapping.get("OpenAI"), _provider_variable_mapping.get("Anthropic"), _provider_variable_mapping.get("Google Generative AI"), } for var in all_vars.json(): if var.get("name") in var_names: await client.delete(f"api/v1/variables/{var['id']}", headers=logged_in_headers) # Create multiple credentials using variables endpoint openai_var = _create_variable_payload(openai_credential["provider"], openai_credential["value"]) anthropic_var = _create_variable_payload(anthropic_credential["provider"], anthropic_credential["value"]) google_var = _create_variable_payload(google_credential["provider"], google_credential["value"]) # Mock API validations with mock.patch("langflow.api.v1.variable.validate_model_provider_key") as mock_validate: mock_validate.return_value = None await client.post("api/v1/variables/", json=openai_var, headers=logged_in_headers) await client.post("api/v1/variables/", json=anthropic_var, headers=logged_in_headers) await client.post("api/v1/variables/", json=google_var, headers=logged_in_headers) # Check enabled providers - mock validation for enabled_providers endpoint with mock.patch("lfx.base.models.unified_models.validate_model_provider_key") as mock_validate: mock_validate.return_value = None response = await client.get("api/v1/models/enabled_providers", headers=logged_in_headers) result = response.json() assert response.status_code == 
status.HTTP_200_OK assert "OpenAI" in result["enabled_providers"] assert "Anthropic" in result["enabled_providers"] assert "Google Generative AI" in result["enabled_providers"] assert result["provider_status"]["OpenAI"] is True assert result["provider_status"]["Anthropic"] is True assert result["provider_status"]["Google Generative AI"] is True @pytest.mark.usefixtures("active_user") async def test_enabled_providers_after_credential_deletion(client: AsyncClient, openai_credential, logged_in_headers): """Test that provider status updates after credential deletion.""" # Get initial OpenAI credentials to clean up (using variables endpoint) all_variables = await client.get("api/v1/variables/", headers=logged_in_headers) openai_var_name = _provider_variable_mapping.get("OpenAI") for var in all_variables.json(): if var.get("name") == openai_var_name: await client.delete(f"api/v1/variables/{var['id']}", headers=logged_in_headers) # Create credential using variables endpoint variable_payload = _create_variable_payload(openai_credential["provider"], openai_credential["value"]) # Mock API validation with mock.patch("langflow.api.v1.variable.validate_model_provider_key") as mock_validate: mock_validate.return_value = None create_response = await client.post("api/v1/variables/", json=variable_payload, headers=logged_in_headers) created_credential = create_response.json() credential_id = created_credential["id"] # Verify enabled - mock validation for enabled_providers endpoint as well with mock.patch("lfx.base.models.unified_models.validate_model_provider_key") as mock_validate: mock_validate.return_value = None enabled_response = await client.get("api/v1/models/enabled_providers", headers=logged_in_headers) enabled_result = enabled_response.json() assert "OpenAI" in enabled_result["enabled_providers"] assert enabled_result["provider_status"]["OpenAI"] is True # Delete credential delete_response = await client.delete(f"api/v1/variables/{credential_id}", 
headers=logged_in_headers) assert delete_response.status_code == status.HTTP_204_NO_CONTENT # Verify disabled disabled_response = await client.get("api/v1/models/enabled_providers", headers=logged_in_headers) disabled_result = disabled_response.json() assert "OpenAI" not in disabled_result["enabled_providers"] # When no credentials exist, provider_status may be empty or OpenAI should be False assert disabled_result["provider_status"].get("OpenAI", False) is False @pytest.mark.usefixtures("active_user") async def test_enabled_providers_filter_by_specific_providers( client: AsyncClient, openai_credential, anthropic_credential, logged_in_headers ): """Test filtering enabled_providers by specific providers.""" # Clean up any existing variables all_vars = await client.get("api/v1/variables/", headers=logged_in_headers) var_names = { _provider_variable_mapping.get("OpenAI"), _provider_variable_mapping.get("Anthropic"), } for var in all_vars.json(): if var.get("name") in var_names: await client.delete(f"api/v1/variables/{var['id']}", headers=logged_in_headers) # Create credentials using variables endpoint openai_var = _create_variable_payload(openai_credential["provider"], openai_credential["value"]) anthropic_var = _create_variable_payload(anthropic_credential["provider"], anthropic_credential["value"]) # Mock API validations with mock.patch("langflow.api.v1.variable.validate_model_provider_key") as mock_validate: mock_validate.return_value = None await client.post("api/v1/variables/", json=openai_var, headers=logged_in_headers) await client.post("api/v1/variables/", json=anthropic_var, headers=logged_in_headers) # Request specific providers (only providers that are in the mapping) - mock validation with mock.patch("lfx.base.models.unified_models.validate_model_provider_key") as mock_validate: mock_validate.return_value = None response = await client.get( "api/v1/models/enabled_providers?providers=OpenAI&providers=Anthropic", headers=logged_in_headers ) result = 
response.json() assert response.status_code == status.HTTP_200_OK assert "OpenAI" in result["enabled_providers"] assert "Anthropic" in result["enabled_providers"] assert "OpenAI" in result["provider_status"] assert result["provider_status"]["OpenAI"] is True assert "Anthropic" in result["provider_status"] assert result["provider_status"]["Anthropic"] is True # Test filtering with non-existent provider (should not error, just return empty) response2 = await client.get( "api/v1/models/enabled_providers?providers=NonExistentProvider", headers=logged_in_headers ) result2 = response2.json() assert response2.status_code == status.HTTP_200_OK assert result2["enabled_providers"] == [] # NonExistentProvider is not in the mapping, so it won't be in provider_status assert "NonExistentProvider" not in result2["provider_status"] @pytest.mark.usefixtures("active_user") async def test_variables_credential_redaction(client: AsyncClient, openai_credential, logged_in_headers): """Test that credential variables have credentials properly redacted.""" # Clean up any existing OPENAI_API_KEY variables all_vars = await client.get("api/v1/variables/", headers=logged_in_headers) openai_var_name = _provider_variable_mapping.get("OpenAI") for var in all_vars.json(): if var.get("name") == openai_var_name: await client.delete(f"api/v1/variables/{var['id']}", headers=logged_in_headers) # Create a credential using variables endpoint variable_payload = _create_variable_payload(openai_credential["provider"], openai_credential["value"]) # Mock API validation with mock.patch("langflow.api.v1.variable.validate_model_provider_key") as mock_validate: mock_validate.return_value = None create_response = await client.post("api/v1/variables/", json=variable_payload, headers=logged_in_headers) assert create_response.status_code == status.HTTP_201_CREATED created_credential = create_response.json() # Get all variables response = await client.get("api/v1/variables/", headers=logged_in_headers) result = 
response.json() assert response.status_code == status.HTTP_200_OK assert isinstance(result, list) # Find the created credential in the response credential_variables = [v for v in result if v.get("id") == created_credential["id"]] assert len(credential_variables) == 1 credential_variable = credential_variables[0] # Verify credential is redacted (value should be None for CREDENTIAL_TYPE) assert credential_variable["value"] is None assert credential_variable["type"] == CREDENTIAL_TYPE @pytest.mark.usefixtures("active_user") async def test_variables_multiple_credentials_all_redacted( client: AsyncClient, openai_credential, anthropic_credential, logged_in_headers ): """Test that all credentials are redacted when fetching all variables.""" # Clean up any existing variables all_vars = await client.get("api/v1/variables/", headers=logged_in_headers) var_names = { _provider_variable_mapping.get("OpenAI"), _provider_variable_mapping.get("Anthropic"), } for var in all_vars.json(): if var.get("name") in var_names: await client.delete(f"api/v1/variables/{var['id']}", headers=logged_in_headers) # Create multiple credentials using variables endpoint openai_var = _create_variable_payload(openai_credential["provider"], openai_credential["value"]) anthropic_var = _create_variable_payload(anthropic_credential["provider"], anthropic_credential["value"]) # Mock API validations with mock.patch("langflow.api.v1.variable.validate_model_provider_key") as mock_validate: mock_validate.return_value = None create_response1 = await client.post("api/v1/variables/", json=openai_var, headers=logged_in_headers) create_response2 = await client.post("api/v1/variables/", json=anthropic_var, headers=logged_in_headers) assert create_response1.status_code == status.HTTP_201_CREATED assert create_response2.status_code == status.HTTP_201_CREATED # Get all variables response = await client.get("api/v1/variables/", headers=logged_in_headers) result = response.json() assert response.status_code == 
status.HTTP_200_OK # Verify all credentials are redacted for variable in result: if variable.get("type") == CREDENTIAL_TYPE: # Credential values should be None (redacted) assert variable["value"] is None @pytest.mark.usefixtures("active_user") async def test_enabled_providers_reflects_models_endpoint(client: AsyncClient, openai_credential, logged_in_headers): """Test that /models endpoint reflects same is_enabled status as /enabled_providers.""" # Clean up any existing OPENAI_API_KEY variables all_vars = await client.get("api/v1/variables/", headers=logged_in_headers) openai_var_name = _provider_variable_mapping.get("OpenAI") for var in all_vars.json(): if var.get("name") == openai_var_name: await client.delete(f"api/v1/variables/{var['id']}", headers=logged_in_headers) # Create credential using variables endpoint variable_payload = _create_variable_payload(openai_credential["provider"], openai_credential["value"]) # Mock API validation with mock.patch("langflow.api.v1.variable.validate_model_provider_key") as mock_validate: mock_validate.return_value = None await client.post("api/v1/variables/", json=variable_payload, headers=logged_in_headers) # Get enabled providers and models - mock validation in unified_models so providers are marked enabled with mock.patch("lfx.base.models.unified_models.validate_model_provider_key") as mock_validate: mock_validate.return_value = None enabled_response = await client.get("api/v1/models/enabled_providers", headers=logged_in_headers) enabled_result = enabled_response.json() # Get models (which should include provider information) models_response = await client.get("api/v1/models", headers=logged_in_headers) models_result = models_response.json() assert models_response.status_code == status.HTTP_200_OK # Check that OpenAI models have is_enabled=True openai_models = [m for m in models_result if m.get("provider") == "OpenAI"] if openai_models: for model in openai_models: assert model.get("is_enabled") is True # Verify consistency 
with enabled_providers assert enabled_result["provider_status"]["OpenAI"] is True @pytest.mark.usefixtures("active_user") async def test_security_credential_value_never_exposed_in_variables_endpoint( client: AsyncClient, openai_credential, logged_in_headers ): """Critical security test: ensure credential values are NEVER exposed in plain text.""" # Clean up any existing OPENAI_API_KEY variables all_vars = await client.get("api/v1/variables/", headers=logged_in_headers) openai_var_name = _provider_variable_mapping.get("OpenAI") for var in all_vars.json(): if var.get("name") == openai_var_name: await client.delete(f"api/v1/variables/{var['id']}", headers=logged_in_headers) original_value = openai_credential["value"] # Create credential using variables endpoint variable_payload = _create_variable_payload(openai_credential["provider"], openai_credential["value"]) # Mock API validation with mock.patch("langflow.api.v1.variable.validate_model_provider_key") as mock_validate: mock_validate.return_value = None create_response = await client.post("api/v1/variables/", json=variable_payload, headers=logged_in_headers) assert create_response.status_code == status.HTTP_201_CREATED # Get all variables - this is the security-critical path response = await client.get("api/v1/variables/", headers=logged_in_headers) result = response.json() # CRITICAL: Original value must NEVER appear in response response_text = str(result) assert original_value not in response_text # Verify each credential is properly redacted (set to None) for variable in result: if variable.get("type") == CREDENTIAL_TYPE: # CRITICAL: Value must be None (redacted), never the original value assert variable["value"] is None @pytest.mark.usefixtures("active_user") async def test_provider_variable_mapping_returns_full_variable_info(client: AsyncClient, logged_in_headers): """Test that provider-variable-mapping endpoint returns full variable info for each provider.""" response = await 
client.get("api/v1/models/provider-variable-mapping", headers=logged_in_headers) result = response.json() assert response.status_code == status.HTTP_200_OK assert isinstance(result, dict) # Check that known providers exist assert "OpenAI" in result assert "Anthropic" in result assert "Google Generative AI" in result assert "Ollama" in result assert "IBM WatsonX" in result # Check structure of variables for OpenAI (single variable provider) openai_vars = result["OpenAI"] assert isinstance(openai_vars, list) assert len(openai_vars) >= 1 # Check each variable has required fields for var in openai_vars: assert "variable_name" in var assert "variable_key" in var assert "required" in var assert "is_secret" in var assert "is_list" in var assert "options" in var # Check OpenAI primary variable (order-independent) openai_api_key_var = next((v for v in openai_vars if v["variable_key"] == "OPENAI_API_KEY"), None) assert openai_api_key_var is not None assert openai_api_key_var["required"] is True assert openai_api_key_var["is_secret"] is True @pytest.mark.usefixtures("active_user") async def test_provider_variable_mapping_multi_variable_provider(client: AsyncClient, logged_in_headers): """Test that IBM WatsonX returns multiple required variables.""" response = await client.get("api/v1/models/provider-variable-mapping", headers=logged_in_headers) result = response.json() assert response.status_code == status.HTTP_200_OK # Check IBM WatsonX has multiple variables watsonx_vars = result.get("IBM WatsonX", []) assert len(watsonx_vars) >= 3 # API Key, Project ID, URL # Find each variable var_keys = {v["variable_key"] for v in watsonx_vars} assert "WATSONX_APIKEY" in var_keys assert "WATSONX_PROJECT_ID" in var_keys assert "WATSONX_URL" in var_keys # Check API Key is secret api_key_var = next((v for v in watsonx_vars if v["variable_key"] == "WATSONX_APIKEY"), None) assert api_key_var is not None assert api_key_var["is_secret"] is True assert api_key_var["required"] is True # Check 
Project ID is not secret project_id_var = next((v for v in watsonx_vars if v["variable_key"] == "WATSONX_PROJECT_ID"), None) assert project_id_var is not None assert project_id_var["is_secret"] is False assert project_id_var["required"] is True # Check URL has options url_var = next((v for v in watsonx_vars if v["variable_key"] == "WATSONX_URL"), None) assert url_var is not None assert url_var["is_secret"] is False assert url_var["required"] is True assert len(url_var["options"]) > 0 # Should have regional endpoint options assert "https://us-south.ml.cloud.ibm.com" in url_var["options"] @pytest.mark.usefixtures("active_user") async def test_backward_compatible_variable_mapping(client: AsyncClient, logged_in_headers): # noqa: ARG001 """Test that get_model_provider_variable_mapping() still returns primary variable (backward compat).""" from lfx.base.models.unified_models import get_model_provider_variable_mapping mapping = get_model_provider_variable_mapping() # Should return dict of provider -> primary variable key assert isinstance(mapping, dict) assert mapping.get("OpenAI") == "OPENAI_API_KEY" assert mapping.get("Anthropic") == "ANTHROPIC_API_KEY" assert mapping.get("Google Generative AI") == "GOOGLE_API_KEY" assert mapping.get("Ollama") == "OLLAMA_BASE_URL" # IBM WatsonX should return primary secret (API key) assert mapping.get("IBM WatsonX") == "WATSONX_APIKEY" @pytest.mark.usefixtures("active_user") async def test_list_models_returns_live_ollama_models_when_configured(client: AsyncClient, logged_in_headers): """When Ollama is configured, list_models returns live models from get_live_models_for_provider, not static list.""" live_ollama_models = [ {"name": "llama3.2", "icon": "Ollama", "tool_calling": True}, {"name": "mistral", "icon": "Ollama", "tool_calling": True}, ] async def mock_get_enabled_providers(*_args, **_kwargs): return { "enabled_providers": ["Ollama"], "provider_status": {"Ollama": True}, } def mock_get_live_models(_user_id, provider, 
model_type="llm"): if provider == "Ollama" and model_type == "llm": return live_ollama_models return [] with ( mock.patch( "langflow.api.v1.models.get_enabled_providers", side_effect=mock_get_enabled_providers, ), mock.patch( "lfx.base.models.model_utils.get_live_models_for_provider", side_effect=mock_get_live_models, ), ): response = await client.get("api/v1/models", headers=logged_in_headers) assert response.status_code == status.HTTP_200_OK data = response.json() ollama_provider = next((p for p in data if p.get("provider") == "Ollama"), None) assert ollama_provider is not None model_names = [m["model_name"] for m in ollama_provider["models"]] assert set(model_names) == {"llama3.2", "mistral"} assert len(model_names) == 2 assert ollama_provider["num_models"] == 2 @pytest.mark.usefixtures("active_user") async def test_list_models_ollama_empty_when_live_fetch_returns_empty(client: AsyncClient, logged_in_headers): """When Ollama is configured but live fetch returns no models, Ollama should have no models (no static fallback).""" async def mock_get_enabled_providers(*_args, **_kwargs): return { "enabled_providers": ["Ollama"], "provider_status": {"Ollama": True}, } with ( mock.patch( "langflow.api.v1.models.get_enabled_providers", side_effect=mock_get_enabled_providers, ), mock.patch( "lfx.base.models.model_utils.get_live_models_for_provider", return_value=[], ), ): response = await client.get("api/v1/models", headers=logged_in_headers) assert response.status_code == status.HTTP_200_OK data = response.json() ollama_provider = next((p for p in data if p.get("provider") == "Ollama"), None) assert ollama_provider is not None assert len(ollama_provider["models"]) == 0 assert ollama_provider["num_models"] == 0
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/api/v1/test_models_enabled_providers.py", "license": "MIT License", "lines": 462, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/test_models_api.py
import pytest
from httpx import AsyncClient


def _flatten_models(result_json):
    """Yield each model dict from every provider entry in the response payload."""
    for provider_entry in result_json:
        for model_dict in provider_entry["models"]:
            yield model_dict


@pytest.mark.asyncio
async def test_models_endpoint_default(client: AsyncClient, logged_in_headers):
    """Default listing exposes the major providers and hides unsupported models."""
    response = await client.get("api/v1/models", headers=logged_in_headers)
    assert response.status_code == 200
    payload = response.json()
    seen_providers = {entry["provider"] for entry in payload}
    for expected in ("OpenAI", "Anthropic", "Google Generative AI"):
        assert expected in seen_providers
    for model in _flatten_models(payload):
        assert model["metadata"].get("not_supported", False) is False


@pytest.mark.asyncio
async def test_models_endpoint_filter_provider(client: AsyncClient, logged_in_headers):
    """Filtering by provider returns exactly one provider entry."""
    query = {"provider": "Anthropic"}
    response = await client.get("api/v1/models", params=query, headers=logged_in_headers)
    assert response.status_code == 200
    payload = response.json()
    assert len(payload) == 1
    assert payload[0]["provider"] == "Anthropic"


@pytest.mark.asyncio
async def test_models_endpoint_filter_model_type(client: AsyncClient, logged_in_headers):
    """Filtering by model_type yields only embedding models."""
    query = {"model_type": "embeddings"}
    response = await client.get("api/v1/models", params=query, headers=logged_in_headers)
    assert response.status_code == 200
    flattened = list(_flatten_models(response.json()))
    assert flattened, "Expected at least one embedding model through API"
    for model in flattened:
        assert model["metadata"].get("model_type", "llm") == "embeddings"
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/test_models_api.py", "license": "MIT License", "lines": 32, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
langflow-ai/langflow:src/backend/tests/unit/test_unified_models.py
from langflow.base.models.unified_models import get_unified_models_detailed


def _flatten_models(result):
    """Yield every model dict across all provider entries in *result*."""
    for provider_dict in result:
        yield from provider_dict["models"]


def test_default_providers_present():
    """The default listing includes the major built-in providers."""
    result = get_unified_models_detailed()
    providers = {entry["provider"] for entry in result}
    assert "OpenAI" in providers
    assert "Anthropic" in providers
    assert "Google Generative AI" in providers


def test_default_excludes_not_supported():
    """By default, models flagged not_supported should be absent."""
    result = get_unified_models_detailed()
    for model in _flatten_models(result):
        assert model["metadata"].get("not_supported", False) is False


def test_default_excludes_deprecated():
    """By default, models flagged deprecated should be absent."""
    result = get_unified_models_detailed()
    for model in _flatten_models(result):
        assert model["metadata"].get("deprecated", False) is False


def test_include_deprecated_parameter_returns_deprecated_models():
    """When explicitly requested, at least one deprecated model should be present."""
    result = get_unified_models_detailed(include_deprecated=True)
    deprecated_models = [m for m in _flatten_models(result) if m["metadata"].get("deprecated", False)]
    assert deprecated_models, "Expected at least one deprecated model when include_deprecated=True"

    # Sanity check: restricting by provider that is known to have deprecated entries (e.g., Anthropic)
    result_anthropic = get_unified_models_detailed(providers=["Anthropic"], include_deprecated=True)
    anthropic_deprecated = [m for m in _flatten_models(result_anthropic) if m["metadata"].get("deprecated", False)]
    assert anthropic_deprecated, "Expected deprecated Anthropic models when include_deprecated=True"


def test_filter_by_provider():
    """Filtering by a single provider returns only that provider's entry."""
    result = get_unified_models_detailed(provider="Anthropic")
    # Only one provider should be returned
    assert len(result) == 1
    assert result[0]["provider"] == "Anthropic"
    # FIX: the previous per-model loop asserted the constant expression
    # `result[0]["provider"] == "Anthropic"` without referencing the loop
    # variable, so it could never fail. Models are nested under the provider
    # entry, so provider membership is already guaranteed; assert something
    # meaningful instead — the filtered provider actually has models.
    models = list(_flatten_models(result))
    assert models, "Expected at least one Anthropic model"


def test_filter_by_model_name():
    """Filtering by an exact model name returns a single provider/model pair."""
    target = "gpt-4"
    result = get_unified_models_detailed(model_name=target)
    # Should only include OpenAI provider with the single model
    assert len(result) == 1
    provider_dict = result[0]
    assert provider_dict["provider"] == "OpenAI"
    assert len(provider_dict["models"]) == 1
    assert provider_dict["models"][0]["model_name"] == target


def test_filter_by_metadata():
    """Filtering on a metadata flag (tool_calling) returns only matching models."""
    # Require tool_calling support
    result = get_unified_models_detailed(tool_calling=True)
    assert result, "Expected at least one model supporting tool calling"
    for model in _flatten_models(result):
        assert model["metadata"].get("tool_calling", False) is True


def test_filter_by_model_type_embeddings():
    """Filtering by model_type returns only embedding models."""
    result = get_unified_models_detailed(model_type="embeddings")
    models = list(_flatten_models(result))
    assert models, "Expected at least one embedding model"
    for model in models:
        assert model["metadata"].get("model_type", "llm") == "embeddings"
{ "repo_id": "langflow-ai/langflow", "file_path": "src/backend/tests/unit/test_unified_models.py", "license": "MIT License", "lines": 59, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test