index
int64
0
0
repo_id
stringclasses
596 values
file_path
stringlengths
31
168
content
stringlengths
1
6.2M
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/query_constructors/test_supabase.py
"""Unit tests for the Supabase structured-query translator."""

from typing import Dict, Tuple

from langchain_core.structured_query import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
)

from langchain_community.query_constructors.supabase import SupabaseVectorTranslator

DEFAULT_TRANSLATOR = SupabaseVectorTranslator()


def test_visit_comparison() -> None:
    """A list-valued comparison expands into an and() of per-element filters."""
    comparison = Comparison(comparator=Comparator.LT, attribute="foo", value=["1", "2"])
    result = DEFAULT_TRANSLATOR.visit_comparison(comparison)
    assert result == "and(metadata->>foo.lt.1,metadata->>foo.lt.2)"


def test_visit_operation() -> None:
    """An AND over mixed comparisons renders as a single and(...) expression."""
    operation = Operation(
        operator=Operator.AND,
        arguments=[
            Comparison(comparator=Comparator.LT, attribute="foo", value=2),
            Comparison(comparator=Comparator.EQ, attribute="bar", value="baz"),
            Comparison(comparator=Comparator.LT, attribute="abc", value=["1", "2"]),
        ],
    )
    result = DEFAULT_TRANSLATOR.visit_operation(operation)
    assert result == (
        "and("
        "metadata->foo.lt.2,"
        "metadata->>bar.eq.baz,"
        "and(metadata->>abc.lt.1,metadata->>abc.lt.2)"
        ")"
    )


def test_visit_structured_query() -> None:
    """Structured queries translate to (query, search-kwargs) pairs."""
    query = "What is the capital of France?"

    # No filter: the kwargs dict is empty.
    no_filter = StructuredQuery(query=query, filter=None)
    expected: Tuple[str, Dict] = (query, {})
    assert DEFAULT_TRANSLATOR.visit_structured_query(no_filter) == expected

    # Single comparison filter.
    comparison = Comparison(comparator=Comparator.LT, attribute="foo", value=["1", "2"])
    with_comparison = StructuredQuery(query=query, filter=comparison)
    assert DEFAULT_TRANSLATOR.visit_structured_query(with_comparison) == (
        query,
        {"postgrest_filter": "and(metadata->>foo.lt.1,metadata->>foo.lt.2)"},
    )

    # Nested operation filter.
    operation = Operation(
        operator=Operator.AND,
        arguments=[
            Comparison(comparator=Comparator.LT, attribute="foo", value=2),
            Comparison(comparator=Comparator.EQ, attribute="bar", value="baz"),
            Comparison(comparator=Comparator.LT, attribute="abc", value=["1", "2"]),
        ],
    )
    with_operation = StructuredQuery(query=query, filter=operation)
    assert DEFAULT_TRANSLATOR.visit_structured_query(with_operation) == (
        query,
        {
            "postgrest_filter": (
                "and(metadata->foo.lt.2,metadata->>bar.eq.baz,"
                "and(metadata->>abc.lt.1,metadata->>abc.lt.2))"
            )
        },
    )
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/query_constructors/test_redis.py
"""Unit tests for the Redis structured-query translator."""

from typing import Dict, Tuple

import pytest
from langchain_core.structured_query import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
)

from langchain_community.query_constructors.redis import RedisTranslator
from langchain_community.vectorstores.redis.filters import (
    RedisFilterExpression,
    RedisNum,
    RedisTag,
    RedisText,
)
from langchain_community.vectorstores.redis.schema import (
    NumericFieldSchema,
    RedisModel,
    TagFieldSchema,
    TextFieldSchema,
)


@pytest.fixture
def translator() -> RedisTranslator:
    """Translator over a schema with one text, one numeric and one tag field."""
    schema = RedisModel(
        text=[TextFieldSchema(name="bar")],
        numeric=[NumericFieldSchema(name="foo")],
        tag=[TagFieldSchema(name="tag")],
    )
    return RedisTranslator(schema)


@pytest.mark.parametrize(
    ("comp", "expected"),
    [
        (
            Comparison(comparator=Comparator.LT, attribute="foo", value=1),
            RedisNum("foo") < 1,
        ),
        (
            Comparison(comparator=Comparator.LIKE, attribute="bar", value="baz*"),
            RedisText("bar") % "baz*",
        ),
        (
            Comparison(
                comparator=Comparator.CONTAIN, attribute="tag", value=["blue", "green"]
            ),
            RedisTag("tag") == ["blue", "green"],
        ),
    ],
)
def test_visit_comparison(
    translator: RedisTranslator, comp: Comparison, expected: RedisFilterExpression
) -> None:
    """Each comparator maps onto the equivalent Redis filter expression.

    Fix: the body previously re-assigned ``comp`` and ``expected`` to
    hard-coded LT values, so the LIKE and CONTAIN parametrized cases were
    never actually exercised.  Use the parametrized arguments directly.
    """
    actual = translator.visit_comparison(comp)
    assert str(expected) == str(actual)


def test_visit_operation(translator: RedisTranslator) -> None:
    """An AND operation nests pairwise into Redis filter conjunctions."""
    op = Operation(
        operator=Operator.AND,
        arguments=[
            Comparison(comparator=Comparator.LT, attribute="foo", value=2),
            Comparison(comparator=Comparator.EQ, attribute="bar", value="baz"),
            Comparison(comparator=Comparator.EQ, attribute="tag", value="high"),
        ],
    )
    expected = (RedisNum("foo") < 2) & (
        (RedisText("bar") == "baz") & (RedisTag("tag") == "high")
    )
    actual = translator.visit_operation(op)
    assert str(expected) == str(actual)


def test_visit_structured_query_no_filter(translator: RedisTranslator) -> None:
    """Without a filter the translation is (query, empty kwargs)."""
    query = "What is the capital of France?"
    structured_query = StructuredQuery(
        query=query,
        filter=None,
    )
    expected: Tuple[str, Dict] = (query, {})
    actual = translator.visit_structured_query(structured_query)
    assert expected == actual


def test_visit_structured_query_comparison(translator: RedisTranslator) -> None:
    """A comparison filter ends up under the 'filter' search kwarg."""
    query = "What is the capital of France?"
    comp = Comparison(comparator=Comparator.GTE, attribute="foo", value=2)
    structured_query = StructuredQuery(
        query=query,
        filter=comp,
    )
    expected_filter = RedisNum("foo") >= 2
    actual_query, actual_filter = translator.visit_structured_query(structured_query)
    assert actual_query == query
    assert str(actual_filter["filter"]) == str(expected_filter)


def test_visit_structured_query_operation(translator: RedisTranslator) -> None:
    """An OR operation filter ends up under the 'filter' search kwarg."""
    query = "What is the capital of France?"
    op = Operation(
        operator=Operator.OR,
        arguments=[
            Comparison(comparator=Comparator.EQ, attribute="foo", value=2),
            Comparison(comparator=Comparator.CONTAIN, attribute="bar", value="baz"),
        ],
    )
    structured_query = StructuredQuery(
        query=query,
        filter=op,
    )
    expected_filter = (RedisNum("foo") == 2) | (RedisText("bar") == "baz")
    actual_query, actual_filter = translator.visit_structured_query(structured_query)
    assert actual_query == query
    assert str(actual_filter["filter"]) == str(expected_filter)
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/query_constructors/test_neo4j.py
"""Unit tests for the Neo4j structured-query translator."""

from typing import Dict, Tuple

from langchain_core.structured_query import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
)

from langchain_community.query_constructors.neo4j import Neo4jTranslator

DEFAULT_TRANSLATOR = Neo4jTranslator()


def test_visit_comparison() -> None:
    """A comparison becomes a {attr: {$op: value}} dict."""
    comparison = Comparison(comparator=Comparator.LT, attribute="foo", value=["1", "2"])
    result = DEFAULT_TRANSLATOR.visit_comparison(comparison)
    assert result == {"foo": {"$lt": ["1", "2"]}}


def test_visit_operation() -> None:
    """An AND operation becomes a {$and: [...]} dict of translated arguments."""
    operation = Operation(
        operator=Operator.AND,
        arguments=[
            Comparison(comparator=Comparator.LT, attribute="foo", value=2),
            Comparison(comparator=Comparator.EQ, attribute="bar", value="baz"),
            Comparison(comparator=Comparator.LT, attribute="abc", value=["1", "2"]),
        ],
    )
    result = DEFAULT_TRANSLATOR.visit_operation(operation)
    assert result == {
        "$and": [
            {"foo": {"$lt": 2}},
            {"bar": {"$eq": "baz"}},
            {"abc": {"$lt": ["1", "2"]}},
        ]
    }


def test_visit_structured_query() -> None:
    """Structured queries translate to (query, {'filter': ...}) pairs."""
    query = "What is the capital of France?"

    # No filter: empty kwargs.
    no_filter = StructuredQuery(query=query, filter=None)
    expected: Tuple[str, Dict] = (query, {})
    assert DEFAULT_TRANSLATOR.visit_structured_query(no_filter) == expected

    # Single comparison filter.
    comparison = Comparison(comparator=Comparator.LT, attribute="foo", value=["1", "2"])
    with_comparison = StructuredQuery(query=query, filter=comparison)
    assert DEFAULT_TRANSLATOR.visit_structured_query(with_comparison) == (
        query,
        {"filter": {"foo": {"$lt": ["1", "2"]}}},
    )

    # Nested operation filter.
    operation = Operation(
        operator=Operator.AND,
        arguments=[
            Comparison(comparator=Comparator.LT, attribute="foo", value=2),
            Comparison(comparator=Comparator.EQ, attribute="bar", value="baz"),
            Comparison(comparator=Comparator.LT, attribute="abc", value=["1", "2"]),
        ],
    )
    with_operation = StructuredQuery(query=query, filter=operation)
    assert DEFAULT_TRANSLATOR.visit_structured_query(with_operation) == (
        query,
        {
            "filter": {
                "$and": [
                    {"foo": {"$lt": 2}},
                    {"bar": {"$eq": "baz"}},
                    {"abc": {"$lt": ["1", "2"]}},
                ]
            }
        },
    )
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/query_constructors/test_dashvector.py
"""Unit tests for the DashVector structured-query translator."""

from typing import Any, Tuple

import pytest
from langchain_core.structured_query import (
    Comparator,
    Comparison,
    Operation,
    Operator,
)

from langchain_community.query_constructors.dashvector import DashvectorTranslator

DEFAULT_TRANSLATOR = DashvectorTranslator()


@pytest.mark.parametrize(
    "triplet",
    [
        (Comparator.EQ, 2, "foo = 2"),
        (Comparator.LT, 2, "foo < 2"),
        (Comparator.LTE, 2, "foo <= 2"),
        (Comparator.GT, 2, "foo > 2"),
        (Comparator.GTE, 2, "foo >= 2"),
        (Comparator.LIKE, "bar", "foo LIKE '%bar%'"),
    ],
)
def test_visit_comparison(triplet: Tuple[Comparator, Any, str]) -> None:
    """Each comparator renders with DashVector's SQL-like syntax."""
    comparator, value, expected = triplet
    comparison = Comparison(comparator=comparator, attribute="foo", value=value)
    assert DEFAULT_TRANSLATOR.visit_comparison(comparison) == expected


@pytest.mark.parametrize(
    "triplet",
    [
        (Operator.AND, "foo < 2 AND bar = 'baz'"),
        (Operator.OR, "foo < 2 OR bar = 'baz'"),
    ],
)
def test_visit_operation(triplet: Tuple[Operator, str]) -> None:
    """AND/OR operations join their arguments with the matching keyword."""
    operator, expected = triplet
    operation = Operation(
        operator=operator,
        arguments=[
            Comparison(comparator=Comparator.LT, attribute="foo", value=2),
            Comparison(comparator=Comparator.EQ, attribute="bar", value="baz"),
        ],
    )
    assert DEFAULT_TRANSLATOR.visit_operation(operation) == expected
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/graphs/test_ontotext_graphdb_graph.py
"""Unit tests for OntotextGraphDBGraph query validation and construction."""

import os
import tempfile
import unittest

import pytest


class TestOntotextGraphDBGraph(unittest.TestCase):
    def test_import(self) -> None:
        """The class is importable from the public graphs package."""
        from langchain_community.graphs import OntotextGraphDBGraph  # noqa: F401

    @pytest.mark.requires("rdflib")
    def test_validate_user_query_wrong_type(self) -> None:
        """A non-string ontology query is rejected with a TypeError."""
        from langchain_community.graphs import OntotextGraphDBGraph

        with self.assertRaises(TypeError) as e:
            OntotextGraphDBGraph._validate_user_query(
                [  # type: ignore[arg-type]
                    "PREFIX starwars: <https://swapi.co/ontology/> "
                    "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
                    "DESCRIBE starwars: ?term "
                    "WHERE {?term rdfs:isDefinedBy starwars: }"
                ]
            )
        self.assertEqual("Ontology query must be provided as string.", str(e.exception))

    @pytest.mark.requires("rdflib")
    def test_validate_user_query_invalid_sparql_syntax(self) -> None:
        """Malformed SPARQL (missing closing brace) is rejected."""
        from langchain_community.graphs import OntotextGraphDBGraph

        with self.assertRaises(ValueError) as e:
            OntotextGraphDBGraph._validate_user_query(
                "CONSTRUCT {?s ?p ?o} FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o"
            )
        self.assertEqual(
            "('Ontology query is not a valid SPARQL query.', "
            "Expected ConstructQuery, "
            "found end of text (at char 70), (line:1, col:71))",
            str(e.exception),
        )

    @pytest.mark.requires("rdflib")
    def test_validate_user_query_invalid_query_type_select(self) -> None:
        """SELECT queries are not accepted as ontology queries."""
        from langchain_community.graphs import OntotextGraphDBGraph

        with self.assertRaises(ValueError) as e:
            OntotextGraphDBGraph._validate_user_query("SELECT * { ?s ?p ?o }")
        self.assertEqual(
            "Invalid query type. Only CONSTRUCT queries are supported.",
            str(e.exception),
        )

    @pytest.mark.requires("rdflib")
    def test_validate_user_query_invalid_query_type_ask(self) -> None:
        """ASK queries are not accepted as ontology queries."""
        from langchain_community.graphs import OntotextGraphDBGraph

        with self.assertRaises(ValueError) as e:
            OntotextGraphDBGraph._validate_user_query("ASK { ?s ?p ?o }")
        self.assertEqual(
            "Invalid query type. Only CONSTRUCT queries are supported.",
            str(e.exception),
        )

    @pytest.mark.requires("rdflib")
    def test_validate_user_query_invalid_query_type_describe(self) -> None:
        """DESCRIBE queries are not accepted as ontology queries."""
        from langchain_community.graphs import OntotextGraphDBGraph

        with self.assertRaises(ValueError) as e:
            OntotextGraphDBGraph._validate_user_query(
                "PREFIX swapi: <https://swapi.co/ontology/> "
                "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
                "DESCRIBE ?term WHERE { ?term rdfs:isDefinedBy swapi: }"
            )
        self.assertEqual(
            "Invalid query type. Only CONSTRUCT queries are supported.",
            str(e.exception),
        )

    @pytest.mark.requires("rdflib")
    def test_validate_user_query_construct(self) -> None:
        """A well-formed CONSTRUCT query passes validation without raising."""
        from langchain_community.graphs import OntotextGraphDBGraph

        OntotextGraphDBGraph._validate_user_query(
            "CONSTRUCT {?s ?p ?o} FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}"
        )

    @pytest.mark.requires("rdflib")
    def test_check_connectivity(self) -> None:
        """An unreachable repository endpoint yields a descriptive ValueError."""
        from langchain_community.graphs import OntotextGraphDBGraph

        with self.assertRaises(ValueError) as e:
            OntotextGraphDBGraph(
                query_endpoint="http://localhost:7200/repositories/non-existing-repository",
                query_ontology="PREFIX swapi: <https://swapi.co/ontology/> "
                "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
                "DESCRIBE ?term WHERE {?term rdfs:isDefinedBy swapi: }",
            )
        self.assertEqual(
            "Could not query the provided endpoint. "
            "Please, check, if the value of the provided "
            "query_endpoint points to the right repository. "
            "If GraphDB is secured, please, make sure that the environment variables "
            "'GRAPHDB_USERNAME' and 'GRAPHDB_PASSWORD' are set.",
            str(e.exception),
        )

    @pytest.mark.requires("rdflib")
    def test_local_file_does_not_exist(self) -> None:
        """Loading a missing ontology file raises FileNotFoundError."""
        from langchain_community.graphs import OntotextGraphDBGraph

        non_existing_file = os.path.join("non", "existing", "path", "to", "file.ttl")
        with self.assertRaises(FileNotFoundError) as e:
            OntotextGraphDBGraph._load_ontology_schema_from_file(non_existing_file)
        self.assertEqual(f"File {non_existing_file} does not exist.", str(e.exception))

    @pytest.mark.requires("rdflib")
    def test_local_file_no_access(self) -> None:
        """An unreadable ontology file raises PermissionError."""
        from langchain_community.graphs import OntotextGraphDBGraph

        with tempfile.NamedTemporaryFile() as tmp_file:
            tmp_file_name = tmp_file.name

            # Set file permissions to write and execute only
            os.chmod(tmp_file_name, 0o300)

            with self.assertRaises(PermissionError) as e:
                OntotextGraphDBGraph._load_ontology_schema_from_file(tmp_file_name)
            self.assertEqual(
                f"Read permission for {tmp_file_name} is restricted", str(e.exception)
            )

    @pytest.mark.requires("rdflib")
    def test_local_file_bad_syntax(self) -> None:
        """A syntactically invalid .trig file raises a parse ValueError."""
        from langchain_community.graphs import OntotextGraphDBGraph

        with tempfile.TemporaryDirectory() as tempdir:
            tmp_file_path = os.path.join(tempdir, "starwars-ontology.trig")
            with open(tmp_file_path, "w") as tmp_file:
                tmp_file.write("invalid trig")

            with self.assertRaises(ValueError) as e:
                OntotextGraphDBGraph._load_ontology_schema_from_file(tmp_file_path)
            self.assertEqual(
                f"('Invalid file format for {tmp_file_path} : '"
                ", BadSyntax('', 0, 'invalid trig', 0, "
                "'expected directive or statement'))",
                str(e.exception),
            )

    @pytest.mark.requires("rdflib")
    def test_both_query_and_local_file_provided(self) -> None:
        """Supplying both a query and a local file is rejected."""
        from langchain_community.graphs import OntotextGraphDBGraph

        with self.assertRaises(ValueError) as e:
            OntotextGraphDBGraph(
                query_endpoint="http://localhost:7200/repositories/non-existing-repository",
                query_ontology="CONSTRUCT {?s ?p ?o}"
                "FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}",
                local_file="starwars-ontology-wrong.trig",
            )
        self.assertEqual(
            "Both file and query provided. Only one is allowed.", str(e.exception)
        )

    @pytest.mark.requires("rdflib")
    def test_nor_query_nor_local_file_provided(self) -> None:
        """Supplying neither a query nor a local file is rejected."""
        from langchain_community.graphs import OntotextGraphDBGraph

        with self.assertRaises(ValueError) as e:
            OntotextGraphDBGraph(
                query_endpoint="http://localhost:7200/repositories/non-existing-repository",
            )
        self.assertEqual(
            "Neither file nor query provided. One is required.", str(e.exception)
        )
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/graphs/test_imports.py
"""Check that the public graphs API surface matches expectations."""

from langchain_community.graphs import __all__, _module_lookup

EXPECTED_ALL = [
    "MemgraphGraph",
    "NetworkxEntityGraph",
    "Neo4jGraph",
    "NebulaGraph",
    "BaseNeptuneGraph",
    "NeptuneAnalyticsGraph",
    "NeptuneGraph",
    "NeptuneRdfGraph",
    "KuzuGraph",
    "HugeGraph",
    "RdfGraph",
    "ArangoGraph",
    "FalkorDBGraph",
    "TigerGraph",
    "OntotextGraphDBGraph",
    "GremlinGraph",
]


def test_all_imports() -> None:
    """__all__ must match both the expected list and the lazy-import table."""
    exported = set(__all__)
    assert exported == set(EXPECTED_ALL)
    assert exported == set(_module_lookup.keys())
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/graphs/test_neo4j_graph.py
"""Unit tests for value_sanitize, which drops oversized lists from results."""

from langchain_community.graphs.neo4j_graph import value_sanitize


def test_value_sanitize_with_small_list():  # type: ignore[no-untyped-def]
    """Lists under the size limit are preserved unchanged."""
    # Fix: the comment previously claimed "list size > LIST_LIMIT" for this
    # 15-element list, which is *below* the limit and therefore kept.
    small_list = list(range(15))  # list size < LIST_LIMIT
    input_dict = {"key1": "value1", "small_list": small_list}
    expected_output = {"key1": "value1", "small_list": small_list}
    assert value_sanitize(input_dict) == expected_output


def test_value_sanitize_with_oversized_list():  # type: ignore[no-untyped-def]
    """Top-level oversized lists are removed from the dict."""
    oversized_list = list(range(150))  # list size > LIST_LIMIT
    input_dict = {"key1": "value1", "oversized_list": oversized_list}
    expected_output = {
        "key1": "value1"
        # oversized_list should not be included
    }
    assert value_sanitize(input_dict) == expected_output


def test_value_sanitize_with_nested_oversized_list():  # type: ignore[no-untyped-def]
    """Oversized lists nested inside dicts are removed from those dicts."""
    oversized_list = list(range(150))  # list size > LIST_LIMIT
    input_dict = {"key1": "value1", "oversized_list": {"key": oversized_list}}
    expected_output = {"key1": "value1", "oversized_list": {}}
    assert value_sanitize(input_dict) == expected_output


def test_value_sanitize_with_dict_in_list():  # type: ignore[no-untyped-def]
    """Dicts inside small lists are sanitized in place."""
    oversized_list = list(range(150))  # list size > LIST_LIMIT
    input_dict = {"key1": "value1", "oversized_list": [1, 2, {"key": oversized_list}]}
    expected_output = {"key1": "value1", "oversized_list": [1, 2, {}]}
    assert value_sanitize(input_dict) == expected_output


def test_value_sanitize_with_dict_in_nested_list():  # type: ignore[no-untyped-def]
    """Sanitization recurses through arbitrarily deep list nesting."""
    input_dict = {
        "key1": "value1",
        "deeply_nested_lists": [[[[{"final_nested_key": list(range(200))}]]]],
    }
    expected_output = {"key1": "value1", "deeply_nested_lists": [[[[{}]]]]}
    assert value_sanitize(input_dict) == expected_output
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/graphs/test_neptune_graph.py
def test_import() -> None:
    """Neptune graph classes are importable from the public graphs package."""
    from langchain_community.graphs import (
        NeptuneGraph,  # noqa: F401
        NeptuneRdfGraph,  # noqa: F401
    )
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/graphs/test_age_graph.py
"""Unit tests for AGEGraph helper methods."""

import re
import unittest
from collections import namedtuple
from typing import Any, Dict, List

from langchain_community.graphs.age_graph import AGEGraph


class TestAGEGraph(unittest.TestCase):
    def test_format_triples(self) -> None:
        """Triples render as backtick-quoted Cypher-style patterns."""
        triples = [
            {"start": "from_a", "type": "edge_a", "end": "to_a"},
            {"start": "from_b", "type": "edge_b", "end": "to_b"},
        ]
        expected = [
            "(:`from_a`)-[:`edge_a`]->(:`to_a`)",
            "(:`from_b`)-[:`edge_b`]->(:`to_b`)",
        ]
        self.assertEqual(AGEGraph._format_triples(triples), expected)

    def test_get_col_name(self) -> None:
        """Column names are derived from RETURN expressions and positions."""
        cases = [
            ("a", 1),
            ("a as b", 1),
            (" c ", 1),
            (" c as d ", 1),
            ("sum(a)", 1),
            ("sum(a) as b", 1),
            ("count(*)", 1),
            ("count(*) as cnt", 1),
            ("true", 1),
            ("false", 1),
            ("null", 1),
        ]
        expected = [
            "a",
            "b",
            "c",
            "d",
            "sum_a",
            "b",
            "count_*",
            "cnt",
            "column_1",
            "column_1",
            "column_1",
        ]
        for args, want in zip(cases, expected):
            self.assertEqual(AGEGraph._get_col_name(*args), want)

    def test_wrap_query(self) -> None:
        """Cypher queries are wrapped into ag_catalog.cypher() SQL."""
        queries = [
            """
            MATCH (keanu:Person {name:'Keanu Reeves'})
            RETURN keanu.name AS name, keanu.born AS born
            """,
            """
            MERGE (n:a {id: 1})
            """,
        ]
        expected = [
            """
            SELECT * FROM ag_catalog.cypher('test', $$
            MATCH (keanu:Person {name:'Keanu Reeves'})
            RETURN keanu.name AS name, keanu.born AS born
            $$) AS (name agtype, born agtype);
            """,
            """
            SELECT * FROM ag_catalog.cypher('test', $$
            MERGE (n:a {id: 1})
            $$) AS (a agtype);
            """,
        ]
        for query, want in zip(queries, expected):
            # Layout is irrelevant here: compare with all whitespace removed.
            self.assertEqual(
                re.sub(r"\s", "", AGEGraph._wrap_query(query, "test")),
                re.sub(r"\s", "", want),
            )

        # RETURN * yields no named columns to map, so wrapping must fail.
        with self.assertRaises(ValueError):
            AGEGraph._wrap_query(
                """
                MATCH ()
                RETURN *
                """,
                "test",
            )

    def test_format_properties(self) -> None:
        """Property maps render with backticked keys and JSON-ish values."""
        props: List[Dict[str, Any]] = [{}, {"a": "b"}, {"a": "b", "c": 1, "d": True}]
        expected = ["{}", '{`a`: "b"}', '{`a`: "b", `c`: 1, `d`: true}']
        for value, want in zip(props, expected):
            self.assertEqual(AGEGraph._format_properties(value), want)

    def test_clean_graph_labels(self) -> None:
        """Spaces and special characters in labels are replaced with '_'."""
        labels = ["label", "label 1", "label#$"]
        expected = ["label", "label_1", "label_"]
        for label, want in zip(labels, expected):
            self.assertEqual(AGEGraph.clean_graph_labels(label), want)

    def test_record_to_dict(self) -> None:
        """agtype vertex/edge strings decode into plain Python values."""
        Record = namedtuple("Record", ["node1", "edge", "node2"])
        r = Record(
            node1='{"id": 1, "label": "label1", "properties": {"prop": "a"}}::vertex',
            edge='{"id": 3, "label": "edge", "end_id": 2, '
            '"start_id": 1, "properties": {"test": "abc"}}::edge',
            node2='{"id": 2, "label": "label1", "properties": {"prop": "b"}}::vertex',
        )
        result = AGEGraph._record_to_dict(r)
        expected = {
            "node1": {"prop": "a"},
            "edge": ({"prop": "a"}, "edge", {"prop": "b"}),
            "node2": {"prop": "b"},
        }
        self.assertEqual(result, expected)

        # Scalar agtype values decode to the matching Python scalar types.
        Record2 = namedtuple("Record2", ["string", "int", "float", "bool", "null"])
        r2 = Record2('"test"', "1", "1.5", "true", None)
        expected2 = {
            "string": "test",
            "int": 1,
            "float": 1.5,
            "bool": True,
            "null": None,
        }
        self.assertEqual(AGEGraph._record_to_dict(r2), expected2)
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/agents/test_sql.py
from langchain_community.agent_toolkits import SQLDatabaseToolkit, create_sql_agent
from langchain_community.utilities.sql_database import SQLDatabase

from tests.unit_tests.llms.fake_llm import FakeLLM


def test_create_sql_agent() -> None:
    """A SQL agent over an in-memory sqlite DB returns the fake LLM's answer."""
    db = SQLDatabase.from_uri("sqlite:///:memory:")
    llm = FakeLLM(queries={"foo": "Final Answer: baz"}, sequential_responses=True)
    toolkit = SQLDatabaseToolkit(db=db, llm=llm)
    agent_executor = create_sql_agent(llm=llm, toolkit=toolkit)
    assert agent_executor.run("hello") == "baz"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/agents/test_openai_assistant.py
"""Unit tests for OpenAIAssistantV2Runnable run creation."""

from typing import Any
from unittest.mock import AsyncMock, MagicMock

import pytest

from langchain_community.agents.openai_assistant import OpenAIAssistantV2Runnable


def _create_mock_client(*args: Any, use_async: bool = False, **kwargs: Any) -> Any:
    """Build a mock OpenAI client whose runs.create call can be inspected."""
    client = AsyncMock() if use_async else MagicMock()
    client.beta.threads.runs.create = MagicMock(return_value=None)  # type: ignore
    return client


@pytest.mark.requires("openai")
def test_set_run_truncation_params() -> None:
    """Truncation and token-limit params are forwarded to runs.create.

    Fix: the local variable was renamed from ``input`` to ``run_input`` to
    stop shadowing the ``input`` builtin.
    """
    client = _create_mock_client()
    assistant = OpenAIAssistantV2Runnable(assistant_id="assistant_xyz", client=client)
    run_input = {
        "content": "AI question",
        "thread_id": "thread_xyz",
        "instructions": "You're a helpful assistant; answer questions as best you can.",
        "model": "gpt-4o",
        "max_prompt_tokens": 2000,
        "truncation_strategy": {"type": "last_messages", "last_messages": 10},
    }
    expected_response = {
        "assistant_id": "assistant_xyz",
        "instructions": "You're a helpful assistant; answer questions as best you can.",
        "model": "gpt-4o",
        "max_prompt_tokens": 2000,
        "truncation_strategy": {"type": "last_messages", "last_messages": 10},
    }

    assistant._create_run(input=run_input)

    # The run must be created with exactly the expected keyword arguments.
    _, kwargs = client.beta.threads.runs.create.call_args
    assert kwargs == expected_response
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/agents/test_tools.py
"""Test tool utils."""

import unittest
from typing import Any, Type
from unittest.mock import MagicMock, Mock

import pytest
from langchain.agents.agent import Agent
from langchain.agents.chat.base import ChatAgent
from langchain.agents.conversational.base import ConversationalAgent
from langchain.agents.conversational_chat.base import ConversationalChatAgent
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.react.base import ReActDocstoreAgent, ReActTextWorldAgent
from langchain.agents.self_ask_with_search.base import SelfAskWithSearchAgent
from langchain_core.tools import Tool, ToolException, tool

from langchain_community.agent_toolkits.load_tools import load_tools
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler


@pytest.mark.parametrize(
    "agent_cls",
    [
        ZeroShotAgent,
        ChatAgent,
        ConversationalChatAgent,
        ConversationalAgent,
        ReActDocstoreAgent,
        ReActTextWorldAgent,
        SelfAskWithSearchAgent,
    ],
)
def test_single_input_agent_raises_error_on_structured_tool(
    agent_cls: Type[Agent],
) -> None:
    """Test that older agents raise errors on older tools."""

    @tool
    def the_tool(foo: str, bar: str) -> str:
        """Return the concat of foo and bar."""
        return foo + bar

    with pytest.raises(
        ValueError,
        match=f"{agent_cls.__name__} does not support"  # type: ignore
        f" multi-input tool the_tool.",
    ):
        agent_cls.from_llm_and_tools(MagicMock(), [the_tool])  # type: ignore


def test_tool_no_args_specified_assumes_str() -> None:
    """Older tools could assume *args and **kwargs were passed in."""

    def ambiguous_function(*args: Any, **kwargs: Any) -> str:
        """An ambiguously defined function."""
        return args[0]

    some_tool = Tool(
        name="chain_run",
        description="Run the chain",
        func=ambiguous_function,
    )
    # A tool without an explicit schema defaults to a single string input.
    assert some_tool.args == {"tool_input": {"type": "string"}}
    assert some_tool.run("foobar") == "foobar"
    assert some_tool.run({"tool_input": "foobar"}) == "foobar"
    with pytest.raises(ToolException, match="Too many arguments to single-input tool"):
        some_tool.run({"tool_input": "foobar", "other_input": "bar"})


def test_load_tools_with_callback_manager_raises_deprecation_warning() -> None:
    """Test load_tools raises a deprecation for old callback manager kwarg."""
    callback_manager = MagicMock()
    with pytest.warns(DeprecationWarning, match="callback_manager is deprecated"):
        tools = load_tools(
            ["requests_get"],
            callback_manager=callback_manager,
            allow_dangerous_tools=True,
        )
    assert len(tools) == 1
    assert tools[0].callbacks == callback_manager


def test_load_tools_with_callbacks_is_called() -> None:
    """Test callbacks are called when provided to load_tools fn."""
    callbacks = [FakeCallbackHandler()]
    tools = load_tools(
        ["requests_get"],  # type: ignore
        callbacks=callbacks,  # type: ignore
        allow_dangerous_tools=True,
    )
    assert len(tools) == 1
    # Patch the requests.get() method to return a mock response
    with unittest.mock.patch(
        "langchain.requests.TextRequestsWrapper.get",
        return_value=Mock(text="Hello world!"),
    ):
        result = tools[0].run("https://www.google.com")
        assert result.text == "Hello world!"
    assert callbacks[0].tool_starts == 1
    assert callbacks[0].tool_ends == 1
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/agents/test_react.py
"""Unit tests for ReAct."""

from typing import Union

from langchain.agents.react.base import ReActChain, ReActDocstoreAgent
from langchain_core.agents import AgentAction
from langchain_core.documents import Document
from langchain_core.language_models import FakeListLLM
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.tools import Tool

from langchain_community.docstore.base import Docstore

_PAGE_CONTENT = """This is a page about LangChain.

It is a really cool framework.

What isn't there to love about langchain?

Made in 2022."""

_FAKE_PROMPT = PromptTemplate(input_variables=["input"], template="{input}")


class FakeDocstore(Docstore):
    """Fake docstore for testing purposes."""

    def search(self, search: str) -> Union[str, Document]:
        """Return the fake document."""
        document = Document(page_content=_PAGE_CONTENT)
        return document


def test_predict_until_observation_normal() -> None:
    """Test predict_until_observation when observation is made normally."""
    outputs = ["foo\nAction: Search[foo]"]
    fake_llm = FakeListLLM(responses=outputs)
    tools = [
        Tool(name="Search", func=lambda x: x, description="foo"),
        Tool(name="Lookup", func=lambda x: x, description="bar"),
    ]
    agent = ReActDocstoreAgent.from_llm_and_tools(fake_llm, tools)
    output = agent.plan([], input="")
    assert output == AgentAction("Search", "foo", outputs[0])


def test_react_chain() -> None:
    """Test react chain."""
    responses = [
        "I should probably search\nAction: Search[langchain]",
        "I should probably lookup\nAction: Lookup[made]",
        "Ah okay now I know the answer\nAction: Finish[2022]",
    ]
    fake_llm = FakeListLLM(responses=responses)
    react_chain = ReActChain(llm=fake_llm, docstore=FakeDocstore())
    assert react_chain.run("when was langchain made") == "2022"


def test_react_chain_bad_action() -> None:
    """Test react chain when bad action given."""
    bad_action_name = "BadAction"
    responses = [
        f"I'm turning evil\nAction: {bad_action_name}[langchain]",
        "Oh well\nAction: Finish[curses foiled again]",
    ]
    fake_llm = FakeListLLM(responses=responses)
    react_chain = ReActChain(llm=fake_llm, docstore=FakeDocstore())
    assert react_chain.run("when was langchain made") == "curses foiled again"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/vectorstores/test_hanavector.py
"""Test HanaVector functionality."""

from langchain_community.vectorstores import HanaDB


def test_int_sanitation_with_illegal_value() -> None:
    """A non-numeric string must be rejected with ValueError."""
    raised = False
    try:
        HanaDB._sanitize_int("HUGO")
    except ValueError:
        raised = True
    assert raised


def test_int_sanitation_with_legal_values() -> None:
    """Ints and numeric strings pass through as ints."""
    assert HanaDB._sanitize_int(42) == 42
    assert HanaDB._sanitize_int("21") == 21


def test_int_sanitation_with_negative_values() -> None:
    """-1 is accepted, both as int and as string."""
    assert HanaDB._sanitize_int(-1) == -1
    assert HanaDB._sanitize_int("-1") == -1


def test_int_sanitation_with_illegal_negative_value() -> None:
    """Values below -1 must be rejected with ValueError."""
    raised = False
    try:
        HanaDB._sanitize_int(-2)
    except ValueError:
        raised = True
    assert raised


def test_parse_float_array_from_string() -> None:
    """A bracketed comma-separated string parses to a float list."""
    assert HanaDB._parse_float_array_from_string("[0.1, 0.2, 0.3]") == [0.1, 0.2, 0.3]
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/vectorstores/test_tencentvectordb.py
"""Tests for translating structured-query filter strings to TencentVectorDB expressions."""

import importlib.util

from langchain_community.vectorstores.tencentvectordb import translate_filter


def test_translate_filter() -> None:
    """Nested and/or/eq/lt filters translate to a SQL-like expression."""
    raw_filter = (
        'and(or(eq("artist", "Taylor Swift"), '
        'eq("artist", "Katy Perry")), lt("length", 180))'
    )
    try:
        importlib.util.find_spec("langchain.chains.query_constructor.base")
        translate_filter(raw_filter)
    except ModuleNotFoundError:
        # langchain is unavailable: the call must keep raising, not pass silently.
        try:
            translate_filter(raw_filter)
        except ModuleNotFoundError:
            pass
        else:
            assert False
    else:
        result = translate_filter(raw_filter)
        expected = '(artist = "Taylor Swift" or artist = "Katy Perry") and length < 180'
        assert expected == result


def test_translate_filter_with_in_comparison() -> None:
    """An in(...) filter translates to an `in (...)` expression."""
    raw_filter = 'in("artist", ["Taylor Swift", "Katy Perry"])'
    try:
        importlib.util.find_spec("langchain.chains.query_constructor.base")
        translate_filter(raw_filter)
    except ModuleNotFoundError:
        # langchain is unavailable: the call must keep raising, not pass silently.
        try:
            translate_filter(raw_filter)
        except ModuleNotFoundError:
            pass
        else:
            assert False
    else:
        result = translate_filter(raw_filter)
        expected = 'artist in ("Taylor Swift", "Katy Perry")'
        assert expected == result
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/vectorstores/test_databricks_vector_search.py
"""Unit tests for DatabricksVectorSearch against a mocked VectorSearchIndex."""

import itertools
import random
import uuid
from typing import Dict, List, Optional, Set
from unittest.mock import MagicMock, patch

import pytest

from langchain_community.vectorstores import DatabricksVectorSearch
from tests.integration_tests.vectorstores.fake_embeddings import (
    FakeEmbeddings,
    fake_texts,
)

DEFAULT_VECTOR_DIMENSION = 4


class FakeEmbeddingsWithDimension(FakeEmbeddings):
    """Fake embeddings functionality for testing."""

    def __init__(self, dimension: int = DEFAULT_VECTOR_DIMENSION):
        super().__init__()
        self.dimension = dimension

    def embed_documents(self, embedding_texts: List[str]) -> List[List[float]]:
        """Return simple embeddings."""
        # Deterministic: all-ones prefix, document index as the last element.
        return [
            [float(1.0)] * (self.dimension - 1) + [float(i)]
            for i in range(len(embedding_texts))
        ]

    def embed_query(self, text: str) -> List[float]:
        """Return simple embeddings."""
        # Query embedding is fixed regardless of the text.
        return [float(1.0)] * (self.dimension - 1) + [float(0.0)]


DEFAULT_EMBEDDING_MODEL = FakeEmbeddingsWithDimension()
DEFAULT_TEXT_COLUMN = "text"
DEFAULT_VECTOR_COLUMN = "text_vector"
DEFAULT_PRIMARY_KEY = "id"

# Fixture: delta-sync index where Databricks manages embeddings itself.
DELTA_SYNC_INDEX_MANAGED_EMBEDDINGS = {
    "name": "ml.llm.index",
    "endpoint_name": "vector_search_endpoint",
    "index_type": "DELTA_SYNC",
    "primary_key": DEFAULT_PRIMARY_KEY,
    "delta_sync_index_spec": {
        "source_table": "ml.llm.source_table",
        "pipeline_type": "CONTINUOUS",
        "embedding_source_columns": [
            {
                "name": DEFAULT_TEXT_COLUMN,
                "embedding_model_endpoint_name": "openai-text-embedding",
            }
        ],
    },
}

# Fixture: delta-sync index with caller-supplied (self-managed) embeddings.
DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS = {
    "name": "ml.llm.index",
    "endpoint_name": "vector_search_endpoint",
    "index_type": "DELTA_SYNC",
    "primary_key": DEFAULT_PRIMARY_KEY,
    "delta_sync_index_spec": {
        "source_table": "ml.llm.source_table",
        "pipeline_type": "CONTINUOUS",
        "embedding_vector_columns": [
            {
                "name": DEFAULT_VECTOR_COLUMN,
                "embedding_dimension": DEFAULT_VECTOR_DIMENSION,
            }
        ],
    },
}

# Fixture: direct-access index (the only kind supporting add_texts/delete).
DIRECT_ACCESS_INDEX = {
    "name": "ml.llm.index",
    "endpoint_name": "vector_search_endpoint",
    "index_type": "DIRECT_ACCESS",
    "primary_key": DEFAULT_PRIMARY_KEY,
    "direct_access_index_spec": {
        "embedding_vector_columns": [
            {
                "name": DEFAULT_VECTOR_COLUMN,
                "embedding_dimension": DEFAULT_VECTOR_DIMENSION,
            }
        ],
        "schema_json": f"{{"
        f'"{DEFAULT_PRIMARY_KEY}": "int", '
        f'"feat1": "str", '
        f'"feat2": "float", '
        f'"text": "string", '
        f'"{DEFAULT_VECTOR_COLUMN}": "array<float>"'
        f"}}",
    },
}

ALL_INDEXES = [
    DELTA_SYNC_INDEX_MANAGED_EMBEDDINGS,
    DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS,
    DIRECT_ACCESS_INDEX,
]

# Canned similarity_search response: random scores, sorted descending by score.
EXAMPLE_SEARCH_RESPONSE = {
    "manifest": {
        "column_count": 3,
        "columns": [
            {"name": DEFAULT_PRIMARY_KEY},
            {"name": DEFAULT_TEXT_COLUMN},
            {"name": "score"},
        ],
    },
    "result": {
        "row_count": len(fake_texts),
        "data_array": sorted(
            [[str(uuid.uuid4()), s, random.uniform(0, 1)] for s in fake_texts],
            key=lambda x: x[2],  # type: ignore
            reverse=True,
        ),
    },
    "next_page_token": "",
}

# Same shape as above but every row scores 0.5 (used for threshold tests).
EXAMPLE_SEARCH_RESPONSE_FIXED_SCORE: Dict = {
    "manifest": {
        "column_count": 3,
        "columns": [
            {"name": DEFAULT_PRIMARY_KEY},
            {"name": DEFAULT_TEXT_COLUMN},
            {"name": "score"},
        ],
    },
    "result": {
        "row_count": len(fake_texts),
        "data_array": sorted(
            [[str(uuid.uuid4()), s, 0.5] for s in fake_texts],
            key=lambda x: x[2],  # type: ignore
            reverse=True,
        ),
    },
    "next_page_token": "",
}

# Response that also carries the embedding column (needed for MMR).
# NOTE(review): "column_count" says 3 but four columns are listed, and the
# sort key x[2] is the embedding vector, not the score (x[3]) — the mocked
# index never reads these fields, but they look inconsistent; verify intent.
EXAMPLE_SEARCH_RESPONSE_WITH_EMBEDDING = {
    "manifest": {
        "column_count": 3,
        "columns": [
            {"name": DEFAULT_PRIMARY_KEY},
            {"name": DEFAULT_TEXT_COLUMN},
            {"name": DEFAULT_VECTOR_COLUMN},
            {"name": "score"},
        ],
    },
    "result": {
        "row_count": len(fake_texts),
        "data_array": sorted(
            [
                [str(uuid.uuid4()), s, e, random.uniform(0, 1)]
                for s, e in zip(
                    fake_texts, DEFAULT_EMBEDDING_MODEL.embed_documents(fake_texts)
                )
            ],
            key=lambda x: x[2],  # type: ignore
            reverse=True,
        ),
    },
    "next_page_token": "",
}

ALL_QUERY_TYPES = [
    None,
    "ANN",
    "HYBRID",
]


def mock_index(index_details: dict) -> MagicMock:
    """Build a MagicMock VectorSearchIndex whose describe() returns the fixture."""
    from databricks.vector_search.client import VectorSearchIndex

    index = MagicMock(spec=VectorSearchIndex)
    index.describe.return_value = index_details
    return index


def default_databricks_vector_search(
    index: MagicMock, columns: Optional[List[str]] = None
) -> DatabricksVectorSearch:
    """Construct a vectorstore over the mocked index with default test config."""
    return DatabricksVectorSearch(
        index,
        embedding=DEFAULT_EMBEDDING_MODEL,
        text_column=DEFAULT_TEXT_COLUMN,
        columns=columns,
    )


@pytest.mark.requires("databricks", "databricks.vector_search")
def test_init_delta_sync_with_managed_embeddings() -> None:
    # Managed-embeddings index needs neither embedding nor text_column.
    index = mock_index(DELTA_SYNC_INDEX_MANAGED_EMBEDDINGS)
    vectorsearch = DatabricksVectorSearch(index)
    assert vectorsearch.index == index


@pytest.mark.requires("databricks", "databricks.vector_search")
def test_init_delta_sync_with_self_managed_embeddings() -> None:
    index = mock_index(DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS)
    vectorsearch = DatabricksVectorSearch(
        index,
        embedding=DEFAULT_EMBEDDING_MODEL,
        text_column=DEFAULT_TEXT_COLUMN,
    )
    assert vectorsearch.index == index


@pytest.mark.requires("databricks", "databricks.vector_search")
def test_init_direct_access_index() -> None:
    index = mock_index(DIRECT_ACCESS_INDEX)
    vectorsearch = DatabricksVectorSearch(
        index,
        embedding=DEFAULT_EMBEDDING_MODEL,
        text_column=DEFAULT_TEXT_COLUMN,
    )
    assert vectorsearch.index == index


@pytest.mark.requires("databricks", "databricks.vector_search")
def test_init_fail_no_index() -> None:
    # Constructor requires the index positional argument.
    with pytest.raises(TypeError):
        DatabricksVectorSearch()  # type: ignore[call-arg]


@pytest.mark.requires("databricks", "databricks.vector_search")
def test_init_fail_index_none() -> None:
    with pytest.raises(TypeError) as ex:
        DatabricksVectorSearch(None)
    assert "index must be of type VectorSearchIndex." in str(ex.value)


@pytest.mark.requires("databricks", "databricks.vector_search")
def test_init_fail_text_column_mismatch() -> None:
    # For managed embeddings, text_column must equal the index source column.
    index = mock_index(DELTA_SYNC_INDEX_MANAGED_EMBEDDINGS)
    with pytest.raises(ValueError) as ex:
        DatabricksVectorSearch(
            index,
            text_column="some_other_column",
        )
    assert (
        f"text_column 'some_other_column' does not match with the source column of the "
        f"index: '{DEFAULT_TEXT_COLUMN}'."
        in str(ex.value)
    )


@pytest.mark.requires("databricks", "databricks.vector_search")
@pytest.mark.parametrize(
    "index_details", [DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS, DIRECT_ACCESS_INDEX]
)
def test_init_fail_no_text_column(index_details: dict) -> None:
    # Self-managed/direct-access indexes require an explicit text_column.
    index = mock_index(index_details)
    with pytest.raises(ValueError) as ex:
        DatabricksVectorSearch(
            index,
            embedding=DEFAULT_EMBEDDING_MODEL,
        )
    assert "`text_column` is required for this index." in str(ex.value)


@pytest.mark.requires("databricks", "databricks.vector_search")
@pytest.mark.parametrize("index_details", [DIRECT_ACCESS_INDEX])
def test_init_fail_columns_not_in_schema(index_details: dict) -> None:
    # Requested metadata columns must exist in the index schema_json.
    index = mock_index(index_details)
    with pytest.raises(ValueError) as ex:
        DatabricksVectorSearch(
            index,
            embedding=DEFAULT_EMBEDDING_MODEL,
            text_column=DEFAULT_TEXT_COLUMN,
            columns=["some_random_column"],
        )
    assert "column 'some_random_column' is not in the index's schema." in str(ex.value)


@pytest.mark.requires("databricks", "databricks.vector_search")
@pytest.mark.parametrize(
    "index_details", [DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS, DIRECT_ACCESS_INDEX]
)
def test_init_fail_no_embedding(index_details: dict) -> None:
    index = mock_index(index_details)
    with pytest.raises(ValueError) as ex:
        DatabricksVectorSearch(
            index,
            text_column=DEFAULT_TEXT_COLUMN,
        )
    assert "`embedding` is required for this index." in str(ex.value)


@pytest.mark.requires("databricks", "databricks.vector_search")
@pytest.mark.parametrize(
    "index_details", [DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS, DIRECT_ACCESS_INDEX]
)
def test_init_fail_embedding_dim_mismatch(index_details: dict) -> None:
    # Embedding model dimension must match the index's declared dimension.
    index = mock_index(index_details)
    with pytest.raises(ValueError) as ex:
        DatabricksVectorSearch(
            index,
            text_column=DEFAULT_TEXT_COLUMN,
            embedding=FakeEmbeddingsWithDimension(DEFAULT_VECTOR_DIMENSION + 1),
        )
    assert (
        f"embedding model's dimension '{DEFAULT_VECTOR_DIMENSION + 1}' does not match "
        f"with the index's dimension '{DEFAULT_VECTOR_DIMENSION}'"
    ) in str(ex.value)


@pytest.mark.requires("databricks", "databricks.vector_search")
def test_from_texts_not_supported() -> None:
    with pytest.raises(NotImplementedError) as ex:
        DatabricksVectorSearch.from_texts(fake_texts, FakeEmbeddings())
    assert (
        "`from_texts` is not supported. "
        "Use `add_texts` to add to existing direct-access index."
    ) in str(ex.value)


@pytest.mark.requires("databricks", "databricks.vector_search")
@pytest.mark.parametrize(
    "index_details",
    [DELTA_SYNC_INDEX_MANAGED_EMBEDDINGS, DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS],
)
def test_add_texts_not_supported_for_delta_sync_index(index_details: dict) -> None:
    index = mock_index(index_details)
    vectorsearch = default_databricks_vector_search(index)
    with pytest.raises(ValueError) as ex:
        vectorsearch.add_texts(fake_texts)
    assert "`add_texts` is only supported for direct-access index." in str(ex.value)


def is_valid_uuid(val: str) -> bool:
    """Return True iff val parses as a UUID (used to check generated ids)."""
    try:
        uuid.UUID(str(val))
        return True
    except ValueError:
        return False


@pytest.mark.requires("databricks", "databricks.vector_search")
def test_add_texts() -> None:
    index = mock_index(DIRECT_ACCESS_INDEX)
    vectorsearch = DatabricksVectorSearch(
        index,
        embedding=DEFAULT_EMBEDDING_MODEL,
        text_column=DEFAULT_TEXT_COLUMN,
    )
    ids = [idx for idx, i in enumerate(fake_texts)]
    vectors = DEFAULT_EMBEDDING_MODEL.embed_documents(fake_texts)

    added_ids = vectorsearch.add_texts(fake_texts, ids=ids)
    # Each row upserted must carry primary key, text, and vector.
    index.upsert.assert_called_once_with(
        [
            {
                DEFAULT_PRIMARY_KEY: id_,
                DEFAULT_TEXT_COLUMN: text,
                DEFAULT_VECTOR_COLUMN: vector,
            }
            for text, vector, id_ in zip(fake_texts, vectors, ids)
        ]
    )
    assert len(added_ids) == len(fake_texts)
    assert added_ids == ids


@pytest.mark.requires("databricks", "databricks.vector_search")
def test_add_texts_handle_single_text() -> None:
    index = mock_index(DIRECT_ACCESS_INDEX)
    vectorsearch = DatabricksVectorSearch(
        index,
        embedding=DEFAULT_EMBEDDING_MODEL,
        text_column=DEFAULT_TEXT_COLUMN,
    )
    vectors = DEFAULT_EMBEDDING_MODEL.embed_documents(fake_texts)

    # A bare string (not a list) should be treated as one text.
    added_ids = vectorsearch.add_texts(fake_texts[0])
    index.upsert.assert_called_once_with(
        [
            {
                DEFAULT_PRIMARY_KEY: id_,
                DEFAULT_TEXT_COLUMN: text,
                DEFAULT_VECTOR_COLUMN: vector,
            }
            for text, vector, id_ in zip(fake_texts, vectors, added_ids)
        ]
    )
    assert len(added_ids) == 1
    assert is_valid_uuid(added_ids[0])


@pytest.mark.requires("databricks", "databricks.vector_search")
def test_add_texts_with_default_id() -> None:
    index = mock_index(DIRECT_ACCESS_INDEX)
    vectorsearch = default_databricks_vector_search(index)
    vectors = DEFAULT_EMBEDDING_MODEL.embed_documents(fake_texts)

    # Without explicit ids, UUIDs are generated for every row.
    added_ids = vectorsearch.add_texts(fake_texts)
    index.upsert.assert_called_once_with(
        [
            {
                DEFAULT_PRIMARY_KEY: id_,
                DEFAULT_TEXT_COLUMN: text,
                DEFAULT_VECTOR_COLUMN: vector,
            }
            for text, vector, id_ in zip(fake_texts, vectors, added_ids)
        ]
    )
    assert len(added_ids) == len(fake_texts)
    assert all([is_valid_uuid(id_) for id_ in added_ids])


@pytest.mark.requires("databricks", "databricks.vector_search")
def test_add_texts_with_metadata() -> None:
    index = mock_index(DIRECT_ACCESS_INDEX)
    vectorsearch = default_databricks_vector_search(index)
    vectors = DEFAULT_EMBEDDING_MODEL.embed_documents(fake_texts)
    metadatas = [{"feat1": str(i), "feat2": i + 1000} for i in range(len(fake_texts))]

    added_ids = vectorsearch.add_texts(fake_texts, metadatas=metadatas)
    # Metadata keys are flattened into the upserted row alongside the
    # primary key / text / vector columns.
    index.upsert.assert_called_once_with(
        [
            {
                DEFAULT_PRIMARY_KEY: id_,
                DEFAULT_TEXT_COLUMN: text,
                DEFAULT_VECTOR_COLUMN: vector,
                **metadata,  # type: ignore[arg-type]
            }
            for text, vector, id_, metadata in zip(
                fake_texts, vectors, added_ids, metadatas
            )
        ]
    )
    assert len(added_ids) == len(fake_texts)
    assert all([is_valid_uuid(id_) for id_ in added_ids])


@pytest.mark.requires("databricks", "databricks.vector_search")
@pytest.mark.parametrize(
    "index_details",
    [DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS, DIRECT_ACCESS_INDEX],
)
def test_embeddings_property(index_details: dict) -> None:
    index = mock_index(index_details)
    vectorsearch = default_databricks_vector_search(index)
    assert vectorsearch.embeddings == DEFAULT_EMBEDDING_MODEL


@pytest.mark.requires("databricks", "databricks.vector_search")
@pytest.mark.parametrize(
    "index_details",
    [DELTA_SYNC_INDEX_MANAGED_EMBEDDINGS, DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS],
)
def test_delete_not_supported_for_delta_sync_index(index_details: dict) -> None:
    index = mock_index(index_details)
    vectorsearch = default_databricks_vector_search(index)
    with pytest.raises(ValueError) as ex:
        vectorsearch.delete(["some id"])
    assert "`delete` is only supported for direct-access index." in str(ex.value)


@pytest.mark.requires("databricks", "databricks.vector_search")
def test_delete() -> None:
    index = mock_index(DIRECT_ACCESS_INDEX)
    vectorsearch = default_databricks_vector_search(index)
    vectorsearch.delete(["some id"])
    index.delete.assert_called_once_with(["some id"])


@pytest.mark.requires("databricks", "databricks.vector_search")
def test_delete_fail_no_ids() -> None:
    index = mock_index(DIRECT_ACCESS_INDEX)
    vectorsearch = default_databricks_vector_search(index)
    with pytest.raises(ValueError) as ex:
        vectorsearch.delete()
    assert "ids must be provided." in str(ex.value)


@pytest.mark.requires("databricks", "databricks.vector_search")
@pytest.mark.parametrize(
    "index_details, query_type", itertools.product(ALL_INDEXES, [None, "ANN"])
)
def test_similarity_search(index_details: dict, query_type: Optional[str]) -> None:
    index = mock_index(index_details)
    index.similarity_search.return_value = EXAMPLE_SEARCH_RESPONSE
    vectorsearch = default_databricks_vector_search(index)
    query = "foo"
    filters = {"some filter": True}
    limit = 7

    search_result = vectorsearch.similarity_search(
        query, k=limit, filter=filters, query_type=query_type
    )
    if index_details == DELTA_SYNC_INDEX_MANAGED_EMBEDDINGS:
        # Managed embeddings: the raw query text is sent, no vector.
        index.similarity_search.assert_called_once_with(
            columns=[DEFAULT_PRIMARY_KEY, DEFAULT_TEXT_COLUMN],
            query_text=query,
            query_vector=None,
            filters=filters,
            num_results=limit,
            query_type=query_type,
        )
    else:
        # Self-managed embeddings: the query is embedded locally first.
        index.similarity_search.assert_called_once_with(
            columns=[DEFAULT_PRIMARY_KEY, DEFAULT_TEXT_COLUMN],
            query_text=None,
            query_vector=DEFAULT_EMBEDDING_MODEL.embed_query(query),
            filters=filters,
            num_results=limit,
            query_type=query_type,
        )
    assert len(search_result) == len(fake_texts)
    assert sorted([d.page_content for d in search_result]) == sorted(fake_texts)
    assert all([DEFAULT_PRIMARY_KEY in d.metadata for d in search_result])


@pytest.mark.requires("databricks", "databricks.vector_search")
@pytest.mark.parametrize("index_details", ALL_INDEXES)
def test_similarity_search_hybrid(index_details: dict) -> None:
    index = mock_index(index_details)
    index.similarity_search.return_value = EXAMPLE_SEARCH_RESPONSE
    vectorsearch = default_databricks_vector_search(index)
    query = "foo"
    filters = {"some filter": True}
    limit = 7

    search_result = vectorsearch.similarity_search(
        query, k=limit, filter=filters, query_type="HYBRID"
    )
    if index_details == DELTA_SYNC_INDEX_MANAGED_EMBEDDINGS:
        index.similarity_search.assert_called_once_with(
            columns=[DEFAULT_PRIMARY_KEY, DEFAULT_TEXT_COLUMN],
            query_text=query,
            query_vector=None,
            filters=filters,
            num_results=limit,
            query_type="HYBRID",
        )
    else:
        # HYBRID search sends both the query text and the local embedding.
        index.similarity_search.assert_called_once_with(
            columns=[DEFAULT_PRIMARY_KEY, DEFAULT_TEXT_COLUMN],
            query_text=query,
            query_vector=DEFAULT_EMBEDDING_MODEL.embed_query(query),
            filters=filters,
            num_results=limit,
            query_type="HYBRID",
        )
    assert len(search_result) == len(fake_texts)
    assert sorted([d.page_content for d in search_result]) == sorted(fake_texts)
    assert all([DEFAULT_PRIMARY_KEY in d.metadata for d in search_result])


@pytest.mark.requires("databricks", "databricks.vector_search")
def test_similarity_search_both_filter_and_filters_passed() -> None:
    index = mock_index(DIRECT_ACCESS_INDEX)
    index.similarity_search.return_value = EXAMPLE_SEARCH_RESPONSE
    vectorsearch = default_databricks_vector_search(index)
    query = "foo"
    filter = {"some filter": True}
    filters = {"some other filter": False}

    vectorsearch.similarity_search(query, filter=filter, filters=filters)
    index.similarity_search.assert_called_once_with(
        columns=[DEFAULT_PRIMARY_KEY, DEFAULT_TEXT_COLUMN],
        query_vector=DEFAULT_EMBEDDING_MODEL.embed_query(query),
        # `filter` should prevail over `filters`
        filters=filter,
        num_results=4,
        query_text=None,
        query_type=None,
    )


@pytest.mark.requires("databricks", "databricks.vector_search")
@pytest.mark.parametrize(
    "index_details, columns, expected_columns",
    [
        (DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS, None, {"id"}),
        (DIRECT_ACCESS_INDEX, None, {"id"}),
        (
            DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS,
            [DEFAULT_PRIMARY_KEY, DEFAULT_TEXT_COLUMN, DEFAULT_VECTOR_COLUMN],
            {"text_vector", "id"},
        ),
        (
            DIRECT_ACCESS_INDEX,
            [DEFAULT_PRIMARY_KEY, DEFAULT_TEXT_COLUMN, DEFAULT_VECTOR_COLUMN],
            {"text_vector", "id"},
        ),
    ],
)
def test_mmr_search(
    index_details: dict, columns: Optional[List[str]], expected_columns: Set[str]
) -> None:
    index = mock_index(index_details)
    index.similarity_search.return_value = EXAMPLE_SEARCH_RESPONSE_WITH_EMBEDDING
    vectorsearch = default_databricks_vector_search(index, columns)
    query = fake_texts[0]
    filters = {"some filter": True}
    limit = 1

    # embed_query returns the embedding of fake_texts[0], so MMR should
    # surface that document first.
    search_result = vectorsearch.max_marginal_relevance_search(
        query, k=limit, filters=filters
    )
    assert [doc.page_content for doc in search_result] == [fake_texts[0]]
    assert [set(doc.metadata.keys()) for doc in search_result] == [expected_columns]


@pytest.mark.requires("databricks", "databricks.vector_search")
@pytest.mark.parametrize(
    "index_details", [DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS, DIRECT_ACCESS_INDEX]
)
def test_mmr_parameters(index_details: dict) -> None:
    index = mock_index(index_details)
    index.similarity_search.return_value = EXAMPLE_SEARCH_RESPONSE_WITH_EMBEDDING
    query = fake_texts[0]
    limit = 1
    fetch_k = 3
    lambda_mult = 0.25
    filters = {"some filter": True}

    # Patch the MMR helper to verify the retriever forwards its knobs.
    with patch(
        "langchain_community.vectorstores.databricks_vector_search.maximal_marginal_relevance"
    ) as mock_mmr:
        mock_mmr.return_value = [2]
        retriever = default_databricks_vector_search(index).as_retriever(
            search_type="mmr",
            search_kwargs={
                "k": limit,
                "fetch_k": fetch_k,
                "lambda_mult": lambda_mult,
                "filter": filters,
            },
        )
        search_result = retriever.invoke(query)

        mock_mmr.assert_called_once()
        assert mock_mmr.call_args[1]["lambda_mult"] == lambda_mult
        assert index.similarity_search.call_args[1]["num_results"] == fetch_k
        assert index.similarity_search.call_args[1]["filters"] == filters
        assert len(search_result) == limit


@pytest.mark.requires("databricks", "databricks.vector_search")
@pytest.mark.parametrize(
    "index_details, threshold", itertools.product(ALL_INDEXES, [0.4, 0.5, 0.8])
)
def test_similarity_score_threshold(index_details: dict, threshold: float) -> None:
    index = mock_index(index_details)
    index.similarity_search.return_value = EXAMPLE_SEARCH_RESPONSE_FIXED_SCORE
    # All fixture rows share one score; either every doc passes the
    # threshold or none does.
    uniform_response_score = EXAMPLE_SEARCH_RESPONSE_FIXED_SCORE["result"][
        "data_array"
    ][0][2]

    query = fake_texts[0]
    limit = len(fake_texts)

    retriever = default_databricks_vector_search(index).as_retriever(
        search_type="similarity_score_threshold",
        search_kwargs={"k": limit, "score_threshold": threshold},
    )
    search_result = retriever.invoke(query)
    if uniform_response_score >= threshold:
        assert len(search_result) == len(fake_texts)
    else:
        assert len(search_result) == 0


@pytest.mark.requires("databricks", "databricks.vector_search")
def test_standard_params() -> None:
    index = mock_index(DIRECT_ACCESS_INDEX)
    vectorstore = default_databricks_vector_search(index)
    retriever = vectorstore.as_retriever()
    ls_params = retriever._get_ls_params()
    assert ls_params == {
        "ls_retriever_name": "vectorstore",
        "ls_vector_store_provider": "DatabricksVectorSearch",
        "ls_embedding_provider": "FakeEmbeddingsWithDimension",
    }

    # Managed-embeddings index has no local embedding model, so no
    # ls_embedding_provider entry.
    index = mock_index(DELTA_SYNC_INDEX_MANAGED_EMBEDDINGS)
    vectorstore = default_databricks_vector_search(index)
    retriever = vectorstore.as_retriever()
    ls_params = retriever._get_ls_params()
    assert ls_params == {
        "ls_retriever_name": "vectorstore",
        "ls_vector_store_provider": "DatabricksVectorSearch",
    }


@pytest.mark.requires("databricks", "databricks.vector_search")
@pytest.mark.parametrize(
    "index_details, query_type",
    itertools.product(
        [DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS, DIRECT_ACCESS_INDEX], [None, "ANN"]
    ),
)
def test_similarity_search_by_vector(
    index_details: dict, query_type: Optional[str]
) -> None:
    index = mock_index(index_details)
    index.similarity_search.return_value = EXAMPLE_SEARCH_RESPONSE
    vectorsearch = default_databricks_vector_search(index)
    query_embedding = DEFAULT_EMBEDDING_MODEL.embed_query("foo")
    filters = {"some filter": True}
    limit = 7

    search_result = vectorsearch.similarity_search_by_vector(
        query_embedding, k=limit, filter=filters, query_type=query_type
    )
    index.similarity_search.assert_called_once_with(
        columns=[DEFAULT_PRIMARY_KEY, DEFAULT_TEXT_COLUMN],
        query_vector=query_embedding,
        filters=filters,
        num_results=limit,
        query_type=query_type,
        query_text=None,
    )
    assert len(search_result) == len(fake_texts)
    assert sorted([d.page_content for d in search_result]) == sorted(fake_texts)
    assert all([DEFAULT_PRIMARY_KEY in d.metadata for d in search_result])


@pytest.mark.requires("databricks", "databricks.vector_search")
@pytest.mark.parametrize(
    "index_details", [DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS, DIRECT_ACCESS_INDEX]
)
def test_similarity_search_by_vector_hybrid(index_details: dict) -> None:
    index = mock_index(index_details)
    index.similarity_search.return_value = EXAMPLE_SEARCH_RESPONSE
    vectorsearch = default_databricks_vector_search(index)
    query_embedding = DEFAULT_EMBEDDING_MODEL.embed_query("foo")
    filters = {"some filter": True}
    limit = 7

    # HYBRID by-vector search additionally requires the query text.
    search_result = vectorsearch.similarity_search_by_vector(
        query_embedding, k=limit, filter=filters, query_type="HYBRID", query="foo"
    )
    index.similarity_search.assert_called_once_with(
        columns=[DEFAULT_PRIMARY_KEY, DEFAULT_TEXT_COLUMN],
        query_vector=query_embedding,
        filters=filters,
        num_results=limit,
        query_type="HYBRID",
        query_text="foo",
    )
    assert len(search_result) == len(fake_texts)
    assert sorted([d.page_content for d in search_result]) == sorted(fake_texts)
    assert all([DEFAULT_PRIMARY_KEY in d.metadata for d in search_result])


@pytest.mark.requires("databricks", "databricks.vector_search")
@pytest.mark.parametrize("index_details", ALL_INDEXES)
def test_similarity_search_empty_result(index_details: dict) -> None:
    index = mock_index(index_details)
    index.similarity_search.return_value = {
        "manifest": {
            "column_count": 3,
            "columns": [
                {"name": DEFAULT_PRIMARY_KEY},
                {"name":
DEFAULT_TEXT_COLUMN}, {"name": "score"}, ], }, "result": { "row_count": 0, "data_array": [], }, "next_page_token": "", } vectorsearch = default_databricks_vector_search(index) search_result = vectorsearch.similarity_search("foo") assert len(search_result) == 0 @pytest.mark.requires("databricks", "databricks.vector_search") def test_similarity_search_by_vector_not_supported_for_managed_embedding() -> None: index = mock_index(DELTA_SYNC_INDEX_MANAGED_EMBEDDINGS) index.similarity_search.return_value = EXAMPLE_SEARCH_RESPONSE vectorsearch = default_databricks_vector_search(index) query_embedding = DEFAULT_EMBEDDING_MODEL.embed_query("foo") filters = {"some filter": True} limit = 7 with pytest.raises(ValueError) as ex: vectorsearch.similarity_search_by_vector( query_embedding, k=limit, filters=filters ) assert ( "`similarity_search_by_vector` is not supported for index with " "Databricks-managed embeddings." in str(ex.value) ) @pytest.mark.requires("databricks", "databricks.vector_search") @pytest.mark.parametrize( "method", [ "similarity_search", "similarity_search_with_score", "similarity_search_by_vector", "similarity_search_by_vector_with_score", "max_marginal_relevance_search", "max_marginal_relevance_search_by_vector", ], ) def test_filter_arg_alias(method: str) -> None: index = mock_index(DIRECT_ACCESS_INDEX) vectorsearch = default_databricks_vector_search(index) query = "foo" query_embedding = DEFAULT_EMBEDDING_MODEL.embed_query("foo") filters = {"some filter": True} limit = 7 if "by_vector" in method: getattr(vectorsearch, method)(query_embedding, k=limit, filters=filters) else: getattr(vectorsearch, method)(query, k=limit, filters=filters) index_call_args = index.similarity_search.call_args[1] assert index_call_args["filters"] == filters
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/vectorstores/test_faiss.py
"""Test FAISS functionality.""" import datetime import math import tempfile from typing import Union import pytest from langchain_core.documents import Document from langchain_community.docstore.base import Docstore from langchain_community.docstore.in_memory import InMemoryDocstore from langchain_community.vectorstores.faiss import FAISS from langchain_community.vectorstores.utils import DistanceStrategy from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings _PAGE_CONTENT = """This is a page about LangChain. It is a really cool framework. What isn't there to love about langchain? Made in 2022.""" class FakeDocstore(Docstore): """Fake docstore for testing purposes.""" def search(self, search: str) -> Union[str, Document]: """Return the fake document.""" document = Document(page_content=_PAGE_CONTENT) return document @pytest.mark.requires("faiss") def test_faiss() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] docsearch = FAISS.from_texts(texts, FakeEmbeddings()) index_to_id = docsearch.index_to_docstore_id expected_docstore = InMemoryDocstore( { index_to_id[0]: Document(page_content="foo"), index_to_id[1]: Document(page_content="bar"), index_to_id[2]: Document(page_content="baz"), } ) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ output = docsearch.similarity_search("foo", k=1) assert output == [Document(page_content="foo")] # Retriever standard params retriever = docsearch.as_retriever() ls_params = retriever._get_ls_params() assert ls_params == { "ls_retriever_name": "vectorstore", "ls_vector_store_provider": "FAISS", "ls_embedding_provider": "FakeEmbeddings", } @pytest.mark.requires("faiss") async def test_faiss_afrom_texts() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings()) index_to_id = docsearch.index_to_docstore_id expected_docstore = InMemoryDocstore( { index_to_id[0]: 
Document(page_content="foo"), index_to_id[1]: Document(page_content="bar"), index_to_id[2]: Document(page_content="baz"), } ) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ output = await docsearch.asimilarity_search("foo", k=1) assert output == [Document(page_content="foo")] @pytest.mark.requires("faiss") def test_faiss_vector_sim() -> None: """Test vector similarity.""" texts = ["foo", "bar", "baz"] docsearch = FAISS.from_texts(texts, FakeEmbeddings()) index_to_id = docsearch.index_to_docstore_id expected_docstore = InMemoryDocstore( { index_to_id[0]: Document(page_content="foo"), index_to_id[1]: Document(page_content="bar"), index_to_id[2]: Document(page_content="baz"), } ) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ query_vec = FakeEmbeddings().embed_query(text="foo") output = docsearch.similarity_search_by_vector(query_vec, k=1) assert output == [Document(page_content="foo")] @pytest.mark.requires("faiss") async def test_faiss_async_vector_sim() -> None: """Test vector similarity.""" texts = ["foo", "bar", "baz"] docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings()) index_to_id = docsearch.index_to_docstore_id expected_docstore = InMemoryDocstore( { index_to_id[0]: Document(page_content="foo"), index_to_id[1]: Document(page_content="bar"), index_to_id[2]: Document(page_content="baz"), } ) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ query_vec = await FakeEmbeddings().aembed_query(text="foo") output = await docsearch.asimilarity_search_by_vector(query_vec, k=1) assert output == [Document(page_content="foo")] @pytest.mark.requires("faiss") def test_faiss_vector_sim_with_score_threshold() -> None: """Test vector similarity.""" texts = ["foo", "bar", "baz"] docsearch = FAISS.from_texts(texts, FakeEmbeddings()) index_to_id = docsearch.index_to_docstore_id expected_docstore = InMemoryDocstore( { index_to_id[0]: Document(page_content="foo"), index_to_id[1]: Document(page_content="bar"), index_to_id[2]: 
Document(page_content="baz"), } ) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ query_vec = FakeEmbeddings().embed_query(text="foo") output = docsearch.similarity_search_by_vector(query_vec, k=2, score_threshold=0.2) assert output == [Document(page_content="foo")] @pytest.mark.requires("faiss") async def test_faiss_vector_async_sim_with_score_threshold() -> None: """Test vector similarity.""" texts = ["foo", "bar", "baz"] docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings()) index_to_id = docsearch.index_to_docstore_id expected_docstore = InMemoryDocstore( { index_to_id[0]: Document(page_content="foo"), index_to_id[1]: Document(page_content="bar"), index_to_id[2]: Document(page_content="baz"), } ) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ query_vec = await FakeEmbeddings().aembed_query(text="foo") output = await docsearch.asimilarity_search_by_vector( query_vec, k=2, score_threshold=0.2 ) assert output == [Document(page_content="foo")] @pytest.mark.requires("faiss") def test_similarity_search_with_score_by_vector() -> None: """Test vector similarity with score by vector.""" texts = ["foo", "bar", "baz"] docsearch = FAISS.from_texts(texts, FakeEmbeddings()) index_to_id = docsearch.index_to_docstore_id expected_docstore = InMemoryDocstore( { index_to_id[0]: Document(page_content="foo"), index_to_id[1]: Document(page_content="bar"), index_to_id[2]: Document(page_content="baz"), } ) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ query_vec = FakeEmbeddings().embed_query(text="foo") output = docsearch.similarity_search_with_score_by_vector(query_vec, k=1) assert len(output) == 1 assert output[0][0] == Document(page_content="foo") @pytest.mark.requires("faiss") async def test_similarity_async_search_with_score_by_vector() -> None: """Test vector similarity with score by vector.""" texts = ["foo", "bar", "baz"] docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings()) index_to_id = 
docsearch.index_to_docstore_id expected_docstore = InMemoryDocstore( { index_to_id[0]: Document(page_content="foo"), index_to_id[1]: Document(page_content="bar"), index_to_id[2]: Document(page_content="baz"), } ) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ query_vec = await FakeEmbeddings().aembed_query(text="foo") output = await docsearch.asimilarity_search_with_score_by_vector(query_vec, k=1) assert len(output) == 1 assert output[0][0] == Document(page_content="foo") @pytest.mark.requires("faiss") def test_similarity_search_with_score_by_vector_with_score_threshold() -> None: """Test vector similarity with score by vector.""" texts = ["foo", "bar", "baz"] docsearch = FAISS.from_texts(texts, FakeEmbeddings()) index_to_id = docsearch.index_to_docstore_id expected_docstore = InMemoryDocstore( { index_to_id[0]: Document(page_content="foo"), index_to_id[1]: Document(page_content="bar"), index_to_id[2]: Document(page_content="baz"), } ) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ query_vec = FakeEmbeddings().embed_query(text="foo") output = docsearch.similarity_search_with_score_by_vector( query_vec, k=2, score_threshold=0.2, ) assert len(output) == 1 assert output[0][0] == Document(page_content="foo") assert output[0][1] < 0.2 @pytest.mark.requires("faiss") async def test_sim_asearch_with_score_by_vector_with_score_threshold() -> None: """Test vector similarity with score by vector.""" texts = ["foo", "bar", "baz"] docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings()) index_to_id = docsearch.index_to_docstore_id expected_docstore = InMemoryDocstore( { index_to_id[0]: Document(page_content="foo"), index_to_id[1]: Document(page_content="bar"), index_to_id[2]: Document(page_content="baz"), } ) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ query_vec = await FakeEmbeddings().aembed_query(text="foo") output = await docsearch.asimilarity_search_with_score_by_vector( query_vec, k=2, score_threshold=0.2, ) 
assert len(output) == 1 assert output[0][0] == Document(page_content="foo") assert output[0][1] < 0.2 @pytest.mark.requires("faiss") def test_faiss_mmr() -> None: texts = ["foo", "foo", "fou", "foy"] docsearch = FAISS.from_texts(texts, FakeEmbeddings()) query_vec = FakeEmbeddings().embed_query(text="foo") # make sure we can have k > docstore size output = docsearch.max_marginal_relevance_search_with_score_by_vector( query_vec, k=10, lambda_mult=0.1 ) assert len(output) == len(texts) assert output[0][0] == Document(page_content="foo") assert output[0][1] == 0.0 assert output[1][0] != Document(page_content="foo") @pytest.mark.requires("faiss") async def test_faiss_async_mmr() -> None: texts = ["foo", "foo", "fou", "foy"] docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings()) query_vec = await FakeEmbeddings().aembed_query(text="foo") # make sure we can have k > docstore size output = await docsearch.amax_marginal_relevance_search_with_score_by_vector( query_vec, k=10, lambda_mult=0.1 ) assert len(output) == len(texts) assert output[0][0] == Document(page_content="foo") assert output[0][1] == 0.0 assert output[1][0] != Document(page_content="foo") @pytest.mark.requires("faiss") def test_faiss_mmr_with_metadatas() -> None: texts = ["foo", "foo", "fou", "foy"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas) query_vec = FakeEmbeddings().embed_query(text="foo") output = docsearch.max_marginal_relevance_search_with_score_by_vector( query_vec, k=10, lambda_mult=0.1 ) assert len(output) == len(texts) assert output[0][0] == Document(page_content="foo", metadata={"page": 0}) assert output[0][1] == 0.0 assert output[1][0] != Document(page_content="foo", metadata={"page": 0}) @pytest.mark.requires("faiss") async def test_faiss_async_mmr_with_metadatas() -> None: texts = ["foo", "foo", "fou", "foy"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = await FAISS.afrom_texts(texts, 
FakeEmbeddings(), metadatas=metadatas) query_vec = await FakeEmbeddings().aembed_query(text="foo") output = await docsearch.amax_marginal_relevance_search_with_score_by_vector( query_vec, k=10, lambda_mult=0.1 ) assert len(output) == len(texts) assert output[0][0] == Document(page_content="foo", metadata={"page": 0}) assert output[0][1] == 0.0 assert output[1][0] != Document(page_content="foo", metadata={"page": 0}) @pytest.mark.requires("faiss") def test_faiss_mmr_with_metadatas_and_filter() -> None: texts = ["foo", "foo", "fou", "foy"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas) query_vec = FakeEmbeddings().embed_query(text="foo") output = docsearch.max_marginal_relevance_search_with_score_by_vector( query_vec, k=10, lambda_mult=0.1, filter={"page": 1} ) assert len(output) == 1 assert output[0][0] == Document(page_content="foo", metadata={"page": 1}) assert output[0][1] == 0.0 assert output == docsearch.max_marginal_relevance_search_with_score_by_vector( query_vec, k=10, lambda_mult=0.1, filter=lambda di: di["page"] == 1 ) @pytest.mark.requires("faiss") async def test_faiss_async_mmr_with_metadatas_and_filter() -> None: texts = ["foo", "foo", "fou", "foy"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings(), metadatas=metadatas) query_vec = await FakeEmbeddings().aembed_query(text="foo") output = await docsearch.amax_marginal_relevance_search_with_score_by_vector( query_vec, k=10, lambda_mult=0.1, filter={"page": 1} ) assert len(output) == 1 assert output[0][0] == Document(page_content="foo", metadata={"page": 1}) assert output[0][1] == 0.0 assert ( output == await docsearch.amax_marginal_relevance_search_with_score_by_vector( query_vec, k=10, lambda_mult=0.1, filter=lambda di: di["page"] == 1 ) ) @pytest.mark.requires("faiss") def test_faiss_mmr_with_metadatas_and_list_filter() -> None: texts = ["foo", "foo", "fou", 
"foy"] metadatas = [{"page": i} if i <= 3 else {"page": 3} for i in range(len(texts))] docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas) query_vec = FakeEmbeddings().embed_query(text="foo") output = docsearch.max_marginal_relevance_search_with_score_by_vector( query_vec, k=10, lambda_mult=0.1, filter={"page": [0, 1, 2]} ) assert len(output) == 3 assert output[0][0] == Document(page_content="foo", metadata={"page": 0}) assert output[0][1] == 0.0 assert output[1][0] != Document(page_content="foo", metadata={"page": 0}) assert output == docsearch.max_marginal_relevance_search_with_score_by_vector( query_vec, k=10, lambda_mult=0.1, filter=lambda di: di["page"] in [0, 1, 2] ) @pytest.mark.requires("faiss") async def test_faiss_async_mmr_with_metadatas_and_list_filter() -> None: texts = ["foo", "foo", "fou", "foy"] metadatas = [{"page": i} if i <= 3 else {"page": 3} for i in range(len(texts))] docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings(), metadatas=metadatas) query_vec = await FakeEmbeddings().aembed_query(text="foo") output = await docsearch.amax_marginal_relevance_search_with_score_by_vector( query_vec, k=10, lambda_mult=0.1, filter={"page": [0, 1, 2]} ) assert len(output) == 3 assert output[0][0] == Document(page_content="foo", metadata={"page": 0}) assert output[0][1] == 0.0 assert output[1][0] != Document(page_content="foo", metadata={"page": 0}) assert output == ( await docsearch.amax_marginal_relevance_search_with_score_by_vector( query_vec, k=10, lambda_mult=0.1, filter=lambda di: di["page"] in [0, 1, 2] ) ) @pytest.mark.requires("faiss") def test_faiss_with_metadatas() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas) expected_docstore = InMemoryDocstore( { docsearch.index_to_docstore_id[0]: Document( page_content="foo", metadata={"page": 0} ), 
docsearch.index_to_docstore_id[1]: Document( page_content="bar", metadata={"page": 1} ), docsearch.index_to_docstore_id[2]: Document( page_content="baz", metadata={"page": 2} ), } ) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ output = docsearch.similarity_search("foo", k=1) assert output == [Document(page_content="foo", metadata={"page": 0})] @pytest.mark.requires("faiss") async def test_faiss_async_with_metadatas() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings(), metadatas=metadatas) expected_docstore = InMemoryDocstore( { docsearch.index_to_docstore_id[0]: Document( page_content="foo", metadata={"page": 0} ), docsearch.index_to_docstore_id[1]: Document( page_content="bar", metadata={"page": 1} ), docsearch.index_to_docstore_id[2]: Document( page_content="baz", metadata={"page": 2} ), } ) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ output = await docsearch.asimilarity_search("foo", k=1) assert output == [Document(page_content="foo", metadata={"page": 0})] @pytest.mark.requires("faiss") def test_faiss_with_metadatas_and_filter() -> None: texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas) expected_docstore = InMemoryDocstore( { docsearch.index_to_docstore_id[0]: Document( page_content="foo", metadata={"page": 0} ), docsearch.index_to_docstore_id[1]: Document( page_content="bar", metadata={"page": 1} ), docsearch.index_to_docstore_id[2]: Document( page_content="baz", metadata={"page": 2} ), } ) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ output = docsearch.similarity_search("foo", k=1, filter={"page": 1}) # make sure it returns the result that matches the filter. # Not the one who's text matches better. 
assert output == [Document(page_content="bar", metadata={"page": 1})] assert output == docsearch.similarity_search( "foo", k=1, filter=lambda di: di["page"] == 1 ) @pytest.mark.requires("faiss") async def test_faiss_async_with_metadatas_and_filter() -> None: texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings(), metadatas=metadatas) expected_docstore = InMemoryDocstore( { docsearch.index_to_docstore_id[0]: Document( page_content="foo", metadata={"page": 0} ), docsearch.index_to_docstore_id[1]: Document( page_content="bar", metadata={"page": 1} ), docsearch.index_to_docstore_id[2]: Document( page_content="baz", metadata={"page": 2} ), } ) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ output = await docsearch.asimilarity_search("foo", k=1, filter={"page": 1}) # make sure it returns the result that matches the filter. # Not the one who's text matches better. assert output == [Document(page_content="bar", metadata={"page": 1})] assert output == await docsearch.asimilarity_search( "foo", k=1, filter=lambda di: di["page"] == 1 ) @pytest.mark.requires("faiss") def test_faiss_with_metadatas_and_list_filter() -> None: texts = ["foo", "bar", "baz", "foo", "qux"] metadatas = [{"page": i} if i <= 3 else {"page": 3} for i in range(len(texts))] docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas) expected_docstore = InMemoryDocstore( { docsearch.index_to_docstore_id[0]: Document( page_content="foo", metadata={"page": 0} ), docsearch.index_to_docstore_id[1]: Document( page_content="bar", metadata={"page": 1} ), docsearch.index_to_docstore_id[2]: Document( page_content="baz", metadata={"page": 2} ), docsearch.index_to_docstore_id[3]: Document( page_content="foo", metadata={"page": 3} ), docsearch.index_to_docstore_id[4]: Document( page_content="qux", metadata={"page": 3} ), } ) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ output = 
docsearch.similarity_search("foor", k=1, filter={"page": [0, 1, 2]}) assert output == [Document(page_content="foo", metadata={"page": 0})] assert output == docsearch.similarity_search( "foor", k=1, filter=lambda di: di["page"] in [0, 1, 2] ) @pytest.mark.requires("faiss") async def test_faiss_async_with_metadatas_and_list_filter() -> None: texts = ["foo", "bar", "baz", "foo", "qux"] metadatas = [{"page": i} if i <= 3 else {"page": 3} for i in range(len(texts))] docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings(), metadatas=metadatas) expected_docstore = InMemoryDocstore( { docsearch.index_to_docstore_id[0]: Document( page_content="foo", metadata={"page": 0} ), docsearch.index_to_docstore_id[1]: Document( page_content="bar", metadata={"page": 1} ), docsearch.index_to_docstore_id[2]: Document( page_content="baz", metadata={"page": 2} ), docsearch.index_to_docstore_id[3]: Document( page_content="foo", metadata={"page": 3} ), docsearch.index_to_docstore_id[4]: Document( page_content="qux", metadata={"page": 3} ), } ) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ output = await docsearch.asimilarity_search("foor", k=1, filter={"page": [0, 1, 2]}) assert output == [Document(page_content="foo", metadata={"page": 0})] assert output == await docsearch.asimilarity_search( "foor", k=1, filter=lambda di: di["page"] in [0, 1, 2] ) @pytest.mark.requires("faiss") def test_faiss_search_not_found() -> None: """Test what happens when document is not found.""" texts = ["foo", "bar", "baz"] docsearch = FAISS.from_texts(texts, FakeEmbeddings()) # Get rid of the docstore to purposefully induce errors. 
docsearch.docstore = InMemoryDocstore({}) with pytest.raises(ValueError): docsearch.similarity_search("foo") @pytest.mark.requires("faiss") async def test_faiss_async_search_not_found() -> None: """Test what happens when document is not found.""" texts = ["foo", "bar", "baz"] docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings()) # Get rid of the docstore to purposefully induce errors. docsearch.docstore = InMemoryDocstore({}) with pytest.raises(ValueError): await docsearch.asimilarity_search("foo") @pytest.mark.requires("faiss") def test_faiss_add_texts() -> None: """Test end to end adding of texts.""" # Create initial doc store. texts = ["foo", "bar", "baz"] docsearch = FAISS.from_texts(texts, FakeEmbeddings()) # Test adding a similar document as before. docsearch.add_texts(["foo"]) output = docsearch.similarity_search("foo", k=2) assert output == [Document(page_content="foo"), Document(page_content="foo")] @pytest.mark.requires("faiss") async def test_faiss_async_add_texts() -> None: """Test end to end adding of texts.""" # Create initial doc store. texts = ["foo", "bar", "baz"] docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings()) # Test adding a similar document as before. 
await docsearch.aadd_texts(["foo"]) output = await docsearch.asimilarity_search("foo", k=2) assert output == [Document(page_content="foo"), Document(page_content="foo")] @pytest.mark.requires("faiss") def test_faiss_add_texts_not_supported() -> None: """Test adding of texts to a docstore that doesn't support it.""" docsearch = FAISS(FakeEmbeddings(), None, FakeDocstore(), {}) with pytest.raises(ValueError): docsearch.add_texts(["foo"]) @pytest.mark.requires("faiss") async def test_faiss_async_add_texts_not_supported() -> None: """Test adding of texts to a docstore that doesn't support it.""" docsearch = FAISS(FakeEmbeddings(), None, FakeDocstore(), {}) with pytest.raises(ValueError): await docsearch.aadd_texts(["foo"]) @pytest.mark.requires("faiss") def test_faiss_local_save_load() -> None: """Test end to end serialization.""" texts = ["foo", "bar", "baz"] docsearch = FAISS.from_texts(texts, FakeEmbeddings()) temp_timestamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S") with tempfile.TemporaryDirectory(suffix="_" + temp_timestamp + "/") as temp_folder: docsearch.save_local(temp_folder) new_docsearch = FAISS.load_local( temp_folder, FakeEmbeddings(), allow_dangerous_deserialization=True ) assert new_docsearch.index is not None @pytest.mark.requires("faiss") async def test_faiss_async_local_save_load() -> None: """Test end to end serialization.""" texts = ["foo", "bar", "baz"] docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings()) temp_timestamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S") with tempfile.TemporaryDirectory(suffix="_" + temp_timestamp + "/") as temp_folder: docsearch.save_local(temp_folder) new_docsearch = FAISS.load_local( temp_folder, FakeEmbeddings(), allow_dangerous_deserialization=True ) assert new_docsearch.index is not None @pytest.mark.requires("faiss") def test_faiss_similarity_search_with_relevance_scores() -> None: """Test the similarity search with normalized similarities.""" texts = ["foo", "bar", "baz"] docsearch = 
FAISS.from_texts( texts, FakeEmbeddings(), relevance_score_fn=lambda score: 1.0 - score / math.sqrt(2), ) outputs = docsearch.similarity_search_with_relevance_scores("foo", k=1) output, score = outputs[0] assert output == Document(page_content="foo") assert score == 1.0 @pytest.mark.requires("faiss") async def test_faiss_async_similarity_search_with_relevance_scores() -> None: """Test the similarity search with normalized similarities.""" texts = ["foo", "bar", "baz"] docsearch = await FAISS.afrom_texts( texts, FakeEmbeddings(), relevance_score_fn=lambda score: 1.0 - score / math.sqrt(2), ) outputs = await docsearch.asimilarity_search_with_relevance_scores("foo", k=1) output, score = outputs[0] assert output == Document(page_content="foo") assert score == 1.0 @pytest.mark.requires("faiss") def test_faiss_similarity_search_with_relevance_scores_with_threshold() -> None: """Test the similarity search with normalized similarities with score threshold.""" texts = ["foo", "bar", "baz"] docsearch = FAISS.from_texts( texts, FakeEmbeddings(), relevance_score_fn=lambda score: 1.0 - score / math.sqrt(2), ) outputs = docsearch.similarity_search_with_relevance_scores( "foo", k=2, score_threshold=0.5 ) assert len(outputs) == 1 output, score = outputs[0] assert output == Document(page_content="foo") assert score == 1.0 @pytest.mark.requires("faiss") async def test_faiss_asimilarity_search_with_relevance_scores_with_threshold() -> None: """Test the similarity search with normalized similarities with score threshold.""" texts = ["foo", "bar", "baz"] docsearch = await FAISS.afrom_texts( texts, FakeEmbeddings(), relevance_score_fn=lambda score: 1.0 - score / math.sqrt(2), ) outputs = await docsearch.asimilarity_search_with_relevance_scores( "foo", k=2, score_threshold=0.5 ) assert len(outputs) == 1 output, score = outputs[0] assert output == Document(page_content="foo") assert score == 1.0 @pytest.mark.requires("faiss") def test_faiss_invalid_normalize_fn() -> None: """Test the 
similarity search with normalized similarities.""" texts = ["foo", "bar", "baz"] docsearch = FAISS.from_texts( texts, FakeEmbeddings(), relevance_score_fn=lambda _: 2.0 ) with pytest.warns(Warning, match="scores must be between"): docsearch.similarity_search_with_relevance_scores("foo", k=1) @pytest.mark.requires("faiss") async def test_faiss_async_invalid_normalize_fn() -> None: """Test the similarity search with normalized similarities.""" texts = ["foo", "bar", "baz"] docsearch = await FAISS.afrom_texts( texts, FakeEmbeddings(), relevance_score_fn=lambda _: 2.0 ) with pytest.warns(Warning, match="scores must be between"): await docsearch.asimilarity_search_with_relevance_scores("foo", k=1) @pytest.mark.requires("faiss") def test_missing_normalize_score_fn() -> None: """Test doesn't perform similarity search without a valid distance strategy.""" texts = ["foo", "bar", "baz"] faiss_instance = FAISS.from_texts(texts, FakeEmbeddings(), distance_strategy="fake") with pytest.raises(ValueError): faiss_instance.similarity_search_with_relevance_scores("foo", k=2) @pytest.mark.skip(reason="old relevance score feature") @pytest.mark.requires("faiss") def test_ip_score() -> None: embedding = FakeEmbeddings() vector = embedding.embed_query("hi") assert vector == [1] * 9 + [0], f"FakeEmbeddings() has changed, produced {vector}" db = FAISS.from_texts( ["sundays coming so i drive my car"], embedding=FakeEmbeddings(), distance_strategy=DistanceStrategy.MAX_INNER_PRODUCT, ) scores = db.similarity_search_with_relevance_scores("sundays", k=1) assert len(scores) == 1, "only one vector should be in db" _, score = scores[0] assert ( score == 1 ), f"expected inner product of equivalent vectors to be 1, not {score}" @pytest.mark.requires("faiss") async def test_async_missing_normalize_score_fn() -> None: """Test doesn't perform similarity search without a valid distance strategy.""" texts = ["foo", "bar", "baz"] faiss_instance = await FAISS.afrom_texts( texts, FakeEmbeddings(), 
distance_strategy="fake" ) with pytest.raises(ValueError): await faiss_instance.asimilarity_search_with_relevance_scores("foo", k=2) @pytest.mark.requires("faiss") def test_delete() -> None: """Test the similarity search with normalized similarities.""" ids = ["a", "b", "c"] docsearch = FAISS.from_texts(["foo", "bar", "baz"], FakeEmbeddings(), ids=ids) docsearch.delete(ids[1:2]) result = docsearch.similarity_search("bar", k=2) assert sorted([d.page_content for d in result]) == ["baz", "foo"] assert docsearch.index_to_docstore_id == {0: ids[0], 1: ids[2]} @pytest.mark.requires("faiss") async def test_async_delete() -> None: """Test the similarity search with normalized similarities.""" ids = ["a", "b", "c"] docsearch = await FAISS.afrom_texts( ["foo", "bar", "baz"], FakeEmbeddings(), ids=ids ) docsearch.delete(ids[1:2]) result = await docsearch.asimilarity_search("bar", k=2) assert sorted([d.page_content for d in result]) == ["baz", "foo"] assert docsearch.index_to_docstore_id == {0: ids[0], 1: ids[2]} @pytest.mark.requires("faiss") def test_faiss_with_duplicate_ids() -> None: """Test whether FAISS raises an exception for duplicate ids.""" texts = ["foo", "bar", "baz"] duplicate_ids = ["id1", "id1", "id2"] with pytest.raises(ValueError) as exc_info: FAISS.from_texts(texts, FakeEmbeddings(), ids=duplicate_ids) assert "Duplicate ids found in the ids list." in str(exc_info.value)
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/vectorstores/test_imports.py
from langchain_core.vectorstores import VectorStore

from langchain_community import vectorstores
from langchain_community.vectorstores import __all__, _module_lookup

# Canonical list of names the vectorstores package must export.
# Keep sorted-ish and in sync with langchain_community.vectorstores.__init__.
EXPECTED_ALL = [
    "Aerospike",
    "AlibabaCloudOpenSearch",
    "AlibabaCloudOpenSearchSettings",
    "AnalyticDB",
    "Annoy",
    "ApacheDoris",
    "ApertureDB",
    "AstraDB",
    "AtlasDB",
    "AwaDB",
    "AzureCosmosDBNoSqlVectorSearch",
    "AzureCosmosDBVectorSearch",
    "AzureSearch",
    "BESVectorStore",
    "Bagel",
    "BaiduVectorDB",
    "BigQueryVectorSearch",
    "Cassandra",
    "Chroma",
    "Clarifai",
    "Clickhouse",
    "ClickhouseSettings",
    "CouchbaseVectorStore",
    "DashVector",
    "DatabricksVectorSearch",
    "DeepLake",
    "Dingo",
    "DistanceStrategy",
    "DocArrayHnswSearch",
    "DocArrayInMemorySearch",
    "DocumentDBVectorSearch",
    "DuckDB",
    "EcloudESVectorStore",
    "ElasticKnnSearch",
    "ElasticVectorSearch",
    "ElasticsearchStore",
    "Epsilla",
    "FAISS",
    "HanaDB",
    "Hologres",
    "InMemoryVectorStore",
    "InfinispanVS",
    "KDBAI",
    "Kinetica",
    "KineticaSettings",
    "LLMRails",
    "LanceDB",
    "Lantern",
    "ManticoreSearch",
    "ManticoreSearchSettings",
    "Marqo",
    "MatchingEngine",
    "Meilisearch",
    "Milvus",
    "MomentoVectorIndex",
    "MongoDBAtlasVectorSearch",
    "MyScale",
    "MyScaleSettings",
    "Neo4jVector",
    "NeuralDBClientVectorStore",
    "NeuralDBVectorStore",
    "OpenSearchVectorSearch",
    "OracleVS",
    "PGEmbedding",
    "PGVector",
    "PathwayVectorClient",
    "Pinecone",
    "Qdrant",
    "Redis",
    "Relyt",
    "Rockset",
    "SKLearnVectorStore",
    "SQLiteVec",
    "SQLiteVSS",
    "ScaNN",
    "SemaDB",
    "SingleStoreDB",
    "StarRocks",
    "SupabaseVectorStore",
    "SurrealDBStore",
    "Tair",
    "TencentVectorDB",
    "TiDBVectorStore",
    "Tigris",
    "TileDB",
    "TimescaleVector",
    "Typesense",
    "UpstashVectorStore",
    "USearch",
    "VDMS",
    "Vald",
    "Vearch",
    "Vectara",
    "VectorStore",
    "VespaStore",
    "VLite",
    "Weaviate",
    "Yellowbrick",
    "ZepVectorStore",
    "ZepCloudVectorStore",
    "Zilliz",
]


def test_all_imports_exclusive() -> None:
    """Simple test to make sure all things can be imported."""
    for cls in vectorstores.__all__:
        # Settings/enum helper exports are not VectorStore subclasses and
        # are therefore excluded from the subclass check.
        if cls not in [
            "AlibabaCloudOpenSearchSettings",
            "ClickhouseSettings",
            "MyScaleSettings",
            "PathwayVectorClient",
            "DistanceStrategy",
            "KineticaSettings",
            "ManticoreSearchSettings",
        ]:
            assert issubclass(getattr(vectorstores, cls), VectorStore)


def test_all_imports() -> None:
    """__all__ matches the expected list and the lazy-import lookup table."""
    assert set(__all__) == set(EXPECTED_ALL)
    assert set(__all__) == set(_module_lookup.keys())
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/vectorstores/test_elasticsearch.py
"""Test Elasticsearch functionality.""" import pytest from langchain_community.vectorstores.elasticsearch import ( ApproxRetrievalStrategy, ElasticsearchStore, ) from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings @pytest.mark.requires("elasticsearch") def test_elasticsearch_hybrid_scores_guard() -> None: """Ensure an error is raised when search with score in hybrid mode because in this case Elasticsearch does not return any score. """ from elasticsearch import Elasticsearch query_string = "foo" embeddings = FakeEmbeddings() store = ElasticsearchStore( index_name="dummy_index", es_connection=Elasticsearch(hosts=["http://dummy-host:9200"]), embedding=embeddings, strategy=ApproxRetrievalStrategy(hybrid=True), ) with pytest.raises(ValueError): store.similarity_search_with_score(query_string) embedded_query = embeddings.embed_query(query_string) with pytest.raises(ValueError): store.similarity_search_by_vector_with_relevance_scores(embedded_query)
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/vectorstores/test_pgvector.py
"""Test PGVector functionality.""" from unittest import mock from unittest.mock import Mock import pytest from langchain_community.embeddings import FakeEmbeddings from langchain_community.vectorstores import pgvector _CONNECTION_STRING = pgvector.PGVector.connection_string_from_db_params( driver="psycopg2", host="localhost", port=5432, database="postgres", user="postgres", password="postgres", ) _EMBEDDING_FUNCTION = FakeEmbeddings(size=1536) @pytest.mark.requires("pgvector") @mock.patch("sqlalchemy.create_engine") def test_given_a_connection_is_provided_then_no_engine_should_be_created( create_engine: Mock, ) -> None: """When a connection is provided then no engine should be created.""" pgvector.PGVector( connection_string=_CONNECTION_STRING, embedding_function=_EMBEDDING_FUNCTION, connection=mock.MagicMock(), ) create_engine.assert_not_called() @pytest.mark.requires("pgvector") @mock.patch("sqlalchemy.create_engine") def test_given_no_connection_or_engine_args_provided_default_engine_should_be_used( create_engine: Mock, ) -> None: """When no connection or engine arguments are provided then the default configuration must be used.""" # noqa: E501 pgvector.PGVector( connection_string=_CONNECTION_STRING, embedding_function=_EMBEDDING_FUNCTION, ) create_engine.assert_called_with( url=_CONNECTION_STRING, ) @pytest.mark.requires("pgvector") @mock.patch("sqlalchemy.create_engine") def test_given_engine_args_are_provided_then_they_should_be_used( create_engine: Mock, ) -> None: """When engine arguments are provided then they must be used to create the underlying engine.""" # noqa: E501 engine_args = { "pool_size": 5, "max_overflow": 10, "pool_recycle": -1, "pool_use_lifo": False, "pool_pre_ping": False, "pool_timeout": 30, } pgvector.PGVector( connection_string=_CONNECTION_STRING, embedding_function=_EMBEDDING_FUNCTION, engine_args=engine_args, ) create_engine.assert_called_with( url=_CONNECTION_STRING, **engine_args, )
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/vectorstores/test_indexing_docs.py
from langchain_core.vectorstores import VectorStore

import langchain_community.vectorstores


def test_compatible_vectorstore_documentation() -> None:
    """Test which vectorstores are compatible with the indexing API.

    This serves as a reminder to update the documentation in [1]
    that specifies which vectorstores are compatible with the
    indexing API.

    Ideally if a developer adds a new vectorstore or modifies
    an existing one in such a way that affects its compatibility
    with the Indexing API, he/she will see this failed test
    case and 1) update docs in [1] and 2) update the `documented`
    dict in this test case.

    [1] langchain/docs/docs/modules/data_connection/indexing.ipynb
    """

    # Check if a vectorstore is compatible with the indexing API
    def check_compatibility(vector_store: VectorStore) -> bool:
        """Check if a vectorstore is compatible with the indexing API."""
        methods = ["delete", "add_documents"]
        for method in methods:
            if not hasattr(vector_store, method):
                return False
        # Checking if the vectorstore has overridden the default delete method
        # implementation which just raises a NotImplementedError
        if getattr(vector_store, "delete") == VectorStore.delete:
            return False
        return True

    # Check all vector store classes for compatibility
    compatible = set()
    for class_name in langchain_community.vectorstores.__all__:
        # Get the definition of the class
        cls = getattr(langchain_community.vectorstores, class_name)

        # If the class corresponds to a vectorstore, check its compatibility
        if issubclass(cls, VectorStore):
            is_compatible = check_compatibility(cls)
            if is_compatible:
                compatible.add(class_name)

    # These are mentioned in the indexing.ipynb documentation
    # FIX: removed a duplicate "OpenSearchVectorSearch" literal that appeared
    # twice in this set (sets silently dedupe, so the duplicate was dead text).
    documented = {
        "Aerospike",
        "AnalyticDB",
        "ApertureDB",
        "AstraDB",
        "AzureCosmosDBVectorSearch",
        "AzureCosmosDBNoSqlVectorSearch",
        "AzureSearch",
        "AwaDB",
        "Bagel",
        "BESVectorStore",
        "BigQueryVectorSearch",
        "Cassandra",
        "Chroma",
        "CouchbaseVectorStore",
        "DashVector",
        "DatabricksVectorSearch",
        "TiDBVectorStore",
        "DeepLake",
        "Dingo",
        "DocumentDBVectorSearch",
        "ElasticVectorSearch",
        "ElasticsearchStore",
        "FAISS",
        "HanaDB",
        "InMemoryVectorStore",
        "LanceDB",
        "Milvus",
        "MomentoVectorIndex",
        "MyScale",
        "OpenSearchVectorSearch",
        "OracleVS",
        "PGVector",
        "Pinecone",
        "Qdrant",
        "Redis",
        "Relyt",
        "Rockset",
        "ScaNN",
        "SemaDB",
        "SingleStoreDB",
        "SupabaseVectorStore",
        "SurrealDBStore",
        "TileDB",
        "TimescaleVector",
        "TencentVectorDB",
        "UpstashVectorStore",
        "EcloudESVectorStore",
        "Vald",
        "VDMS",
        "Vearch",
        "Vectara",
        "VespaStore",
        "VLite",
        "Weaviate",
        "Yellowbrick",
        "ZepVectorStore",
        "ZepCloudVectorStore",
        "Zilliz",
        "Lantern",
    }
    assert compatible == documented
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/vectorstores/test_sklearn.py
"""Test SKLearnVectorStore functionality.""" from pathlib import Path import pytest from langchain_community.vectorstores import SKLearnVectorStore from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings @pytest.mark.requires("numpy", "sklearn") def test_sklearn() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] docsearch = SKLearnVectorStore.from_texts(texts, FakeEmbeddings()) output = docsearch.similarity_search("foo", k=1) assert len(output) == 1 assert output[0].page_content == "foo" @pytest.mark.requires("numpy", "sklearn") def test_sklearn_with_metadatas() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": str(i)} for i in range(len(texts))] docsearch = SKLearnVectorStore.from_texts( texts, FakeEmbeddings(), metadatas=metadatas, ) output = docsearch.similarity_search("foo", k=1) assert output[0].metadata["page"] == "0" @pytest.mark.requires("numpy", "sklearn") def test_sklearn_with_metadatas_with_scores() -> None: """Test end to end construction and scored search.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": str(i)} for i in range(len(texts))] docsearch = SKLearnVectorStore.from_texts( texts, FakeEmbeddings(), metadatas=metadatas, ) output = docsearch.similarity_search_with_relevance_scores("foo", k=1) assert len(output) == 1 doc, score = output[0] assert doc.page_content == "foo" assert doc.metadata["page"] == "0" assert score == 1 @pytest.mark.requires("numpy", "sklearn") def test_sklearn_with_persistence(tmpdir: Path) -> None: """Test end to end construction and search, with persistence.""" persist_path = tmpdir / "foo.parquet" texts = ["foo", "bar", "baz"] docsearch = SKLearnVectorStore.from_texts( texts, FakeEmbeddings(), persist_path=str(persist_path), serializer="json", ) output = docsearch.similarity_search("foo", k=1) assert len(output) == 1 assert output[0].page_content == "foo" docsearch.persist() # Get a new VectorStore 
from the persisted directory docsearch = SKLearnVectorStore( FakeEmbeddings(), persist_path=str(persist_path), serializer="json" ) output = docsearch.similarity_search("foo", k=1) assert len(output) == 1 assert output[0].page_content == "foo" @pytest.mark.requires("numpy", "sklearn") def test_sklearn_mmr() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] docsearch = SKLearnVectorStore.from_texts(texts, FakeEmbeddings()) output = docsearch.max_marginal_relevance_search("foo", k=1, fetch_k=3) assert len(output) == 1 assert output[0].page_content == "foo" @pytest.mark.requires("numpy", "sklearn") def test_sklearn_mmr_by_vector() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] embeddings = FakeEmbeddings() docsearch = SKLearnVectorStore.from_texts(texts, embeddings) embedded_query = embeddings.embed_query("foo") output = docsearch.max_marginal_relevance_search_by_vector( embedded_query, k=1, fetch_k=3 ) assert len(output) == 1 assert output[0].page_content == "foo"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/vectorstores/test_aerospike.py
import sys
from typing import Any, Callable, Generator
from unittest.mock import MagicMock, Mock, call

import pytest
from langchain_core.documents import Document

from langchain_community.vectorstores.aerospike import Aerospike
from langchain_community.vectorstores.utils import DistanceStrategy
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings

# FIX: the original used `markA and markB`, which evaluates to only the second
# (truthy) mark — the `requires` mark was silently dropped. `pytestmark`
# accepts a list of marks; both must apply to every test in this module.
pytestmark = [
    pytest.mark.requires("aerospike_vector_search"),
    pytest.mark.skipif(
        sys.version_info < (3, 9), reason="requires python3.9 or higher"
    ),
]


@pytest.fixture(scope="module")
def client() -> Generator[Any, None, None]:
    """Real AVS client pointed at an unreachable dummy host (module-scoped)."""
    try:
        from aerospike_vector_search import Client
        from aerospike_vector_search.types import HostPort
    except ImportError:
        pytest.skip("aerospike_vector_search not installed")

    client = Client(
        seeds=[
            HostPort(host="dummy-host", port=3000),
        ],
    )

    yield client

    client.close()


@pytest.fixture
def mock_client(mocker: Any) -> None:
    """MagicMock speccing the AVS Client, for call-assertion tests."""
    try:
        from aerospike_vector_search import Client
    except ImportError:
        pytest.skip("aerospike_vector_search not installed")

    return mocker.MagicMock(Client)


def test_aerospike(client: Any) -> None:
    """A vector search against an unreachable server raises AVSError.

    FIX: the previous docstring was copy-pasted from the Elasticsearch
    hybrid-scores test and described the wrong behavior entirely.
    """
    from aerospike_vector_search import AVSError

    query_string = "foo"
    embedding = FakeEmbeddings()

    store = Aerospike(
        client=client,
        embedding=embedding,
        text_key="text",
        vector_key="vector",
        index_name="dummy_index",
        namespace="test",
        set_name="testset",
        distance_strategy=DistanceStrategy.COSINE,
    )

    # TODO: Remove grpc import when aerospike_vector_search wraps grpc errors
    with pytest.raises(AVSError):
        store.similarity_search_by_vector(embedding.embed_query(query_string))


def test_init_aerospike_distance(client: Any) -> None:
    """An AVS VectorDistanceMetric is translated to a DistanceStrategy."""
    from aerospike_vector_search.types import VectorDistanceMetric

    embedding = FakeEmbeddings()
    aerospike = Aerospike(
        client=client,
        embedding=embedding,
        text_key="text",
        vector_key="vector",
        index_name="dummy_index",
        namespace="test",
        set_name="testset",
        distance_strategy=VectorDistanceMetric.COSINE,
    )

    assert aerospike._distance_strategy == DistanceStrategy.COSINE


def test_init_bad_embedding(client: Any) -> None:
    """Passing a bare callable instead of an Embeddings object warns."""

    def bad_embedding() -> None:
        return None

    with pytest.warns(
        UserWarning,
        match=(
            "Passing in `embedding` as a Callable is deprecated. Please pass"
            + " in an Embeddings object instead."
        ),
    ):
        Aerospike(
            client=client,
            embedding=bad_embedding,
            text_key="text",
            vector_key="vector",
            index_name="dummy_index",
            namespace="test",
            set_name="testset",
            distance_strategy=DistanceStrategy.COSINE,
        )


def test_init_bad_client(client: Any) -> None:
    """A non-AVS client is rejected with a descriptive ValueError."""

    class BadClient:
        pass

    with pytest.raises(
        ValueError,
        match=(
            "client should be an instance of aerospike_vector_search.Client,"
            + " got <class 'tests.unit_tests.vectorstores.test_aerospike."
            + "test_init_bad_client.<locals>.BadClient'>"
        ),
    ):
        Aerospike(
            client=BadClient(),
            embedding=FakeEmbeddings(),
            text_key="text",
            vector_key="vector",
            index_name="dummy_index",
            namespace="test",
            set_name="testset",
            distance_strategy=DistanceStrategy.COSINE,
        )


def test_convert_distance_strategy(client: Any) -> None:
    """Each supported AVS metric maps to a DistanceStrategy; others raise."""
    from aerospike_vector_search.types import VectorDistanceMetric

    aerospike = Aerospike(
        client=client,
        embedding=FakeEmbeddings(),
        text_key="text",
        vector_key="vector",
        index_name="dummy_index",
        namespace="test",
        set_name="testset",
        distance_strategy=DistanceStrategy.COSINE,
    )

    converted_strategy = aerospike.convert_distance_strategy(
        VectorDistanceMetric.COSINE
    )
    assert converted_strategy == DistanceStrategy.COSINE

    converted_strategy = aerospike.convert_distance_strategy(
        VectorDistanceMetric.DOT_PRODUCT
    )
    assert converted_strategy == DistanceStrategy.DOT_PRODUCT

    converted_strategy = aerospike.convert_distance_strategy(
        VectorDistanceMetric.SQUARED_EUCLIDEAN
    )
    assert converted_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE

    # HAMMING has no DistanceStrategy equivalent.
    with pytest.raises(ValueError):
        aerospike.convert_distance_strategy(VectorDistanceMetric.HAMMING)


def test_add_texts_wait_for_index_error(client: Any) -> None:
    """wait_for_index=True without an index_name is a usage error."""
    aerospike = Aerospike(
        client=client,
        embedding=FakeEmbeddings(),
        text_key="text",
        vector_key="vector",
        # index_name="dummy_index",
        namespace="test",
        set_name="testset",
        distance_strategy=DistanceStrategy.COSINE,
    )

    with pytest.raises(
        ValueError, match="if wait_for_index is True, index_name must be provided"
    ):
        aerospike.add_texts(["foo", "bar"], wait_for_index=True)


def test_add_texts_returns_ids(mock_client: MagicMock) -> None:
    """add_texts upserts each record and returns the supplied ids."""
    aerospike = Aerospike(
        client=mock_client,
        embedding=FakeEmbeddings(),
        text_key="text",
        vector_key="vector",
        namespace="test",
        set_name="testset",
        distance_strategy=DistanceStrategy.COSINE,
    )

    # FIX: renamed the typo'd local `excepted` to `expected`.
    expected = ["0", "1"]
    actual = aerospike.add_texts(
        ["foo", "bar"],
        metadatas=[{"foo": 0}, {"bar": 1}],
        ids=["0", "1"],
        set_name="otherset",
        index_name="dummy_index",
        wait_for_index=True,
    )

    assert expected == actual
    mock_client.upsert.assert_has_calls(
        calls=[
            call(
                namespace="test",
                key="0",
                set_name="otherset",
                record_data={
                    "_id": "0",
                    "text": "foo",
                    "vector": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
                    "foo": 0,
                },
            ),
            call(
                namespace="test",
                key="1",
                set_name="otherset",
                record_data={
                    "_id": "1",
                    "text": "bar",
                    "vector": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
                    "bar": 1,
                },
            ),
        ]
    )
    mock_client.wait_for_index_completion.assert_called_once_with(
        namespace="test",
        name="dummy_index",
    )


def test_delete_returns_false(mock_client: MagicMock) -> None:
    """A server error during delete is swallowed and reported as False."""
    from aerospike_vector_search import AVSServerError

    mock_client.delete.side_effect = Mock(side_effect=AVSServerError(rpc_error=""))
    aerospike = Aerospike(
        client=mock_client,
        embedding=FakeEmbeddings(),
        text_key="text",
        vector_key="vector",
        namespace="test",
        set_name="testset",
        distance_strategy=DistanceStrategy.COSINE,
    )

    assert not aerospike.delete(["foo", "bar"], set_name="testset")
    # Only the first key is attempted before the error short-circuits.
    mock_client.delete.assert_called_once_with(
        namespace="test", key="foo", set_name="testset"
    )


def test_similarity_search_by_vector_with_score_missing_index_name(
    client: Any,
) -> None:
    """Searching without an index_name (ctor or call) is a usage error."""
    aerospike = Aerospike(
        client=client,
        embedding=FakeEmbeddings(),
        text_key="text",
        vector_key="vector",
        # index_name="dummy_index",
        namespace="test",
        set_name="testset",
        distance_strategy=DistanceStrategy.COSINE,
    )

    with pytest.raises(ValueError, match="index_name must be provided"):
        aerospike.similarity_search_by_vector_with_score([1.0, 2.0, 3.0])


def test_similarity_search_by_vector_with_score_filters_missing_text_key(
    mock_client: MagicMock,
) -> None:
    """Neighbors whose fields lack the text_key are filtered out of results."""
    from aerospike_vector_search.types import Neighbor

    text_key = "text"
    mock_client.vector_search.return_value = [
        Neighbor(key="key1", fields={text_key: 1}, distance=1.0),
        Neighbor(key="key2", fields={}, distance=0.0),
        Neighbor(key="key3", fields={text_key: 3}, distance=3.0),
    ]
    aerospike = Aerospike(
        client=mock_client,
        embedding=FakeEmbeddings(),
        text_key=text_key,
        vector_key="vector",
        index_name="dummy_index",
        namespace="test",
        set_name="testset",
        distance_strategy=DistanceStrategy.COSINE,
    )

    actual = aerospike.similarity_search_by_vector_with_score(
        [1.0, 2.0, 3.0], k=10, metadata_keys=["foo"]
    )

    # key2 has no text field and is dropped.
    expected = [
        (Document(page_content="1"), 1.0),
        (Document(page_content="3"), 3.0),
    ]
    mock_client.vector_search.assert_called_once_with(
        index_name="dummy_index",
        namespace="test",
        query=[1.0, 2.0, 3.0],
        limit=10,
        field_names=[text_key, "foo"],
    )

    assert expected == actual


def test_similarity_search_by_vector_with_score_overwrite_index_name(
    mock_client: MagicMock,
) -> None:
    """An index_name passed per-call overrides the constructor's."""
    mock_client.vector_search.return_value = []
    aerospike = Aerospike(
        client=mock_client,
        embedding=FakeEmbeddings(),
        text_key="text",
        vector_key="vector",
        index_name="dummy_index",
        namespace="test",
        set_name="testset",
        distance_strategy=DistanceStrategy.COSINE,
    )

    aerospike.similarity_search_by_vector_with_score(
        [1.0, 2.0, 3.0], index_name="other_index"
    )

    mock_client.vector_search.assert_called_once_with(
        index_name="other_index",
        namespace="test",
        query=[1.0, 2.0, 3.0],
        limit=4,
        field_names=None,
    )


@pytest.mark.parametrize(
    "distance_strategy,expected_fn",
    [
        (DistanceStrategy.COSINE, Aerospike._cosine_relevance_score_fn),
        (DistanceStrategy.EUCLIDEAN_DISTANCE, Aerospike._euclidean_relevance_score_fn),
        (DistanceStrategy.DOT_PRODUCT, Aerospike._max_inner_product_relevance_score_fn),
        (DistanceStrategy.JACCARD, ValueError),
    ],
)
def test_select_relevance_score_fn(
    client: Any, distance_strategy: DistanceStrategy, expected_fn: Callable
) -> None:
    """Each distance strategy selects its relevance fn; unsupported ones raise."""
    aerospike = Aerospike(
        client=client,
        embedding=FakeEmbeddings(),
        text_key="text",
        vector_key="vector",
        index_name="dummy_index",
        namespace="test",
        set_name="testset",
        distance_strategy=distance_strategy,
    )

    if expected_fn is ValueError:
        with pytest.raises(ValueError):
            aerospike._select_relevance_score_fn()
    else:
        fn = aerospike._select_relevance_score_fn()
        assert fn == expected_fn
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/vectorstores/test_neo4j.py
"""Test Neo4j functionality.""" from langchain_community.vectorstores.neo4j_vector import ( dict_to_yaml_str, remove_lucene_chars, ) def test_escaping_lucene() -> None: """Test escaping lucene characters""" assert remove_lucene_chars("Hello+World") == "Hello World" assert remove_lucene_chars("Hello World\\") == "Hello World" assert ( remove_lucene_chars("It is the end of the world. Take shelter!") == "It is the end of the world. Take shelter" ) assert ( remove_lucene_chars("It is the end of the world. Take shelter&&") == "It is the end of the world. Take shelter" ) assert ( remove_lucene_chars("Bill&&Melinda Gates Foundation") == "Bill Melinda Gates Foundation" ) assert ( remove_lucene_chars("It is the end of the world. Take shelter(&&)") == "It is the end of the world. Take shelter" ) assert ( remove_lucene_chars("It is the end of the world. Take shelter??") == "It is the end of the world. Take shelter" ) assert ( remove_lucene_chars("It is the end of the world. Take shelter^") == "It is the end of the world. Take shelter" ) assert ( remove_lucene_chars("It is the end of the world. Take shelter+") == "It is the end of the world. Take shelter" ) assert ( remove_lucene_chars("It is the end of the world. Take shelter-") == "It is the end of the world. Take shelter" ) assert ( remove_lucene_chars("It is the end of the world. Take shelter~") == "It is the end of the world. Take shelter" ) def test_converting_to_yaml() -> None: example_dict = { "name": "John Doe", "age": 30, "skills": ["Python", "Data Analysis", "Machine Learning"], "location": {"city": "Ljubljana", "country": "Slovenia"}, } yaml_str = dict_to_yaml_str(example_dict) expected_output = ( "name: John Doe\nage: 30\nskills:\n- Python\n- " "Data Analysis\n- Machine Learning\nlocation:\n city: Ljubljana\n" " country: Slovenia\n" ) assert yaml_str == expected_output
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/vectorstores/test_inmemory.py
from pathlib import Path
from typing import Any

import pytest
from langchain_core.documents import Document
from langchain_tests.integration_tests.vectorstores import (
    AsyncReadWriteTestSuite,
    ReadWriteTestSuite,
)

from langchain_community.vectorstores.inmemory import InMemoryVectorStore
from tests.integration_tests.vectorstores.fake_embeddings import (
    ConsistentFakeEmbeddings,
)


class AnyStr(str):
    # A string that compares equal to any str — used to ignore auto-generated
    # document ids in equality assertions.
    # NOTE(review): defining __eq__ sets __hash__ to None, so AnyStr
    # instances are unhashable — fine for these assertions, but confirm
    # nothing tries to put them in a set/dict key.
    def __eq__(self, other: Any) -> bool:
        return isinstance(other, str)


def _AnyDocument(**kwargs: Any) -> Document:
    """Create a Document with an any id field."""
    doc = Document(**kwargs)
    doc.id = AnyStr()
    return doc


class TestInMemoryReadWriteTestSuite(ReadWriteTestSuite):
    # Runs the shared sync read/write conformance suite against the store.
    @pytest.fixture
    def vectorstore(self) -> InMemoryVectorStore:
        return InMemoryVectorStore(embedding=self.get_embeddings())


class TestAsyncInMemoryReadWriteTestSuite(AsyncReadWriteTestSuite):
    # Runs the shared async read/write conformance suite against the store.
    @pytest.fixture
    async def vectorstore(self) -> InMemoryVectorStore:
        return InMemoryVectorStore(embedding=self.get_embeddings())


async def test_inmemory() -> None:
    """Test end to end construction and search."""
    store = await InMemoryVectorStore.afrom_texts(
        ["foo", "bar", "baz"], ConsistentFakeEmbeddings()
    )
    output = await store.asimilarity_search("foo", k=1)
    assert output == [_AnyDocument(page_content="foo")]

    output = await store.asimilarity_search("bar", k=2)
    assert output == [
        _AnyDocument(page_content="bar"),
        _AnyDocument(page_content="baz"),
    ]

    # Scores must be in descending order of similarity.
    output2 = await store.asimilarity_search_with_score("bar", k=2)
    assert output2[0][1] > output2[1][1]


async def test_add_by_ids() -> None:
    """Explicit ids are honored by both sync and async add paths."""
    vectorstore = InMemoryVectorStore(embedding=ConsistentFakeEmbeddings())

    # Check sync version
    ids1 = vectorstore.add_texts(["foo", "bar", "baz"], ids=["1", "2", "3"])
    assert ids1 == ["1", "2", "3"]
    assert sorted(vectorstore.store.keys()) == ["1", "2", "3"]

    ids2 = await vectorstore.aadd_texts(["foo", "bar", "baz"], ids=["4", "5", "6"])
    assert ids2 == ["4", "5", "6"]
    assert sorted(vectorstore.store.keys()) == ["1", "2", "3", "4", "5", "6"]


async def test_inmemory_mmr() -> None:
    """MMR returns diverse results and tolerates k > store size."""
    texts = ["foo", "foo", "fou", "foy"]
    docsearch = await InMemoryVectorStore.afrom_texts(texts, ConsistentFakeEmbeddings())
    # make sure we can k > docstore size
    output = await docsearch.amax_marginal_relevance_search(
        "foo", k=10, lambda_mult=0.1
    )
    assert len(output) == len(texts)
    assert output[0] == _AnyDocument(page_content="foo")
    # With low lambda, the second pick favors diversity over similarity.
    assert output[1] == _AnyDocument(page_content="foy")


async def test_inmemory_dump_load(tmp_path: Path) -> None:
    """Test end to end construction and search."""
    embedding = ConsistentFakeEmbeddings()
    store = await InMemoryVectorStore.afrom_texts(["foo", "bar", "baz"], embedding)
    output = await store.asimilarity_search("foo", k=1)

    test_file = str(tmp_path / "test.json")
    store.dump(test_file)

    # A store loaded from disk must answer identically to the original.
    loaded_store = InMemoryVectorStore.load(test_file, embedding)
    loaded_output = await loaded_store.asimilarity_search("foo", k=1)

    assert output == loaded_output


async def test_inmemory_filter() -> None:
    """Test end to end construction and search."""
    store = await InMemoryVectorStore.afrom_texts(
        ["foo", "bar"],
        ConsistentFakeEmbeddings(),
        [{"id": 1}, {"id": 2}],
    )
    # The metadata filter restricts candidates before similarity ranking.
    output = await store.asimilarity_search(
        "baz", filter=lambda doc: doc.metadata["id"] == 1
    )
    assert output == [_AnyDocument(page_content="foo", metadata={"id": 1})]
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/vectorstores/test_utils.py
"""Test vector store utility functions.""" import numpy as np from langchain_core.documents import Document from langchain_community.vectorstores.utils import ( filter_complex_metadata, maximal_marginal_relevance, ) def test_maximal_marginal_relevance_lambda_zero() -> None: query_embedding = np.random.random(size=5) embedding_list = [query_embedding, query_embedding, np.zeros(5)] expected = [0, 2] actual = maximal_marginal_relevance( query_embedding, embedding_list, lambda_mult=0, k=2 ) assert expected == actual def test_maximal_marginal_relevance_lambda_one() -> None: query_embedding = np.random.random(size=5) embedding_list = [query_embedding, query_embedding, np.zeros(5)] expected = [0, 1] actual = maximal_marginal_relevance( query_embedding, embedding_list, lambda_mult=1, k=2 ) assert expected == actual def test_maximal_marginal_relevance() -> None: query_embedding = np.array([1, 0]) # Vectors that are 30, 45 and 75 degrees from query vector (cosine similarity of # 0.87, 0.71, 0.26) and the latter two are 15 and 60 degree from the first # (cosine similarity 0.97 and 0.71). 
So for 3rd vector be chosen, must be case that # 0.71lambda - 0.97(1 - lambda) < 0.26lambda - 0.71(1-lambda) # -> lambda ~< .26 / .71 embedding_list = [[3**0.5, 1], [1, 1], [1, 2 + (3**0.5)]] expected = [0, 2] actual = maximal_marginal_relevance( query_embedding, embedding_list, lambda_mult=(25 / 71), k=2 ) assert expected == actual expected = [0, 1] actual = maximal_marginal_relevance( query_embedding, embedding_list, lambda_mult=(27 / 71), k=2 ) assert expected == actual def test_maximal_marginal_relevance_query_dim() -> None: query_embedding = np.random.random(size=5) query_embedding_2d = query_embedding.reshape((1, 5)) embedding_list = np.random.random(size=(4, 5)).tolist() first = maximal_marginal_relevance(query_embedding, embedding_list) second = maximal_marginal_relevance(query_embedding_2d, embedding_list) assert first == second def test_filter_list_metadata() -> None: documents = [ Document( page_content="", metadata={ "key1": "this is a string!", "key2": ["a", "list", "of", "strings"], }, ), Document( page_content="", metadata={ "key1": "this is another string!", "key2": {"foo"}, }, ), Document( page_content="", metadata={ "key1": "this is another string!", "key2": {"foo": "bar"}, }, ), Document( page_content="", metadata={ "key1": "this is another string!", "key2": True, }, ), Document( page_content="", metadata={ "key1": "this is another string!", "key2": 1, }, ), Document( page_content="", metadata={ "key1": "this is another string!", "key2": 1.0, }, ), Document( page_content="", metadata={ "key1": "this is another string!", "key2": "foo", }, ), ] updated_documents = filter_complex_metadata(documents) filtered_metadata = [doc.metadata for doc in updated_documents] assert filtered_metadata == [ {"key1": "this is a string!"}, {"key1": "this is another string!"}, {"key1": "this is another string!"}, {"key1": "this is another string!", "key2": True}, {"key1": "this is another string!", "key2": 1}, {"key1": "this is another string!", "key2": 1.0}, {"key1": 
"this is another string!", "key2": "foo"}, ]
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/vectorstores/test_azure_search.py
import json from typing import Any, Dict, List, Optional from unittest.mock import patch import pytest from langchain_community.vectorstores.azuresearch import AzureSearch from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings DEFAULT_VECTOR_DIMENSION = 4 class FakeEmbeddingsWithDimension(FakeEmbeddings): """Fake embeddings functionality for testing.""" def __init__(self, dimension: int = DEFAULT_VECTOR_DIMENSION): super().__init__() self.dimension = dimension def embed_documents(self, embedding_texts: List[str]) -> List[List[float]]: """Return simple embeddings.""" return [ [float(1.0)] * (self.dimension - 1) + [float(i)] for i in range(len(embedding_texts)) ] def embed_query(self, text: str) -> List[float]: """Return simple embeddings.""" return [float(1.0)] * (self.dimension - 1) + [float(0.0)] DEFAULT_INDEX_NAME = "langchain-index" DEFAULT_ENDPOINT = "https://my-search-service.search.windows.net" DEFAULT_KEY = "mykey" DEFAULT_ACCESS_TOKEN = "myaccesstoken1" DEFAULT_EMBEDDING_MODEL = FakeEmbeddingsWithDimension() def mock_default_index(*args, **kwargs): # type: ignore[no-untyped-def] from azure.search.documents.indexes.models import ( ExhaustiveKnnAlgorithmConfiguration, ExhaustiveKnnParameters, HnswAlgorithmConfiguration, HnswParameters, SearchField, SearchFieldDataType, SearchIndex, VectorSearch, VectorSearchAlgorithmMetric, VectorSearchProfile, ) return SearchIndex( name=DEFAULT_INDEX_NAME, fields=[ SearchField( name="id", type=SearchFieldDataType.String, key=True, hidden=False, searchable=False, filterable=True, sortable=False, facetable=False, ), SearchField( name="content", type=SearchFieldDataType.String, key=False, hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, ), SearchField( name="content_vector", type=SearchFieldDataType.Collection(SearchFieldDataType.Single), searchable=True, vector_search_dimensions=4, vector_search_profile_name="myHnswProfile", ), SearchField( name="metadata", 
type="Edm.String", key=False, hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, ), ], vector_search=VectorSearch( profiles=[ VectorSearchProfile( name="myHnswProfile", algorithm_configuration_name="default" ), VectorSearchProfile( name="myExhaustiveKnnProfile", algorithm_configuration_name="default_exhaustive_knn", ), ], algorithms=[ HnswAlgorithmConfiguration( name="default", parameters=HnswParameters( m=4, ef_construction=400, ef_search=500, metric=VectorSearchAlgorithmMetric.COSINE, ), ), ExhaustiveKnnAlgorithmConfiguration( name="default_exhaustive_knn", parameters=ExhaustiveKnnParameters( metric=VectorSearchAlgorithmMetric.COSINE ), ), ], ), ) def create_vector_store( additional_search_client_options: Optional[Dict[str, Any]] = None, ) -> AzureSearch: return AzureSearch( azure_search_endpoint=DEFAULT_ENDPOINT, azure_search_key=DEFAULT_KEY, azure_ad_access_token=DEFAULT_ACCESS_TOKEN, index_name=DEFAULT_INDEX_NAME, embedding_function=DEFAULT_EMBEDDING_MODEL, additional_search_client_options=additional_search_client_options, ) @pytest.mark.requires("azure.search.documents") def test_init_existing_index() -> None: from azure.search.documents.indexes import SearchIndexClient def mock_create_index() -> None: pytest.fail("Should not create index in this test") with patch.multiple( SearchIndexClient, get_index=mock_default_index, create_index=mock_create_index ): vector_store = create_vector_store() assert vector_store.client is not None @pytest.mark.requires("azure.search.documents") def test_init_new_index() -> None: from azure.core.exceptions import ResourceNotFoundError from azure.search.documents.indexes import SearchIndexClient from azure.search.documents.indexes.models import SearchIndex def no_index(self, name: str): # type: ignore[no-untyped-def] raise ResourceNotFoundError created_index: Optional[SearchIndex] = None def mock_create_index(self, index): # type: ignore[no-untyped-def] nonlocal created_index created_index = index 
with patch.multiple( SearchIndexClient, get_index=no_index, create_index=mock_create_index ): vector_store = create_vector_store() assert vector_store.client is not None assert created_index is not None assert json.dumps(created_index.as_dict()) == json.dumps( mock_default_index().as_dict() ) @pytest.mark.requires("azure.search.documents") def test_additional_search_options() -> None: from azure.search.documents.indexes import SearchIndexClient def mock_create_index() -> None: pytest.fail("Should not create index in this test") with patch.multiple( SearchIndexClient, get_index=mock_default_index, create_index=mock_create_index ): vector_store = create_vector_store( additional_search_client_options={"api_version": "test"} ) assert vector_store.client is not None assert vector_store.client._api_version == "test" @pytest.mark.requires("azure.search.documents") def test_ids_used_correctly() -> None: """Check whether vector store uses the document ids when provided with them.""" from azure.search.documents import SearchClient from azure.search.documents.indexes import SearchIndexClient from langchain_core.documents import Document class Response: def __init__(self) -> None: self.succeeded: bool = True def mock_upload_documents(self, documents: List[object]) -> List[Response]: # type: ignore[no-untyped-def] # assume all documents uploaded successfuly response = [Response() for _ in documents] return response documents = [ Document( page_content="page zero Lorem Ipsum", metadata={"source": "document.pdf", "page": 0, "id": "ID-document-1"}, ), Document( page_content="page one Lorem Ipsum", metadata={"source": "document.pdf", "page": 1, "id": "ID-document-2"}, ), ] ids_provided = [i.metadata.get("id") for i in documents] with patch.object( SearchClient, "upload_documents", mock_upload_documents ), patch.object(SearchIndexClient, "get_index", mock_default_index): vector_store = create_vector_store() ids_used_at_upload = vector_store.add_documents(documents, ids=ids_provided) 
assert len(ids_provided) == len(ids_used_at_upload) assert ids_provided == ids_used_at_upload
0
lc_public_repos/langchain/libs/community/tests/unit_tests/vectorstores
lc_public_repos/langchain/libs/community/tests/unit_tests/vectorstores/redis/test_redis_schema.py
import pytest from langchain_community.vectorstores.redis.schema import ( FlatVectorField, HNSWVectorField, NumericFieldSchema, RedisModel, RedisVectorField, TagFieldSchema, TextFieldSchema, read_schema, ) def test_text_field_schema_creation() -> None: """Test creating a text field with default parameters.""" field = TextFieldSchema(name="example") assert field.name == "example" assert field.weight == 1 # default value assert field.no_stem is False # default value def test_tag_field_schema_creation() -> None: """Test creating a tag field with custom parameters.""" field = TagFieldSchema(name="tag", separator="|") assert field.name == "tag" assert field.separator == "|" def test_numeric_field_schema_creation() -> None: """Test creating a numeric field with default parameters.""" field = NumericFieldSchema(name="numeric") assert field.name == "numeric" assert field.no_index is False # default value def test_redis_vector_field_validation() -> None: """Test validation for RedisVectorField's datatype.""" from pydantic import ValidationError with pytest.raises(ValidationError): RedisVectorField( name="vector", dims=128, algorithm="INVALID_ALGO", datatype="INVALID_TYPE" ) # Test creating a valid RedisVectorField vector_field = RedisVectorField( name="vector", dims=128, algorithm="SOME_ALGO", datatype="FLOAT32" ) assert vector_field.datatype == "FLOAT32" def test_flat_vector_field_defaults() -> None: """Test defaults for FlatVectorField.""" flat_vector_field_data = { "name": "example", "dims": 100, "algorithm": "FLAT", } flat_vector = FlatVectorField(**flat_vector_field_data) # type: ignore[arg-type] assert flat_vector.datatype == "FLOAT32" assert flat_vector.distance_metric == "COSINE" assert flat_vector.initial_cap is None assert flat_vector.block_size is None def test_flat_vector_field_optional_values() -> None: """Test optional values for FlatVectorField.""" flat_vector_field_data = { "name": "example", "dims": 100, "algorithm": "FLAT", "initial_cap": 1000, 
"block_size": 10, } flat_vector = FlatVectorField(**flat_vector_field_data) # type: ignore[arg-type] assert flat_vector.initial_cap == 1000 assert flat_vector.block_size == 10 def test_hnsw_vector_field_defaults() -> None: """Test defaults for HNSWVectorField.""" hnsw_vector_field_data = { "name": "example", "dims": 100, "algorithm": "HNSW", } hnsw_vector = HNSWVectorField(**hnsw_vector_field_data) # type: ignore[arg-type] assert hnsw_vector.datatype == "FLOAT32" assert hnsw_vector.distance_metric == "COSINE" assert hnsw_vector.initial_cap is None assert hnsw_vector.m == 16 assert hnsw_vector.ef_construction == 200 assert hnsw_vector.ef_runtime == 10 assert hnsw_vector.epsilon == 0.01 def test_hnsw_vector_field_optional_values() -> None: """Test optional values for HNSWVectorField.""" hnsw_vector_field_data = { "name": "example", "dims": 100, "algorithm": "HNSW", "initial_cap": 2000, "m": 10, "ef_construction": 250, "ef_runtime": 15, "epsilon": 0.05, } hnsw_vector = HNSWVectorField(**hnsw_vector_field_data) # type: ignore[arg-type] assert hnsw_vector.initial_cap == 2000 assert hnsw_vector.m == 10 assert hnsw_vector.ef_construction == 250 assert hnsw_vector.ef_runtime == 15 assert hnsw_vector.epsilon == 0.05 def test_read_schema_dict_input() -> None: """Test read_schema with dict input.""" index_schema = { "text": [{"name": "content"}], "tag": [{"name": "tag"}], "vector": [{"name": "content_vector", "dims": 100, "algorithm": "FLAT"}], } output = read_schema(index_schema=index_schema) # type: ignore assert output == index_schema def test_redis_model_creation() -> None: # Test creating a RedisModel with a mixture of fields redis_model = RedisModel( text=[TextFieldSchema(name="content")], tag=[TagFieldSchema(name="tag")], numeric=[NumericFieldSchema(name="numeric")], vector=[FlatVectorField(name="flat_vector", dims=128, algorithm="FLAT")], ) assert redis_model.text[0].name == "content" assert redis_model.tag[0].name == "tag" # type: ignore assert 
redis_model.numeric[0].name == "numeric" # type: ignore assert redis_model.vector[0].name == "flat_vector" # type: ignore # Test the content_vector property with pytest.raises(ValueError): _ = ( redis_model.content_vector ) # this should fail because there's no field with name 'content_vector_key' def test_read_schema() -> None: # Test the read_schema function with invalid input with pytest.raises(TypeError): read_schema(index_schema=None) # non-dict and non-str/pathlike input
0
lc_public_repos/langchain/libs/community/tests/unit_tests/vectorstores
lc_public_repos/langchain/libs/community/tests/unit_tests/vectorstores/redis/test_filters.py
from typing import Any import pytest from langchain_community.vectorstores.redis import ( RedisNum as Num, ) from langchain_community.vectorstores.redis import ( RedisTag as Tag, ) from langchain_community.vectorstores.redis import ( RedisText as Text, ) # Test cases for various tag scenarios @pytest.mark.parametrize( "operation,tags,expected", [ # Testing single tags ("==", "simpletag", "@tag_field:{simpletag}"), ( "==", "tag with space", "@tag_field:{tag\\ with\\ space}", ), # Escaping spaces within quotes ( "==", "special$char", "@tag_field:{special\\$char}", ), # Escaping a special character ("!=", "negated", "(-@tag_field:{negated})"), # Testing multiple tags ("==", ["tag1", "tag2"], "@tag_field:{tag1|tag2}"), ( "==", ["alpha", "beta with space", "gamma$special"], "@tag_field:{alpha|beta\\ with\\ space|gamma\\$special}", ), # Multiple tags with spaces and special chars ("!=", ["tagA", "tagB"], "(-@tag_field:{tagA|tagB})"), # Complex tag scenarios with special characters ("==", "weird:tag", "@tag_field:{weird\\:tag}"), # Tags with colon ("==", "tag&another", "@tag_field:{tag\\&another}"), # Tags with ampersand # Escaping various special characters within tags ("==", "tag/with/slashes", "@tag_field:{tag\\/with\\/slashes}"), ( "==", ["hyphen-tag", "under_score", "dot.tag"], "@tag_field:{hyphen\\-tag|under_score|dot\\.tag}", ), # ...additional unique cases as desired... 
], ) def test_tag_filter_varied(operation: str, tags: str, expected: str) -> None: if operation == "==": tf = Tag("tag_field") == tags elif operation == "!=": tf = Tag("tag_field") != tags else: raise ValueError(f"Unsupported operation: {operation}") # Verify the string representation matches the expected RediSearch query part assert str(tf) == expected @pytest.mark.parametrize( "value, expected", [ (None, "*"), ([], "*"), ("", "*"), ([None], "*"), ([None, "tag"], "@tag_field:{tag}"), ], ids=[ "none", "empty_list", "empty_string", "list_with_none", "list_with_none_and_tag", ], ) def test_nullable_tags(value: Any, expected: str) -> None: tag = Tag("tag_field") assert str(tag == value) == expected @pytest.mark.parametrize( "operation, value, expected", [ ("__eq__", 5, "@numeric_field:[5 5]"), ("__ne__", 5, "(-@numeric_field:[5 5])"), ("__gt__", 5, "@numeric_field:[(5 +inf]"), ("__ge__", 5, "@numeric_field:[5 +inf]"), ("__lt__", 5.55, "@numeric_field:[-inf (5.55]"), ("__le__", 5, "@numeric_field:[-inf 5]"), ("__le__", None, "*"), ("__eq__", None, "*"), ("__ne__", None, "*"), ], ids=["eq", "ne", "gt", "ge", "lt", "le", "le_none", "eq_none", "ne_none"], ) def test_numeric_filter(operation: str, value: Any, expected: str) -> None: nf = Num("numeric_field") assert str(getattr(nf, operation)(value)) == expected @pytest.mark.parametrize( "operation, value, expected", [ ("__eq__", "text", '@text_field:("text")'), ("__ne__", "text", '(-@text_field:"text")'), ("__eq__", "", "*"), ("__ne__", "", "*"), ("__eq__", None, "*"), ("__ne__", None, "*"), ("__mod__", "text", "@text_field:(text)"), ("__mod__", "tex*", "@text_field:(tex*)"), ("__mod__", "%text%", "@text_field:(%text%)"), ("__mod__", "", "*"), ("__mod__", None, "*"), ], ids=[ "eq", "ne", "eq-empty", "ne-empty", "eq-none", "ne-none", "like", "like_wildcard", "like_full", "like_empty", "like_none", ], ) def test_text_filter(operation: str, value: Any, expected: str) -> None: txt_f = getattr(Text("text_field"), 
operation)(value) assert str(txt_f) == expected def test_filters_combination() -> None: tf1 = Tag("tag_field") == ["tag1", "tag2"] tf2 = Tag("tag_field") == "tag3" combined = tf1 & tf2 assert str(combined) == "(@tag_field:{tag1|tag2} @tag_field:{tag3})" combined = tf1 | tf2 assert str(combined) == "(@tag_field:{tag1|tag2} | @tag_field:{tag3})" tf1 = Tag("tag_field") == [] assert str(tf1) == "*" assert str(tf1 & tf2) == str(tf2) assert str(tf1 | tf2) == str(tf2) # test combining filters with None values and empty strings tf1 = Tag("tag_field") == None # noqa: E711 tf2 = Tag("tag_field") == "" assert str(tf1 & tf2) == "*" tf1 = Tag("tag_field") == None # noqa: E711 tf2 = Tag("tag_field") == "tag" assert str(tf1 & tf2) == str(tf2) tf1 = Tag("tag_field") == None # noqa: E711 tf2 = Tag("tag_field") == ["tag1", "tag2"] assert str(tf1 & tf2) == str(tf2) tf1 = Tag("tag_field") == None # noqa: E711 tf2 = Tag("tag_field") != None # noqa: E711 assert str(tf1 & tf2) == "*" tf1 = Tag("tag_field") == "" tf2 = Tag("tag_field") == "tag" tf3 = Tag("tag_field") == ["tag1", "tag2"] assert str(tf1 & tf2 & tf3) == str(tf2 & tf3) # test none filters for Tag Num Text tf1 = Tag("tag_field") == None # noqa: E711 tf2 = Num("num_field") == None # noqa: E711 tf3 = Text("text_field") == None # noqa: E711 assert str(tf1 & tf2 & tf3) == "*" tf1 = Tag("tag_field") != None # noqa: E711 tf2 = Num("num_field") != None # noqa: E711 tf3 = Text("text_field") != None # noqa: E711 assert str(tf1 & tf2 & tf3) == "*" # test combinations of real and None filters tf1 = Tag("tag_field") == "tag" tf2 = Num("num_field") == None # noqa: E711 tf3 = Text("text_field") == None # noqa: E711 assert str(tf1 & tf2 & tf3) == str(tf1)
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_compressors/test_imports.py
from langchain_community.document_compressors import __all__, _module_lookup EXPECTED_ALL = [ "LLMLinguaCompressor", "OpenVINOReranker", "JinaRerank", "RankLLMRerank", "FlashrankRerank", "DashScopeRerank", "VolcengineRerank", "InfinityRerank", ] def test_all_imports() -> None: assert set(__all__) == set(EXPECTED_ALL) assert set(__all__) == set(_module_lookup.keys())
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/storage/test_sql.py
from typing import AsyncGenerator, Generator, cast import pytest import sqlalchemy as sa from langchain.storage._lc_store import create_kv_docstore, create_lc_store from langchain_core.documents import Document from langchain_core.stores import BaseStore from packaging import version from langchain_community.storage.sql import SQLStore is_sqlalchemy_v1 = version.parse(sa.__version__).major == 1 @pytest.fixture def sql_store() -> Generator[SQLStore, None, None]: store = SQLStore(namespace="test", db_url="sqlite://") store.create_schema() yield store @pytest.fixture async def async_sql_store() -> AsyncGenerator[SQLStore, None]: store = SQLStore(namespace="test", db_url="sqlite+aiosqlite://", async_mode=True) await store.acreate_schema() yield store @pytest.mark.xfail(is_sqlalchemy_v1, reason="SQLAlchemy 1.x issues") def test_create_lc_store(sql_store: SQLStore) -> None: """Test that a docstore is created from a base store.""" docstore: BaseStore[str, Document] = cast( BaseStore[str, Document], create_lc_store(sql_store) ) docstore.mset([("key1", Document(page_content="hello", metadata={"key": "value"}))]) fetched_doc = docstore.mget(["key1"])[0] assert fetched_doc is not None assert fetched_doc.page_content == "hello" assert fetched_doc.metadata == {"key": "value"} @pytest.mark.xfail(is_sqlalchemy_v1, reason="SQLAlchemy 1.x issues") def test_create_kv_store(sql_store: SQLStore) -> None: """Test that a docstore is created from a base store.""" docstore = create_kv_docstore(sql_store) docstore.mset([("key1", Document(page_content="hello", metadata={"key": "value"}))]) fetched_doc = docstore.mget(["key1"])[0] assert isinstance(fetched_doc, Document) assert fetched_doc.page_content == "hello" assert fetched_doc.metadata == {"key": "value"} @pytest.mark.requires("aiosqlite") async def test_async_create_kv_store(async_sql_store: SQLStore) -> None: """Test that a docstore is created from a base store.""" docstore = create_kv_docstore(async_sql_store) await docstore.amset( 
[("key1", Document(page_content="hello", metadata={"key": "value"}))] ) fetched_doc = (await docstore.amget(["key1"]))[0] assert isinstance(fetched_doc, Document) assert fetched_doc.page_content == "hello" assert fetched_doc.metadata == {"key": "value"} @pytest.mark.xfail(is_sqlalchemy_v1, reason="SQLAlchemy 1.x issues") def test_sample_sql_docstore(sql_store: SQLStore) -> None: # Set values for keys sql_store.mset([("key1", b"value1"), ("key2", b"value2")]) # Get values for keys values = sql_store.mget(["key1", "key2"]) # Returns [b"value1", b"value2"] assert values == [b"value1", b"value2"] # Delete keys sql_store.mdelete(["key1"]) # Iterate over keys assert [key for key in sql_store.yield_keys()] == ["key2"] @pytest.mark.requires("aiosqlite") async def test_async_sample_sql_docstore(async_sql_store: SQLStore) -> None: # Set values for keys await async_sql_store.amset([("key1", b"value1"), ("key2", b"value2")]) # sql_store.mset([("key1", "value1"), ("key2", "value2")]) # Get values for keys values = await async_sql_store.amget( ["key1", "key2"] ) # Returns [b"value1", b"value2"] assert values == [b"value1", b"value2"] # Delete keys await async_sql_store.amdelete(["key1"]) # Iterate over keys assert [key async for key in async_sql_store.ayield_keys()] == ["key2"]
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/storage/test_mongodb.py
"""Light weight unit test that attempts to import MongodbStore. The actual code is tested in integration tests. This test is intended to catch errors in the import process. """ def test_import_storage() -> None: """Attempt to import storage modules.""" from langchain_community.storage.mongodb import MongoDBStore # noqa
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/storage/test_imports.py
from langchain_community.storage import __all__, _module_lookup EXPECTED_ALL = [ "AstraDBStore", "AstraDBByteStore", "CassandraByteStore", "MongoDBByteStore", "MongoDBStore", "SQLStore", "RedisStore", "UpstashRedisByteStore", "UpstashRedisStore", ] def test_all_imports() -> None: assert set(__all__) == set(EXPECTED_ALL) assert set(__all__) == set(_module_lookup.keys())
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/storage/test_redis.py
"""Light weight unit test that attempts to import RedisStore. The actual code is tested in integration tests. This test is intended to catch errors in the import process. """ def test_import_storage() -> None: """Attempt to import storage modules.""" from langchain_community.storage.redis import RedisStore # noqa
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/storage/test_upstash_redis.py
"""Light weight unit test that attempts to import UpstashRedisStore.""" import pytest @pytest.mark.requires("upstash_redis") def test_import_storage() -> None: from langchain_community.storage.upstash_redis import UpstashRedisStore # noqa
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/indexes/test_sql_record_manager.py
from datetime import datetime from unittest.mock import patch import pytest import pytest_asyncio from sqlalchemy import select from langchain_community.indexes._sql_record_manager import ( SQLRecordManager, UpsertionRecord, ) @pytest.fixture() def manager() -> SQLRecordManager: """Initialize the test database and yield the TimestampedSet instance.""" # Initialize and yield the TimestampedSet instance record_manager = SQLRecordManager("kittens", db_url="sqlite:///:memory:") record_manager.create_schema() return record_manager @pytest_asyncio.fixture # type: ignore @pytest.mark.requires("aiosqlite") async def amanager() -> SQLRecordManager: """Initialize the test database and yield the TimestampedSet instance.""" # Initialize and yield the TimestampedSet instance record_manager = SQLRecordManager( "kittens", db_url="sqlite+aiosqlite:///:memory:", async_mode=True, ) await record_manager.acreate_schema() return record_manager def test_update(manager: SQLRecordManager) -> None: """Test updating records in the database.""" # no keys should be present in the set read_keys = manager.list_keys() assert read_keys == [] # Insert records keys = ["key1", "key2", "key3"] manager.update(keys) # Retrieve the records read_keys = manager.list_keys() assert read_keys == ["key1", "key2", "key3"] @pytest.mark.requires("aiosqlite") async def test_aupdate(amanager: SQLRecordManager) -> None: """Test updating records in the database.""" # no keys should be present in the set read_keys = await amanager.alist_keys() assert read_keys == [] # Insert records keys = ["key1", "key2", "key3"] await amanager.aupdate(keys) # Retrieve the records read_keys = await amanager.alist_keys() assert read_keys == ["key1", "key2", "key3"] def test_update_timestamp(manager: SQLRecordManager) -> None: """Test updating records in the database.""" # no keys should be present in the set with patch.object( manager, "get_time", return_value=datetime(2021, 1, 2).timestamp() ): manager.update(["key1"]) with 
manager._make_session() as session: records = ( session.query(UpsertionRecord) .filter(UpsertionRecord.namespace == manager.namespace) .all() # type: ignore[attr-defined] ) assert [ { "key": record.key, "namespace": record.namespace, "updated_at": record.updated_at, "group_id": record.group_id, } for record in records ] == [ { "group_id": None, "key": "key1", "namespace": "kittens", "updated_at": datetime(2021, 1, 2, 0, 0).timestamp(), } ] with patch.object( manager, "get_time", return_value=datetime(2023, 1, 2).timestamp() ): manager.update(["key1"]) with manager._make_session() as session: records = ( session.query(UpsertionRecord) .filter(UpsertionRecord.namespace == manager.namespace) .all() # type: ignore[attr-defined] ) assert [ { "key": record.key, "namespace": record.namespace, "updated_at": record.updated_at, "group_id": record.group_id, } for record in records ] == [ { "group_id": None, "key": "key1", "namespace": "kittens", "updated_at": datetime(2023, 1, 2, 0, 0).timestamp(), } ] with patch.object( manager, "get_time", return_value=datetime(2023, 2, 2).timestamp() ): manager.update(["key1"], group_ids=["group1"]) with manager._make_session() as session: records = ( session.query(UpsertionRecord) .filter(UpsertionRecord.namespace == manager.namespace) .all() # type: ignore[attr-defined] ) assert [ { "key": record.key, "namespace": record.namespace, "updated_at": record.updated_at, "group_id": record.group_id, } for record in records ] == [ { "group_id": "group1", "key": "key1", "namespace": "kittens", "updated_at": datetime(2023, 2, 2, 0, 0).timestamp(), } ] @pytest.mark.requires("aiosqlite") async def test_aupdate_timestamp(amanager: SQLRecordManager) -> None: """Test updating records in the database.""" # no keys should be present in the set with patch.object( amanager, "aget_time", return_value=datetime(2021, 1, 2).timestamp() ): await amanager.aupdate(["key1"]) async with amanager._amake_session() as session: records = ( ( await session.execute( 
select(UpsertionRecord).filter( UpsertionRecord.namespace == amanager.namespace ) ) ) .scalars() .all() ) assert [ { "key": record.key, "namespace": record.namespace, "updated_at": record.updated_at, "group_id": record.group_id, } for record in records ] == [ { "group_id": None, "key": "key1", "namespace": "kittens", "updated_at": datetime(2021, 1, 2, 0, 0).timestamp(), } ] with patch.object( amanager, "aget_time", return_value=datetime(2023, 1, 2).timestamp() ): await amanager.aupdate(["key1"]) async with amanager._amake_session() as session: records = ( ( await session.execute( select(UpsertionRecord).filter( UpsertionRecord.namespace == amanager.namespace ) ) ) .scalars() .all() ) assert [ { "key": record.key, "namespace": record.namespace, "updated_at": record.updated_at, "group_id": record.group_id, } for record in records ] == [ { "group_id": None, "key": "key1", "namespace": "kittens", "updated_at": datetime(2023, 1, 2, 0, 0).timestamp(), } ] with patch.object( amanager, "aget_time", return_value=datetime(2023, 2, 2).timestamp() ): await amanager.aupdate(["key1"], group_ids=["group1"]) async with amanager._amake_session() as session: records = ( ( await session.execute( select(UpsertionRecord).filter( UpsertionRecord.namespace == amanager.namespace ) ) ) .scalars() .all() ) assert [ { "key": record.key, "namespace": record.namespace, "updated_at": record.updated_at, "group_id": record.group_id, } for record in records ] == [ { "group_id": "group1", "key": "key1", "namespace": "kittens", "updated_at": datetime(2023, 2, 2, 0, 0).timestamp(), } ] def test_update_with_group_ids(manager: SQLRecordManager) -> None: """Test updating records in the database.""" # no keys should be present in the set read_keys = manager.list_keys() assert read_keys == [] # Insert records keys = ["key1", "key2", "key3"] manager.update(keys) # Retrieve the records read_keys = manager.list_keys() assert read_keys == ["key1", "key2", "key3"] @pytest.mark.requires("aiosqlite") async def 
test_aupdate_with_group_ids(amanager: SQLRecordManager) -> None: """Test updating records in the database.""" # no keys should be present in the set read_keys = await amanager.alist_keys() assert read_keys == [] # Insert records keys = ["key1", "key2", "key3"] await amanager.aupdate(keys) # Retrieve the records read_keys = await amanager.alist_keys() assert read_keys == ["key1", "key2", "key3"] def test_exists(manager: SQLRecordManager) -> None: """Test checking if keys exist in the database.""" # Insert records keys = ["key1", "key2", "key3"] manager.update(keys) # Check if the keys exist in the database exists = manager.exists(keys) assert len(exists) == len(keys) assert exists == [True, True, True] exists = manager.exists(["key1", "key4"]) assert len(exists) == 2 assert exists == [True, False] @pytest.mark.requires("aiosqlite") async def test_aexists(amanager: SQLRecordManager) -> None: """Test checking if keys exist in the database.""" # Insert records keys = ["key1", "key2", "key3"] await amanager.aupdate(keys) # Check if the keys exist in the database exists = await amanager.aexists(keys) assert len(exists) == len(keys) assert exists == [True, True, True] exists = await amanager.aexists(["key1", "key4"]) assert len(exists) == 2 assert exists == [True, False] def test_list_keys(manager: SQLRecordManager) -> None: """Test listing keys based on the provided date range.""" # Insert records assert manager.list_keys() == [] with manager._make_session() as session: # Add some keys with explicit updated_ats session.add( UpsertionRecord( key="key1", updated_at=datetime(2021, 1, 1).timestamp(), namespace="kittens", ) ) session.add( UpsertionRecord( key="key2", updated_at=datetime(2022, 1, 1).timestamp(), namespace="kittens", ) ) session.add( UpsertionRecord( key="key3", updated_at=datetime(2023, 1, 1).timestamp(), namespace="kittens", ) ) session.add( UpsertionRecord( key="key4", group_id="group1", updated_at=datetime(2024, 1, 1).timestamp(), namespace="kittens", ) ) # 
Insert keys from a different namespace, these should not be visible! session.add( UpsertionRecord( key="key1", updated_at=datetime(2021, 1, 1).timestamp(), namespace="puppies", ) ) session.add( UpsertionRecord( key="key5", updated_at=datetime(2021, 1, 1).timestamp(), namespace="puppies", ) ) session.commit() # Retrieve all keys assert manager.list_keys() == ["key1", "key2", "key3", "key4"] # Retrieve keys updated after a certain date assert manager.list_keys(after=datetime(2022, 2, 1).timestamp()) == ["key3", "key4"] # Retrieve keys updated after a certain date assert manager.list_keys(before=datetime(2022, 2, 1).timestamp()) == [ "key1", "key2", ] # Retrieve keys updated after a certain date assert manager.list_keys(before=datetime(2019, 2, 1).timestamp()) == [] # Retrieve keys in a time range assert manager.list_keys( before=datetime(2022, 2, 1).timestamp(), after=datetime(2021, 11, 1).timestamp(), ) == ["key2"] assert manager.list_keys(group_ids=["group1", "group2"]) == ["key4"] # Test multiple filters assert ( manager.list_keys( group_ids=["group1", "group2"], before=datetime(2019, 1, 1).timestamp() ) == [] ) assert manager.list_keys( group_ids=["group1", "group2"], after=datetime(2019, 1, 1).timestamp() ) == ["key4"] @pytest.mark.requires("aiosqlite") async def test_alist_keys(amanager: SQLRecordManager) -> None: """Test listing keys based on the provided date range.""" # Insert records assert await amanager.alist_keys() == [] async with amanager._amake_session() as session: # Add some keys with explicit updated_ats session.add( UpsertionRecord( key="key1", updated_at=datetime(2021, 1, 1).timestamp(), namespace="kittens", ) ) session.add( UpsertionRecord( key="key2", updated_at=datetime(2022, 1, 1).timestamp(), namespace="kittens", ) ) session.add( UpsertionRecord( key="key3", updated_at=datetime(2023, 1, 1).timestamp(), namespace="kittens", ) ) session.add( UpsertionRecord( key="key4", group_id="group1", updated_at=datetime(2024, 1, 1).timestamp(), 
namespace="kittens", ) ) # Insert keys from a different namespace, these should not be visible! session.add( UpsertionRecord( key="key1", updated_at=datetime(2021, 1, 1).timestamp(), namespace="puppies", ) ) session.add( UpsertionRecord( key="key5", updated_at=datetime(2021, 1, 1).timestamp(), namespace="puppies", ) ) await session.commit() # Retrieve all keys assert await amanager.alist_keys() == ["key1", "key2", "key3", "key4"] # Retrieve keys updated after a certain date assert await amanager.alist_keys(after=datetime(2022, 2, 1).timestamp()) == [ "key3", "key4", ] # Retrieve keys updated after a certain date assert await amanager.alist_keys(before=datetime(2022, 2, 1).timestamp()) == [ "key1", "key2", ] # Retrieve keys updated after a certain date assert await amanager.alist_keys(before=datetime(2019, 2, 1).timestamp()) == [] # Retrieve keys in a time range assert await amanager.alist_keys( before=datetime(2022, 2, 1).timestamp(), after=datetime(2021, 11, 1).timestamp(), ) == ["key2"] assert await amanager.alist_keys(group_ids=["group1", "group2"]) == ["key4"] # Test multiple filters assert ( await amanager.alist_keys( group_ids=["group1", "group2"], before=datetime(2019, 1, 1).timestamp() ) == [] ) assert await amanager.alist_keys( group_ids=["group1", "group2"], after=datetime(2019, 1, 1).timestamp() ) == ["key4"] def test_namespace_is_used(manager: SQLRecordManager) -> None: """Verify that namespace is taken into account for all operations.""" assert manager.namespace == "kittens" with manager._make_session() as session: # Add some keys with explicit updated_ats session.add(UpsertionRecord(key="key1", namespace="kittens")) session.add(UpsertionRecord(key="key2", namespace="kittens")) session.add(UpsertionRecord(key="key1", namespace="puppies")) session.add(UpsertionRecord(key="key3", namespace="puppies")) session.commit() assert manager.list_keys() == ["key1", "key2"] manager.delete_keys(["key1"]) assert manager.list_keys() == ["key2"] 
manager.update(["key3"], group_ids=["group3"]) with manager._make_session() as session: results = session.query(UpsertionRecord).all() assert sorted([(r.namespace, r.key, r.group_id) for r in results]) == [ ("kittens", "key2", None), ("kittens", "key3", "group3"), ("puppies", "key1", None), ("puppies", "key3", None), ] @pytest.mark.requires("aiosqlite") async def test_anamespace_is_used(amanager: SQLRecordManager) -> None: """Verify that namespace is taken into account for all operations.""" assert amanager.namespace == "kittens" async with amanager._amake_session() as session: # Add some keys with explicit updated_ats session.add(UpsertionRecord(key="key1", namespace="kittens")) session.add(UpsertionRecord(key="key2", namespace="kittens")) session.add(UpsertionRecord(key="key1", namespace="puppies")) session.add(UpsertionRecord(key="key3", namespace="puppies")) await session.commit() assert await amanager.alist_keys() == ["key1", "key2"] await amanager.adelete_keys(["key1"]) assert await amanager.alist_keys() == ["key2"] await amanager.aupdate(["key3"], group_ids=["group3"]) async with amanager._amake_session() as session: results = (await session.execute(select(UpsertionRecord))).scalars().all() assert sorted([(r.namespace, r.key, r.group_id) for r in results]) == [ ("kittens", "key2", None), ("kittens", "key3", "group3"), ("puppies", "key1", None), ("puppies", "key3", None), ] def test_delete_keys(manager: SQLRecordManager) -> None: """Test deleting keys from the database.""" # Insert records keys = ["key1", "key2", "key3"] manager.update(keys) # Delete some keys keys_to_delete = ["key1", "key2"] manager.delete_keys(keys_to_delete) # Check if the deleted keys are no longer in the database remaining_keys = manager.list_keys() assert remaining_keys == ["key3"] @pytest.mark.requires("aiosqlite") async def test_adelete_keys(amanager: SQLRecordManager) -> None: """Test deleting keys from the database.""" # Insert records keys = ["key1", "key2", "key3"] await 
amanager.aupdate(keys) # Delete some keys keys_to_delete = ["key1", "key2"] await amanager.adelete_keys(keys_to_delete) # Check if the deleted keys are no longer in the database remaining_keys = await amanager.alist_keys() assert remaining_keys == ["key3"]
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/load/test_serializable.py
import importlib import inspect import pkgutil from types import ModuleType from langchain_core.load.mapping import SERIALIZABLE_MAPPING def import_all_modules(package_name: str) -> dict: package = importlib.import_module(package_name) classes: dict = {} def _handle_module(module: ModuleType) -> None: # Iterate over all members of the module names = dir(module) if hasattr(module, "__all__"): names += list(module.__all__) names = sorted(set(names)) for name in names: # Check if it's a class or function attr = getattr(module, name) if not inspect.isclass(attr): continue if not hasattr(attr, "is_lc_serializable") or not isinstance(attr, type): continue if ( isinstance(attr.is_lc_serializable(), bool) # type: ignore and attr.is_lc_serializable() # type: ignore ): key = tuple(attr.lc_id()) # type: ignore value = tuple(attr.__module__.split(".") + [attr.__name__]) if key in classes and classes[key] != value: raise ValueError classes[key] = value _handle_module(package) for importer, modname, ispkg in pkgutil.walk_packages( package.__path__, package.__name__ + "." 
): try: module = importlib.import_module(modname) except ModuleNotFoundError: continue _handle_module(module) return classes def test_import_all_modules() -> None: """Test import all modules works as expected""" all_modules = import_all_modules("langchain") filtered_modules = [ k for k in all_modules if len(k) == 4 and tuple(k[:2]) == ("langchain", "chat_models") ] # This test will need to be updated if new serializable classes are added # to community assert sorted(filtered_modules) == sorted( [ ("langchain", "chat_models", "azure_openai", "AzureChatOpenAI"), ("langchain", "chat_models", "bedrock", "BedrockChat"), ("langchain", "chat_models", "anthropic", "ChatAnthropic"), ("langchain", "chat_models", "fireworks", "ChatFireworks"), ("langchain", "chat_models", "google_palm", "ChatGooglePalm"), ("langchain", "chat_models", "openai", "ChatOpenAI"), ("langchain", "chat_models", "vertexai", "ChatVertexAI"), ] ) def test_serializable_mapping() -> None: to_skip = { # This should have had a different namespace, as it was never # exported from the langchain module, but we keep for whoever has # already serialized it. ("langchain", "prompts", "image", "ImagePromptTemplate"): ( "langchain_core", "prompts", "image", "ImagePromptTemplate", ), # This is not exported from langchain, only langchain_core ("langchain_core", "prompts", "structured", "StructuredPrompt"): ( "langchain_core", "prompts", "structured", "StructuredPrompt", ), # This is not exported from langchain, only langchain_core ("langchain", "schema", "messages", "RemoveMessage"): ( "langchain_core", "messages", "modifier", "RemoveMessage", ), ("langchain", "chat_models", "mistralai", "ChatMistralAI"): ( "langchain_mistralai", "chat_models", "ChatMistralAI", ), ("langchain_groq", "chat_models", "ChatGroq"): ( "langchain_groq", "chat_models", "ChatGroq", ), # TODO(0.3): For now we're skipping the below two tests. Need to fix # so that it only runs when langchain-aws, langchain-google-genai # are installed. 
("langchain", "chat_models", "bedrock", "ChatBedrock"): ( "langchain_aws", "chat_models", "bedrock", "ChatBedrock", ), ("langchain_google_genai", "chat_models", "ChatGoogleGenerativeAI"): ( "langchain_google_genai", "chat_models", "ChatGoogleGenerativeAI", ), } serializable_modules = import_all_modules("langchain") missing = set(SERIALIZABLE_MAPPING).difference( set(serializable_modules).union(to_skip) ) assert missing == set() extra = set(serializable_modules).difference(SERIALIZABLE_MAPPING) assert extra == set() for k, import_path in serializable_modules.items(): import_dir, import_obj = import_path[:-1], import_path[-1] # Import module mod = importlib.import_module(".".join(import_dir)) # Import class cls = getattr(mod, import_obj) assert list(k) == cls.lc_id()
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/load/test_dump.py
"""Test for Serializable base class""" import json import os from typing import Any, Dict, List from unittest.mock import patch import pytest from langchain.chains.llm import LLMChain from langchain_core.load.dump import dumps from langchain_core.load.serializable import Serializable from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate from langchain_core.prompts.prompt import PromptTemplate from langchain_core.tracers.langchain import LangChainTracer from pydantic import ConfigDict, Field, model_validator class Person(Serializable): secret: str you_can_see_me: str = "hello" @classmethod def is_lc_serializable(cls) -> bool: return True @property def lc_secrets(self) -> Dict[str, str]: return {"secret": "SECRET"} @property def lc_attributes(self) -> Dict[str, str]: return {"you_can_see_me": self.you_can_see_me} class SpecialPerson(Person): another_secret: str another_visible: str = "bye" @classmethod def get_lc_namespace(cls) -> List[str]: return ["my", "special", "namespace"] # Gets merged with parent class's secrets @property def lc_secrets(self) -> Dict[str, str]: return {"another_secret": "ANOTHER_SECRET"} # Gets merged with parent class's attributes @property def lc_attributes(self) -> Dict[str, str]: return {"another_visible": self.another_visible} class NotSerializable: pass def test_person(snapshot: Any) -> None: p = Person(secret="hello") assert dumps(p, pretty=True) == snapshot sp = SpecialPerson(another_secret="Wooo", secret="Hmm") assert dumps(sp, pretty=True) == snapshot assert Person.lc_id() == ["tests", "unit_tests", "load", "test_dump", "Person"] assert SpecialPerson.lc_id() == ["my", "special", "namespace", "SpecialPerson"] def test_typeerror() -> None: assert ( dumps({(1, 2): 3}) == """{"lc": 1, "type": "not_implemented", "id": ["builtins", "dict"], "repr": "{(1, 2): 3}"}""" # noqa: E501 ) @pytest.mark.requires("openai") def test_serialize_openai_llm(snapshot: Any) -> None: from langchain_community.llms.openai 
import OpenAI with patch.dict(os.environ, {"LANGCHAIN_API_KEY": "test-api-key"}): llm = OpenAI( # type: ignore[call-arg] model="davinci", temperature=0.5, openai_api_key="hello", # This is excluded from serialization callbacks=[LangChainTracer()], ) llm.temperature = 0.7 # this is reflected in serialization assert dumps(llm, pretty=True) == snapshot @pytest.mark.requires("openai") def test_serialize_llmchain(snapshot: Any) -> None: from langchain_community.llms.openai import OpenAI llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg] prompt = PromptTemplate.from_template("hello {name}!") chain = LLMChain(llm=llm, prompt=prompt) assert dumps(chain, pretty=True) == snapshot @pytest.mark.requires("openai") def test_serialize_llmchain_env() -> None: from langchain_community.llms.openai import OpenAI llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg] prompt = PromptTemplate.from_template("hello {name}!") chain = LLMChain(llm=llm, prompt=prompt) import os has_env = "OPENAI_API_KEY" in os.environ if not has_env: os.environ["OPENAI_API_KEY"] = "env_variable" llm_2 = OpenAI(model="davinci", temperature=0.5) # type: ignore[call-arg] prompt_2 = PromptTemplate.from_template("hello {name}!") chain_2 = LLMChain(llm=llm_2, prompt=prompt_2) assert dumps(chain_2, pretty=True) == dumps(chain, pretty=True) if not has_env: del os.environ["OPENAI_API_KEY"] @pytest.mark.requires("openai") def test_serialize_llmchain_chat(snapshot: Any) -> None: from langchain_community.chat_models.openai import ChatOpenAI llm = ChatOpenAI(model="davinci", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg] prompt = ChatPromptTemplate.from_messages( [HumanMessagePromptTemplate.from_template("hello {name}!")] ) chain = LLMChain(llm=llm, prompt=prompt) assert dumps(chain, pretty=True) == snapshot import os has_env = "OPENAI_API_KEY" in os.environ if not has_env: os.environ["OPENAI_API_KEY"] = 
"env_variable" llm_2 = ChatOpenAI(model="davinci", temperature=0.5) # type: ignore[call-arg] prompt_2 = ChatPromptTemplate.from_messages( [HumanMessagePromptTemplate.from_template("hello {name}!")] ) chain_2 = LLMChain(llm=llm_2, prompt=prompt_2) assert dumps(chain_2, pretty=True) == dumps(chain, pretty=True) if not has_env: del os.environ["OPENAI_API_KEY"] @pytest.mark.requires("openai") def test_serialize_llmchain_with_non_serializable_arg(snapshot: Any) -> None: from langchain_community.llms.openai import OpenAI llm = OpenAI( # type: ignore[call-arg] model="davinci", temperature=0.5, openai_api_key="hello", client=NotSerializable, ) prompt = PromptTemplate.from_template("hello {name}!") chain = LLMChain(llm=llm, prompt=prompt) assert dumps(chain, pretty=True) == snapshot def test_person_with_kwargs(snapshot: Any) -> None: person = Person(secret="hello") assert dumps(person, separators=(",", ":")) == snapshot def test_person_with_invalid_kwargs() -> None: person = Person(secret="hello") with pytest.raises(TypeError): dumps(person, invalid_kwarg="hello") class TestClass(Serializable): my_favorite_secret: str = Field(alias="my_favorite_secret_alias") my_other_secret: str = Field() model_config = ConfigDict( populate_by_name=True, ) @model_validator(mode="before") @classmethod def get_from_env(cls, values: Dict) -> Any: """Get the values from the environment.""" if "my_favorite_secret" not in values: values["my_favorite_secret"] = os.getenv("MY_FAVORITE_SECRET") if "my_other_secret" not in values: values["my_other_secret"] = os.getenv("MY_OTHER_SECRET") return values @classmethod def is_lc_serializable(cls) -> bool: return True @classmethod def get_lc_namespace(cls) -> List[str]: return ["my", "special", "namespace"] @property def lc_secrets(self) -> Dict[str, str]: return { "my_favorite_secret": "MY_FAVORITE_SECRET", "my_other_secret": "MY_OTHER_SECRET", } def test_aliases_hidden() -> None: test_class = TestClass(my_favorite_secret="hello", my_other_secret="world") 
# type: ignore[call-arg] dumped = json.loads(dumps(test_class, pretty=True)) expected_dump = { "lc": 1, "type": "constructor", "id": ["my", "special", "namespace", "TestClass"], "kwargs": { "my_favorite_secret": { "lc": 1, "type": "secret", "id": ["MY_FAVORITE_SECRET"], }, "my_other_secret": {"lc": 1, "type": "secret", "id": ["MY_OTHER_SECRET"]}, }, } assert dumped == expected_dump # Check while patching the os environment with patch.dict( os.environ, {"MY_FAVORITE_SECRET": "hello", "MY_OTHER_SECRET": "world"} ): test_class = TestClass() # type: ignore[call-arg] dumped = json.loads(dumps(test_class, pretty=True)) # Check by alias test_class = TestClass(my_favorite_secret_alias="hello", my_other_secret="world") # type: ignore[call-arg] dumped = json.loads(dumps(test_class, pretty=True)) expected_dump = { "lc": 1, "type": "constructor", "id": ["my", "special", "namespace", "TestClass"], "kwargs": { "my_favorite_secret": { "lc": 1, "type": "secret", "id": ["MY_FAVORITE_SECRET"], }, "my_other_secret": {"lc": 1, "type": "secret", "id": ["MY_OTHER_SECRET"]}, }, } assert dumped == expected_dump
0
lc_public_repos/langchain/libs/community/tests/unit_tests/load
lc_public_repos/langchain/libs/community/tests/unit_tests/load/__snapshots__/test_dump.ambr
# serializer version: 1 # name: test_person ''' { "lc": 1, "type": "constructor", "id": [ "tests", "unit_tests", "load", "test_dump", "Person" ], "kwargs": { "secret": { "lc": 1, "type": "secret", "id": [ "SECRET" ] }, "you_can_see_me": "hello" } } ''' # --- # name: test_person.1 ''' { "lc": 1, "type": "constructor", "id": [ "my", "special", "namespace", "SpecialPerson" ], "kwargs": { "secret": { "lc": 1, "type": "secret", "id": [ "SECRET" ] }, "you_can_see_me": "hello", "another_secret": { "lc": 1, "type": "secret", "id": [ "ANOTHER_SECRET" ] }, "another_visible": "bye" } } ''' # --- # name: test_person_with_kwargs '{"lc":1,"type":"constructor","id":["tests","unit_tests","load","test_dump","Person"],"kwargs":{"secret":{"lc":1,"type":"secret","id":["SECRET"]},"you_can_see_me":"hello"}}' # --- # name: test_serialize_llmchain ''' { "lc": 1, "type": "constructor", "id": [ "langchain", "chains", "llm", "LLMChain" ], "kwargs": { "verbose": false, "prompt": { "lc": 1, "type": "constructor", "id": [ "langchain", "prompts", "prompt", "PromptTemplate" ], "kwargs": { "input_variables": [ "name" ], "template": "hello {name}!", "template_format": "f-string" }, "name": "PromptTemplate" }, "llm": { "lc": 1, "type": "constructor", "id": [ "langchain", "llms", "openai", "OpenAI" ], "kwargs": { "model_name": "davinci", "temperature": 0.5, "max_tokens": 256, "top_p": 1.0, "n": 1, "best_of": 1, "openai_api_key": { "lc": 1, "type": "secret", "id": [ "OPENAI_API_KEY" ] }, "openai_proxy": "", "batch_size": 20, "max_retries": 2, "disallowed_special": "all" }, "name": "OpenAI" }, "output_key": "text", "output_parser": { "lc": 1, "type": "constructor", "id": [ "langchain", "schema", "output_parser", "StrOutputParser" ], "kwargs": {}, "name": "StrOutputParser" }, "return_final_only": true }, "name": "LLMChain" } ''' # --- # name: test_serialize_llmchain_chat ''' { "lc": 1, "type": "constructor", "id": [ "langchain", "chains", "llm", "LLMChain" ], "kwargs": { "verbose": false, "prompt": { 
"lc": 1, "type": "constructor", "id": [ "langchain", "prompts", "chat", "ChatPromptTemplate" ], "kwargs": { "input_variables": [ "name" ], "messages": [ { "lc": 1, "type": "constructor", "id": [ "langchain", "prompts", "chat", "HumanMessagePromptTemplate" ], "kwargs": { "prompt": { "lc": 1, "type": "constructor", "id": [ "langchain", "prompts", "prompt", "PromptTemplate" ], "kwargs": { "input_variables": [ "name" ], "template": "hello {name}!", "template_format": "f-string" }, "name": "PromptTemplate" } } } ] }, "name": "ChatPromptTemplate" }, "llm": { "lc": 1, "type": "constructor", "id": [ "langchain", "chat_models", "openai", "ChatOpenAI" ], "kwargs": { "model_name": "davinci", "temperature": 0.5, "openai_api_key": { "lc": 1, "type": "secret", "id": [ "OPENAI_API_KEY" ] }, "openai_proxy": "", "max_retries": 2, "n": 1 }, "name": "ChatOpenAI" }, "output_key": "text", "output_parser": { "lc": 1, "type": "constructor", "id": [ "langchain", "schema", "output_parser", "StrOutputParser" ], "kwargs": {}, "name": "StrOutputParser" }, "return_final_only": true }, "name": "LLMChain" } ''' # --- # name: test_serialize_llmchain_with_non_serializable_arg ''' { "lc": 1, "type": "constructor", "id": [ "langchain", "chains", "llm", "LLMChain" ], "kwargs": { "verbose": false, "prompt": { "lc": 1, "type": "constructor", "id": [ "langchain", "prompts", "prompt", "PromptTemplate" ], "kwargs": { "input_variables": [ "name" ], "template": "hello {name}!", "template_format": "f-string" }, "name": "PromptTemplate" }, "llm": { "lc": 1, "type": "constructor", "id": [ "langchain", "llms", "openai", "OpenAI" ], "kwargs": { "model_name": "davinci", "temperature": 0.5, "max_tokens": 256, "top_p": 1.0, "n": 1, "best_of": 1, "openai_api_key": { "lc": 1, "type": "secret", "id": [ "OPENAI_API_KEY" ] }, "openai_proxy": "", "batch_size": 20, "max_retries": 2, "disallowed_special": "all" }, "name": "OpenAI" }, "output_key": "text", "output_parser": { "lc": 1, "type": "constructor", "id": [ 
"langchain", "schema", "output_parser", "StrOutputParser" ], "kwargs": {}, "name": "StrOutputParser" }, "return_final_only": true }, "name": "LLMChain" } ''' # --- # name: test_serialize_openai_llm ''' { "lc": 1, "type": "constructor", "id": [ "langchain", "llms", "openai", "OpenAI" ], "kwargs": { "model_name": "davinci", "temperature": 0.7, "max_tokens": 256, "top_p": 1.0, "n": 1, "best_of": 1, "openai_api_key": { "lc": 1, "type": "secret", "id": [ "OPENAI_API_KEY" ] }, "openai_proxy": "", "batch_size": 20, "max_retries": 2, "disallowed_special": "all" }, "name": "OpenAI" } ''' # ---
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/agent_toolkits/test_load_tools.py
from langchain_community.agent_toolkits.load_tools import load_tools from langchain_community.tools.requests.tool import ( RequestsDeleteTool, RequestsGetTool, RequestsPatchTool, RequestsPostTool, RequestsPutTool, ) def test_load_request_tools() -> None: request_tools = load_tools(["requests_all"], allow_dangerous_tools=True) assert len(request_tools) == 5 assert any(isinstance(tool, RequestsDeleteTool) for tool in request_tools) assert any(isinstance(tool, RequestsGetTool) for tool in request_tools) assert any(isinstance(tool, RequestsPatchTool) for tool in request_tools) assert any(isinstance(tool, RequestsPostTool) for tool in request_tools) assert any(isinstance(tool, RequestsPutTool) for tool in request_tools)
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/agent_toolkits/test_imports.py
from langchain_community.agent_toolkits import __all__, _module_lookup EXPECTED_ALL = [ "AINetworkToolkit", "AmadeusToolkit", "AzureAiServicesToolkit", "AzureCognitiveServicesToolkit", "ConneryToolkit", "FileManagementToolkit", "GmailToolkit", "JiraToolkit", "JsonToolkit", "MultionToolkit", "NasaToolkit", "NLAToolkit", "O365Toolkit", "OpenAPIToolkit", "PlayWrightBrowserToolkit", "PolygonToolkit", "PowerBIToolkit", "SlackToolkit", "SteamToolkit", "SQLDatabaseToolkit", "SparkSQLToolkit", "ZapierToolkit", "create_json_agent", "create_openapi_agent", "create_pbi_agent", "create_pbi_chat_agent", "create_spark_sql_agent", "create_sql_agent", "CogniswitchToolkit", ] def test_all_imports() -> None: assert set(__all__) == set(EXPECTED_ALL) assert set(__all__) == set(_module_lookup.keys())
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/cross_encoders/test_imports.py
from langchain_community.cross_encoders import __all__, _module_lookup EXPECTED_ALL = [ "BaseCrossEncoder", "FakeCrossEncoder", "HuggingFaceCrossEncoder", "SagemakerEndpointCrossEncoder", ] def test_all_imports() -> None: """Test that __all__ is correctly set.""" assert set(__all__) == set(EXPECTED_ALL) assert set(__all__) == set(_module_lookup.keys())
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_cerebriumai.py
"""Test CerebriumAI llm""" from pydantic import SecretStr from pytest import CaptureFixture, MonkeyPatch from langchain_community.llms.cerebriumai import CerebriumAI def test_api_key_is_secret_string() -> None: llm = CerebriumAI(cerebriumai_api_key="test-cerebriumai-api-key") # type: ignore[arg-type] assert isinstance(llm.cerebriumai_api_key, SecretStr) def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture) -> None: llm = CerebriumAI(cerebriumai_api_key="secret-api-key") # type: ignore[arg-type] print(llm.cerebriumai_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" assert repr(llm.cerebriumai_api_key) == "SecretStr('**********')" def test_api_key_masked_when_passed_from_env( monkeypatch: MonkeyPatch, capsys: CaptureFixture ) -> None: monkeypatch.setenv("CEREBRIUMAI_API_KEY", "secret-api-key") llm = CerebriumAI() print(llm.cerebriumai_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" assert repr(llm.cerebriumai_api_key) == "SecretStr('**********')"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_callbacks.py
"""Test LLM callbacks.""" from langchain_core.messages import HumanMessage from langchain_community.chat_models.fake import FakeListChatModel from langchain_community.llms.fake import FakeListLLM from tests.unit_tests.callbacks.fake_callback_handler import ( FakeCallbackHandler, FakeCallbackHandlerWithChatStart, ) def test_llm_with_callbacks() -> None: """Test LLM callbacks.""" handler = FakeCallbackHandler() llm = FakeListLLM(callbacks=[handler], verbose=True, responses=["foo"]) output = llm.invoke("foo") assert output == "foo" assert handler.starts == 1 assert handler.ends == 1 assert handler.errors == 0 def test_chat_model_with_v1_callbacks() -> None: """Test chat model callbacks fall back to on_llm_start.""" handler = FakeCallbackHandler() llm = FakeListChatModel( callbacks=[handler], verbose=True, responses=["fake response"] ) output = llm.invoke([HumanMessage(content="foo")]) assert output.content == "fake response" assert handler.starts == 1 assert handler.ends == 1 assert handler.errors == 0 assert handler.llm_starts == 1 assert handler.llm_ends == 1 def test_chat_model_with_v2_callbacks() -> None: """Test chat model callbacks fall back to on_llm_start.""" handler = FakeCallbackHandlerWithChatStart() llm = FakeListChatModel( callbacks=[handler], verbose=True, responses=["fake response"] ) output = llm.invoke([HumanMessage(content="foo")]) assert output.content == "fake response" assert handler.starts == 1 assert handler.ends == 1 assert handler.errors == 0 assert handler.llm_starts == 0 assert handler.llm_ends == 1 assert handler.chat_model_starts == 1
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_oci_generative_ai.py
"""Test OCI Generative AI LLM service""" from unittest.mock import MagicMock import pytest from pytest import MonkeyPatch from langchain_community.llms.oci_generative_ai import OCIGenAI class MockResponseDict(dict): def __getattr__(self, val): # type: ignore[no-untyped-def] return self[val] @pytest.mark.requires("oci") @pytest.mark.parametrize( "test_model_id", ["cohere.command", "cohere.command-light", "meta.llama-2-70b-chat"] ) def test_llm_complete(monkeypatch: MonkeyPatch, test_model_id: str) -> None: """Test valid completion call to OCI Generative AI LLM service.""" oci_gen_ai_client = MagicMock() llm = OCIGenAI(model_id=test_model_id, client=oci_gen_ai_client) model_id = llm.model_id if model_id is None: raise ValueError("Model ID is required for OCI Generative AI LLM service.") provider = model_id.split(".")[0].lower() def mocked_response(*args): # type: ignore[no-untyped-def] response_text = "This is the completion." if provider == "cohere": return MockResponseDict( { "status": 200, "data": MockResponseDict( { "inference_response": MockResponseDict( { "generated_texts": [ MockResponseDict( { "text": response_text, } ) ] } ) } ), } ) if provider == "meta": return MockResponseDict( { "status": 200, "data": MockResponseDict( { "inference_response": MockResponseDict( { "choices": [ MockResponseDict( { "text": response_text, } ) ] } ) } ), } ) monkeypatch.setattr(llm.client, "generate_text", mocked_response) output = llm.invoke("This is a prompt.", temperature=0.2) assert output == "This is the completion."
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_pipelineai.py
from pydantic import SecretStr from pytest import CaptureFixture from langchain_community.llms.pipelineai import PipelineAI def test_api_key_is_string() -> None: llm = PipelineAI(pipeline_api_key="secret-api-key") # type: ignore[arg-type] assert isinstance(llm.pipeline_api_key, SecretStr) def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: llm = PipelineAI(pipeline_api_key="secret-api-key") # type: ignore[arg-type] print(llm.pipeline_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_ai21.py
"""Test AI21 llm""" from typing import cast from pydantic import SecretStr from pytest import CaptureFixture, MonkeyPatch from langchain_community.llms.ai21 import AI21 def test_api_key_is_secret_string() -> None: llm = AI21(ai21_api_key="secret-api-key") # type: ignore[arg-type] assert isinstance(llm.ai21_api_key, SecretStr) def test_api_key_masked_when_passed_from_env( monkeypatch: MonkeyPatch, capsys: CaptureFixture ) -> None: """Test initialization with an API key provided via an env variable""" monkeypatch.setenv("AI21_API_KEY", "secret-api-key") llm = AI21() print(llm.ai21_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: """Test initialization with an API key provided via the initializer""" llm = AI21(ai21_api_key="secret-api-key") # type: ignore[arg-type] print(llm.ai21_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" def test_uses_actual_secret_value_from_secretstr() -> None: """Test that actual secret is retrieved using `.get_secret_value()`.""" llm = AI21(ai21_api_key="secret-api-key") # type: ignore[arg-type] assert cast(SecretStr, llm.ai21_api_key).get_secret_value() == "secret-api-key"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_symblai_nebula.py
"""Test the Nebula model by Symbl.ai""" from pydantic import SecretStr from pytest import CaptureFixture, MonkeyPatch from langchain_community.llms.symblai_nebula import Nebula def test_api_key_is_secret_string() -> None: llm = Nebula(nebula_api_key="secret-api-key") # type: ignore[arg-type] assert isinstance(llm.nebula_api_key, SecretStr) assert llm.nebula_api_key.get_secret_value() == "secret-api-key" def test_api_key_masked_when_passed_from_env( monkeypatch: MonkeyPatch, capsys: CaptureFixture ) -> None: monkeypatch.setenv("NEBULA_API_KEY", "secret-api-key") llm = Nebula() print(llm.nebula_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture) -> None: llm = Nebula(nebula_api_key="secret-api-key") # type: ignore[arg-type] print(llm.nebula_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_openai.py
import pytest from langchain_community.llms.openai import OpenAI from langchain_community.utils.openai import is_openai_v1 def _openai_v1_installed() -> bool: try: return is_openai_v1() except Exception as _: return False @pytest.mark.requires("openai") def test_openai_model_param() -> None: llm = OpenAI(model="foo", openai_api_key="foo") # type: ignore[call-arg] assert llm.model_name == "foo" llm = OpenAI(model_name="foo", openai_api_key="foo") # type: ignore[call-arg] assert llm.model_name == "foo" @pytest.mark.requires("openai") def test_openai_model_kwargs() -> None: llm = OpenAI(model_kwargs={"foo": "bar"}, openai_api_key="foo") # type: ignore[call-arg] assert llm.model_kwargs == {"foo": "bar"} @pytest.mark.requires("openai") def test_openai_fields_model_kwargs() -> None: """Test that for backwards compatibility fields can be passed in as model_kwargs.""" llm = OpenAI(model_kwargs={"model_name": "foo"}, api_key="foo") assert llm.model_name == "foo" llm = OpenAI(model_kwargs={"model": "foo"}, api_key="foo") assert llm.model_name == "foo" @pytest.mark.requires("openai") def test_openai_incorrect_field() -> None: with pytest.warns(match="not default parameter"): llm = OpenAI(foo="bar", openai_api_key="foo") # type: ignore[call-arg] assert llm.model_kwargs == {"foo": "bar"} @pytest.fixture def mock_completion() -> dict: return { "id": "cmpl-3evkmQda5Hu7fcZavknQda3SQ", "object": "text_completion", "created": 1689989000, "model": "gpt-3.5-turbo-instruct", "choices": [ {"text": "Bar Baz", "index": 0, "logprobs": None, "finish_reason": "length"} ], "usage": {"prompt_tokens": 1, "completion_tokens": 2, "total_tokens": 3}, }
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_together.py
"""Test Together LLM""" from typing import cast from pydantic import SecretStr from pytest import CaptureFixture, MonkeyPatch from langchain_community.llms.together import Together def test_together_api_key_is_secret_string() -> None: """Test that the API key is stored as a SecretStr.""" llm = Together( together_api_key="secret-api-key", # type: ignore[arg-type] model="togethercomputer/RedPajama-INCITE-7B-Base", temperature=0.2, max_tokens=250, ) assert isinstance(llm.together_api_key, SecretStr) def test_together_api_key_masked_when_passed_from_env( monkeypatch: MonkeyPatch, capsys: CaptureFixture ) -> None: """Test that the API key is masked when passed from an environment variable.""" monkeypatch.setenv("TOGETHER_API_KEY", "secret-api-key") llm = Together( # type: ignore[call-arg] model="togethercomputer/RedPajama-INCITE-7B-Base", temperature=0.2, max_tokens=250, ) print(llm.together_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" def test_together_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: """Test that the API key is masked when passed via the constructor.""" llm = Together( together_api_key="secret-api-key", # type: ignore[arg-type] model="togethercomputer/RedPajama-INCITE-7B-Base", temperature=0.2, max_tokens=250, ) print(llm.together_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" def test_together_uses_actual_secret_value_from_secretstr() -> None: """Test that the actual secret value is correctly retrieved.""" llm = Together( together_api_key="secret-api-key", # type: ignore[arg-type] model="togethercomputer/RedPajama-INCITE-7B-Base", temperature=0.2, max_tokens=250, ) assert cast(SecretStr, llm.together_api_key).get_secret_value() == "secret-api-key"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_stochasticai.py
from pydantic import SecretStr
from pytest import CaptureFixture

from langchain_community.llms.stochasticai import StochasticAI


def test_api_key_is_string() -> None:
    """The raw API key given to the constructor must be wrapped in a SecretStr."""
    model = StochasticAI(stochasticai_api_key="secret-api-key")  # type: ignore[arg-type]
    assert isinstance(model.stochasticai_api_key, SecretStr)


def test_api_key_masked_when_passed_via_constructor(
    capsys: CaptureFixture,
) -> None:
    """Printing the stored key must emit the masked placeholder, not the secret."""
    model = StochasticAI(stochasticai_api_key="secret-api-key")  # type: ignore[arg-type]
    print(model.stochasticai_api_key, end="")  # noqa: T201
    assert capsys.readouterr().out == "**********"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_writer.py
from typing import List
from unittest import mock
from unittest.mock import AsyncMock, MagicMock

import pytest
from langchain_core.callbacks import CallbackManager
from pydantic import SecretStr

from langchain_community.llms.writer import Writer
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler

"""Classes for mocking Writer responses."""


class Choice:
    # Mirrors a single completion choice object returned by the Writer SDK.
    def __init__(self, text: str):
        self.text = text


class Completion:
    # Mirrors a non-streaming Writer completion response.
    def __init__(self, choices: List[Choice]):
        self.choices = choices


class StreamingData:
    # Mirrors one chunk of a streaming Writer response.
    def __init__(self, value: str):
        self.value = value


@pytest.mark.requires("writerai")
class TestWriterLLM:
    """Unit tests for Writer LLM integration."""

    @pytest.fixture(autouse=True)
    def mock_unstreaming_completion(self) -> Completion:
        """Fixture providing a mock API response."""
        return Completion(choices=[Choice(text="Hello! How can I help you?")])

    @pytest.fixture(autouse=True)
    def mock_streaming_completion(self) -> List[StreamingData]:
        """Fixture providing mock streaming response chunks."""
        # Three chunks that concatenate to "Hello! How can I help you?".
        return [
            StreamingData(value="Hello! "),
            StreamingData(value="How can I"),
            StreamingData(value=" help you?"),
        ]

    def test_sync_unstream_completion(
        self, mock_unstreaming_completion: Completion
    ) -> None:
        """Test basic llm call with mocked response."""
        mock_client = MagicMock()
        mock_client.completions.create.return_value = mock_unstreaming_completion

        llm = Writer(api_key=SecretStr("key"))

        # Patch the sync client so no real network call is made.
        with mock.patch.object(llm, "client", mock_client):
            response_text = llm.invoke(input="Hello")

        assert response_text == "Hello! How can I help you?"

    def test_sync_unstream_completion_with_params(
        self, mock_unstreaming_completion: Completion
    ) -> None:
        """Test llm call with passed params with mocked response."""
        mock_client = MagicMock()
        mock_client.completions.create.return_value = mock_unstreaming_completion

        llm = Writer(api_key=SecretStr("key"), temperature=1)

        with mock.patch.object(llm, "client", mock_client):
            response_text = llm.invoke(input="Hello")

        assert response_text == "Hello! How can I help you?"

    @pytest.mark.asyncio
    async def test_async_unstream_completion(
        self, mock_unstreaming_completion: Completion
    ) -> None:
        """Test async chat completion with mocked response."""
        mock_async_client = AsyncMock()
        mock_async_client.completions.create.return_value = mock_unstreaming_completion

        llm = Writer(api_key=SecretStr("key"))

        # Patch the async client used by `ainvoke`.
        with mock.patch.object(llm, "async_client", mock_async_client):
            response_text = await llm.ainvoke(input="Hello")

        assert response_text == "Hello! How can I help you?"

    @pytest.mark.asyncio
    async def test_async_unstream_completion_with_params(
        self, mock_unstreaming_completion: Completion
    ) -> None:
        """Test async llm call with passed params with mocked response."""
        mock_async_client = AsyncMock()
        mock_async_client.completions.create.return_value = mock_unstreaming_completion

        llm = Writer(api_key=SecretStr("key"), temperature=1)

        with mock.patch.object(llm, "async_client", mock_async_client):
            response_text = await llm.ainvoke(input="Hello")

        assert response_text == "Hello! How can I help you?"

    def test_sync_streaming_completion(
        self, mock_streaming_completion: List[StreamingData]
    ) -> None:
        """Test sync streaming."""
        mock_client = MagicMock()
        mock_response = MagicMock()
        # The streaming response is iterated chunk by chunk.
        mock_response.__iter__.return_value = mock_streaming_completion
        mock_client.completions.create.return_value = mock_response

        llm = Writer(api_key=SecretStr("key"))

        with mock.patch.object(llm, "client", mock_client):
            response = llm.stream(input="Hello")

            response_message = ""
            for chunk in response:
                response_message += chunk

        assert response_message == "Hello! How can I help you?"

    def test_sync_streaming_completion_with_callback_handler(
        self, mock_streaming_completion: List[StreamingData]
    ) -> None:
        """Test sync streaming with callback handler."""
        callback_handler = FakeCallbackHandler()
        callback_manager = CallbackManager([callback_handler])

        mock_client = MagicMock()
        mock_response = MagicMock()
        mock_response.__iter__.return_value = mock_streaming_completion
        mock_client.completions.create.return_value = mock_response

        llm = Writer(
            api_key=SecretStr("key"),
            callback_manager=callback_manager,
        )

        with mock.patch.object(llm, "client", mock_client):
            response = llm.stream(input="Hello")

            response_message = ""
            for chunk in response:
                response_message += chunk

        # One on_llm_new_token callback per streamed chunk.
        assert callback_handler.llm_streams == 3
        assert response_message == "Hello! How can I help you?"

    @pytest.mark.asyncio
    async def test_async_streaming_completion(
        self, mock_streaming_completion: Completion
    ) -> None:
        """Test async streaming with callback handler."""
        mock_async_client = AsyncMock()
        mock_response = AsyncMock()
        # The async streaming response is consumed via `async for`.
        mock_response.__aiter__.return_value = mock_streaming_completion
        mock_async_client.completions.create.return_value = mock_response

        llm = Writer(api_key=SecretStr("key"))

        with mock.patch.object(llm, "async_client", mock_async_client):
            response = llm.astream(input="Hello")

            response_message = ""
            async for chunk in response:
                response_message += str(chunk)

        assert response_message == "Hello! How can I help you?"

    @pytest.mark.asyncio
    async def test_async_streaming_completion_with_callback_handler(
        self, mock_streaming_completion: Completion
    ) -> None:
        """Test async streaming with callback handler."""
        callback_handler = FakeCallbackHandler()
        callback_manager = CallbackManager([callback_handler])

        mock_async_client = AsyncMock()
        mock_response = AsyncMock()
        mock_response.__aiter__.return_value = mock_streaming_completion
        mock_async_client.completions.create.return_value = mock_response

        llm = Writer(
            api_key=SecretStr("key"),
            callback_manager=callback_manager,
        )

        with mock.patch.object(llm, "async_client", mock_async_client):
            response = llm.astream(input="Hello")

            response_message = ""
            async for chunk in response:
                response_message += str(chunk)

        assert callback_handler.llm_streams == 3
        assert response_message == "Hello! How can I help you?"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_imports.py
from langchain_core.language_models.llms import BaseLLM

from langchain_community import llms

# Canonical list of every LLM class expected to be exported from
# ``langchain_community.llms``; must stay in sync with ``llms.__all__``.
EXPECT_ALL = [
    "AI21",
    "AlephAlpha",
    "AmazonAPIGateway",
    "Anthropic",
    "Anyscale",
    "Aphrodite",
    "Arcee",
    "Aviary",
    "AzureMLOnlineEndpoint",
    "AzureOpenAI",
    "BaichuanLLM",
    "Banana",
    "Baseten",
    "Beam",
    "Bedrock",
    "CTransformers",
    "CTranslate2",
    "CerebriumAI",
    "ChatGLM",
    "Clarifai",
    "Cohere",
    "Databricks",
    "DeepInfra",
    "DeepSparse",
    "EdenAI",
    "FakeListLLM",
    "Fireworks",
    "ForefrontAI",
    "Friendli",
    "GigaChat",
    "GPT4All",
    "GooglePalm",
    "GooseAI",
    "GradientLLM",
    "HuggingFaceEndpoint",
    "HuggingFaceHub",
    "HuggingFacePipeline",
    "HuggingFaceTextGenInference",
    "HumanInputLLM",
    "IpexLLM",
    "KoboldApiLLM",
    "Konko",
    "LlamaCpp",
    "Llamafile",
    "TextGen",
    "ManifestWrapper",
    "Minimax",
    "Mlflow",
    "MlflowAIGateway",
    "MLXPipeline",
    "Modal",
    "MosaicML",
    "Nebula",
    "OCIModelDeploymentLLM",
    "OCIModelDeploymentTGI",
    "OCIModelDeploymentVLLM",
    "OCIGenAI",
    "NIBittensorLLM",
    "NLPCloud",
    "Ollama",
    "OpenAI",
    "OpenAIChat",
    "OpenLLM",
    "OpenLM",
    "Outlines",
    "PaiEasEndpoint",
    "Petals",
    "PipelineAI",
    "Predibase",
    "PredictionGuard",
    "PromptLayerOpenAI",
    "PromptLayerOpenAIChat",
    "OpaquePrompts",
    "RWKV",
    "Replicate",
    "SagemakerEndpoint",
    "SambaNovaCloud",
    "SambaStudio",
    "SelfHostedHuggingFaceLLM",
    "SelfHostedPipeline",
    "StochasticAI",
    "TitanTakeoff",
    "TitanTakeoffPro",
    "Together",
    "Tongyi",
    "VertexAI",
    "VertexAIModelGarden",
    "VLLM",
    "VLLMOpenAI",
    "WeightOnlyQuantPipeline",
    "Writer",
    "OctoAIEndpoint",
    "Xinference",
    "JavelinAIGateway",
    "QianfanLLMEndpoint",
    "YandexGPT",
    "Yuan2",
    "YiLLM",
    "You",
    "VolcEngineMaasLLM",
    "WatsonxLLM",
    "SparkLLM",
]


def test_all_imports() -> None:
    """Simple test to make sure all things can be imported."""
    # Every exported name must resolve to a BaseLLM subclass...
    for cls in llms.__all__:
        assert issubclass(getattr(llms, cls), BaseLLM)
    # ...and the export list must match the expected set exactly.
    assert set(llms.__all__) == set(EXPECT_ALL)
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_gradient_ai.py
from typing import Dict

import pytest
from pytest_mock import MockerFixture

from langchain_community.llms import GradientLLM

# Shared fixtures: model id, credentials, and endpoint used by every
# mocked request below.
_MODEL_ID = "my_model_valid_id"
_GRADIENT_SECRET = "secret_valid_token_123456"
_GRADIENT_WORKSPACE_ID = "valid_workspace_12345"
_GRADIENT_BASE_URL = "https://api.gradient.ai/api"


class MockResponse:
    # Minimal stand-in for ``requests.Response`` returned by the mocked POST.
    def __init__(self, json_data: Dict, status_code: int):
        self.json_data = json_data
        self.status_code = status_code

    def json(self) -> Dict:
        return self.json_data


def mocked_requests_post(url: str, headers: dict, json: dict) -> MockResponse:
    """Replacement for ``requests.post`` that asserts on the outgoing request.

    Responds with "bar" when the query contains "foo", otherwise "baz".
    """
    assert url.startswith(_GRADIENT_BASE_URL)
    assert _MODEL_ID in url
    assert json
    assert headers
    # Auth and workspace headers must carry the configured credentials.
    assert headers.get("authorization") == f"Bearer {_GRADIENT_SECRET}"
    assert headers.get("x-gradient-workspace-id") == f"{_GRADIENT_WORKSPACE_ID}"
    query = json.get("query")
    assert query and isinstance(query, str)
    output = "bar" if "foo" in query else "baz"
    return MockResponse(
        json_data={"generatedOutput": output},
        status_code=200,
    )


@pytest.mark.parametrize(
    "setup",
    [
        # The model can be given either as ``model`` or as ``model_id``.
        dict(
            gradient_api_url=_GRADIENT_BASE_URL,
            gradient_access_token=_GRADIENT_SECRET,
            gradient_workspace_id=_GRADIENT_WORKSPACE_ID,
            model=_MODEL_ID,
        ),
        dict(
            gradient_api_url=_GRADIENT_BASE_URL,
            gradient_access_token=_GRADIENT_SECRET,
            gradient_workspace_id=_GRADIENT_WORKSPACE_ID,
            model_id=_MODEL_ID,
        ),
    ],
)
def test_gradient_llm_sync(mocker: MockerFixture, setup: dict) -> None:
    # Patch the network call before the LLM is exercised.
    mocker.patch("requests.post", side_effect=mocked_requests_post)

    llm = GradientLLM(**setup)
    assert llm.gradient_access_token == _GRADIENT_SECRET
    assert llm.gradient_api_url == _GRADIENT_BASE_URL
    assert llm.gradient_workspace_id == _GRADIENT_WORKSPACE_ID
    assert llm.model_id == _MODEL_ID

    response = llm.invoke("Say foo:")
    want = "bar"
    assert response == want


@pytest.mark.parametrize(
    "setup",
    [
        dict(
            gradient_api_url=_GRADIENT_BASE_URL,
            gradient_access_token=_GRADIENT_SECRET,
            gradient_workspace_id=_GRADIENT_WORKSPACE_ID,
            model=_MODEL_ID,
        )
    ],
)
def test_gradient_llm_sync_batch(mocker: MockerFixture, setup: dict) -> None:
    mocker.patch("requests.post", side_effect=mocked_requests_post)

    llm = GradientLLM(**setup)
    assert llm.gradient_access_token == _GRADIENT_SECRET
    assert llm.gradient_api_url == _GRADIENT_BASE_URL
    assert llm.gradient_workspace_id == _GRADIENT_WORKSPACE_ID
    assert llm.model_id == _MODEL_ID

    inputs = ["Say foo:", "Say baz:", "Say foo again"]
    response = llm._generate(inputs)

    # One generation per input, mapped through the mock's foo->bar rule.
    want = ["bar", "baz", "bar"]
    assert len(response.generations) == len(inputs)
    for i, gen in enumerate(response.generations):
        assert gen[0].text == want[i]
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_llamafile.py
import json
from collections import deque
from typing import Any, Dict

import pytest
import requests
from pytest import MonkeyPatch

from langchain_community.llms.llamafile import Llamafile


def default_generation_params() -> Dict[str, Any]:
    # Default sampling/generation options the Llamafile LLM sends with
    # every /completion request when no overrides are given.
    return {
        "temperature": 0.8,
        "seed": -1,
        "top_k": 40,
        "top_p": 0.95,
        "min_p": 0.05,
        "n_predict": -1,
        "n_keep": 0,
        "tfs_z": 1.0,
        "typical_p": 1.0,
        "repeat_penalty": 1.1,
        "repeat_last_n": 64,
        "penalize_nl": True,
        "presence_penalty": 0.0,
        "frequency_penalty": 0.0,
        "mirostat": 0,
        "mirostat_tau": 5.0,
        "mirostat_eta": 0.1,
    }


def mock_response() -> requests.Response:
    # Canned non-streaming completion response.
    contents = json.dumps({"content": "the quick brown fox"})
    response = requests.Response()
    response.status_code = 200
    response._content = str.encode(contents)
    return response


def mock_response_stream():  # type: ignore[no-untyped-def]
    # Canned server-sent-events stream with two content chunks.
    mock_response = deque(
        [
            b'data: {"content":"the","multimodal":false,"slot_id":0,"stop":false}\n\n',
            b'data: {"content":" quick","multimodal":false,"slot_id":0,"stop":false}\n\n',  # noqa
        ]
    )

    class MockRaw:
        # Emulates the raw socket: one queued chunk per read, then None (EOF).
        def read(self, chunk_size):  # type: ignore[no-untyped-def]
            try:
                return mock_response.popleft()
            except IndexError:
                return None

    response = requests.Response()
    response.status_code = 200
    response.raw = MockRaw()
    return response


def test_call(monkeypatch: MonkeyPatch) -> None:
    """
    Test basic functionality of the `invoke` method
    """
    llm = Llamafile(
        base_url="http://llamafile-host:8080",
    )

    def mock_post(url, headers, json, stream, timeout):  # type: ignore[no-untyped-def]
        assert url == "http://llamafile-host:8080/completion"
        assert headers == {
            "Content-Type": "application/json",
        }
        # 'unknown' kwarg should be ignored
        assert json == {"prompt": "Test prompt", **default_generation_params()}
        assert stream is False
        assert timeout is None
        return mock_response()

    monkeypatch.setattr(requests, "post", mock_post)
    out = llm.invoke("Test prompt")
    assert out == "the quick brown fox"


def test_call_with_kwargs(monkeypatch: MonkeyPatch) -> None:
    """
    Test kwargs passed to `invoke` override the default values and are passed
    to the endpoint correctly. Also test that any 'unknown' kwargs that are not
    present in the LLM class attrs are ignored.
    """
    llm = Llamafile(
        base_url="http://llamafile-host:8080",
    )

    def mock_post(url, headers, json, stream, timeout):  # type: ignore[no-untyped-def]
        assert url == "http://llamafile-host:8080/completion"
        assert headers == {
            "Content-Type": "application/json",
        }
        # 'unknown' kwarg should be ignored
        expected = {"prompt": "Test prompt", **default_generation_params()}
        expected["seed"] = 0
        assert json == expected
        assert stream is False
        assert timeout is None
        return mock_response()

    monkeypatch.setattr(requests, "post", mock_post)
    out = llm.invoke(
        "Test prompt",
        unknown="unknown option",  # should be ignored
        seed=0,  # should override the default
    )
    assert out == "the quick brown fox"


def test_call_raises_exception_on_missing_server(monkeypatch: MonkeyPatch) -> None:
    """
    Test that the LLM raises a ConnectionError when no llamafile server is
    listening at the base_url.
    """
    llm = Llamafile(
        # invalid url, nothing should actually be running here
        base_url="http://llamafile-host:8080",
    )
    with pytest.raises(requests.exceptions.ConnectionError):
        llm.invoke("Test prompt")


def test_streaming(monkeypatch: MonkeyPatch) -> None:
    """
    Test basic functionality of `invoke` with streaming enabled.
    """
    llm = Llamafile(
        base_url="http://llamafile-hostname:8080",
        streaming=True,
    )

    def mock_post(url, headers, json, stream, timeout):  # type: ignore[no-untyped-def]
        assert url == "http://llamafile-hostname:8080/completion"
        assert headers == {
            "Content-Type": "application/json",
        }
        # 'unknown' kwarg should be ignored
        assert "unknown" not in json
        expected = {"prompt": "Test prompt", **default_generation_params()}
        expected["stream"] = True
        assert json == expected
        assert stream is True
        assert timeout is None
        return mock_response_stream()

    monkeypatch.setattr(requests, "post", mock_post)
    out = llm.invoke("Test prompt")
    # Only the two queued stream chunks are concatenated.
    assert out == "the quick"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/fake_llm.py
"""Fake LLM wrapper for testing purposes.""" from typing import Any, Dict, List, Mapping, Optional, cast from langchain_core.callbacks import CallbackManagerForLLMRun from langchain_core.language_models.llms import LLM from pydantic import validator class FakeLLM(LLM): """Fake LLM wrapper for testing purposes.""" queries: Optional[Mapping] = None sequential_responses: Optional[bool] = False response_index: int = 0 @validator("queries", always=True) def check_queries_required( cls, queries: Optional[Mapping], values: Mapping[str, Any] ) -> Optional[Mapping]: if values.get("sequential_response") and not queries: raise ValueError( "queries is required when sequential_response is set to True" ) return queries def get_num_tokens(self, text: str) -> int: """Return number of tokens.""" return len(text.split()) @property def _llm_type(self) -> str: """Return type of llm.""" return "fake" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: if self.sequential_responses: return self._get_next_response_in_sequence if self.queries is not None: return self.queries[prompt] if stop is None: return "foo" else: return "bar" @property def _identifying_params(self) -> Dict[str, Any]: return {} @property def _get_next_response_in_sequence(self) -> str: queries = cast(Mapping, self.queries) response = queries[list(queries.keys())[self.response_index]] self.response_index = self.response_index + 1 return response
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_you.py
import pytest
import requests_mock


@pytest.mark.parametrize("endpoint", ("smart", "research"))
@pytest.mark.requires("sseclient")
def test_invoke(
    endpoint: str, requests_mock: requests_mock.Mocker, monkeypatch: pytest.MonkeyPatch
) -> None:
    """Invoke the You LLM against a mocked endpoint and check the answer."""
    from langchain_community.llms import You
    from langchain_community.llms.you import RESEARCH_ENDPOINT, SMART_ENDPOINT

    mock_payload = {
        "answer": (
            "A solar eclipse occurs when the Moon passes between the Sun and Earth, "
            "casting a shadow on Earth and ..."
        ),
        "search_results": [
            {
                "url": "https://en.wikipedia.org/wiki/Solar_eclipse",
                "name": "Solar eclipse - Wikipedia",
                "snippet": (
                    "A solar eclipse occurs when the Moon passes "
                    "between Earth and the Sun, thereby obscuring the view of the Sun "
                    "from a small part of Earth, totally or partially. "
                ),
            }
        ],
    }

    # Route the selected endpoint to the canned payload.
    if endpoint == "smart":
        request_endpoint = SMART_ENDPOINT
    else:
        request_endpoint = RESEARCH_ENDPOINT
    requests_mock.post(request_endpoint, json=mock_payload)
    monkeypatch.setenv("YDC_API_KEY", "...")

    llm = You(endpoint=endpoint)
    output = llm.invoke("What is a solar eclipse?")
    assert output == mock_payload["answer"]
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_aleph_alpha.py
"""Test Aleph Alpha specific stuff.""" import pytest from pydantic import SecretStr from pytest import CaptureFixture, MonkeyPatch from langchain_community.llms.aleph_alpha import AlephAlpha @pytest.mark.requires("aleph_alpha_client") def test_api_key_is_secret_string() -> None: llm = AlephAlpha(aleph_alpha_api_key="secret-api-key") # type: ignore assert isinstance(llm.aleph_alpha_api_key, SecretStr) @pytest.mark.requires("aleph_alpha_client") def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: llm = AlephAlpha(aleph_alpha_api_key="secret-api-key") # type: ignore print(llm.aleph_alpha_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" @pytest.mark.requires("aleph_alpha_client") def test_api_key_masked_when_passed_from_env( monkeypatch: MonkeyPatch, capsys: CaptureFixture ) -> None: monkeypatch.setenv("ALEPH_ALPHA_API_KEY", "secret-api-key") llm = AlephAlpha() # type: ignore[call-arg] print(llm.aleph_alpha_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_bedrock.py
import json
from typing import AsyncGenerator, Dict
from unittest.mock import MagicMock, patch

import pytest

from langchain_community.llms.bedrock import (
    ALTERNATION_ERROR,
    Bedrock,
    _human_assistant_format,
)

# Mapping of raw prompt -> expected output of ``_human_assistant_format``
# (or the alternation-error sentinel when the prompt is malformed).
# NOTE(review): the blank-line placement inside these multiline fixtures was
# reconstructed from a whitespace-collapsed copy of this file — verify the
# exact "\n\nHuman:" / "\n\nAssistant:" spacing against the original source.
TEST_CASES = {
    """Hey""": """

Human: Hey

Assistant:""",
    """

Human: Hello

Assistant:""": """

Human: Hello

Assistant:""",
    """Human: Hello

Assistant:""": """

Human: Hello

Assistant:""",
    """
Human: Hello

Assistant:""": """

Human: Hello

Assistant:""",
    """

Human: Human: Hello

Assistant:""": (
        "Error: Prompt must alternate between '\n\nHuman:' and '\n\nAssistant:'."
    ),
    """Human: Hello

Assistant: Hello

Human: Hello

Assistant:""": """

Human: Hello

Assistant: Hello

Human: Hello

Assistant:""",
    """

Human: Hello

Assistant: Hello

Human: Hello

Assistant:""": """

Human: Hello

Assistant: Hello

Human: Hello

Assistant:""",
    """

Human: Hello

Assistant: Hello

Human: Hello

Assistant: Hello

Assistant: Hello""": ALTERNATION_ERROR,
    """

Human: Hi.

Assistant: Hi.

Human: Hi.

Human: Hi.

Assistant:""": ALTERNATION_ERROR,
    """

Human: Hello""": """

Human: Hello

Assistant:""",
    """

Human: Hello

Hello

Assistant""": """

Human: Hello

Hello

Assistant

Assistant:""",
    """Hello

Assistant:""": """

Human: Hello

Assistant:""",
    """Hello

Human: Hello

""": """Hello

Human: Hello

Assistant:""",
    """

Human:

Assistant: Hello""": """

Human:

Assistant: Hello""",
    """

Human: Human

Assistant: Assistant

Human: Assistant

Assistant: Human""": """

Human: Human

Assistant: Assistant

Human: Assistant

Assistant: Human""",
    """

Assistant: Hello there, your name is: Human.

Human: Hello there, your name is: Assistant.""": """

Human:

Assistant: Hello there, your name is: Human.

Human: Hello there, your name is: Assistant.

Assistant:""",
    """

Human: Human: Hi

Assistant: Hi""": ALTERNATION_ERROR,
    """Human: Hi

Human: Hi""": ALTERNATION_ERROR,
    """

Assistant: Hi

Human: Hi""": """

Human:

Assistant: Hi

Human: Hi

Assistant:""",
    """

Human: Hi

Assistant: Yo

Human: Hey

Assistant: Sup

Human: Hi

Assistant: Hi

Human: Hi

Assistant:""": """

Human: Hi

Assistant: Yo

Human: Hey

Assistant: Sup

Human: Hi

Assistant: Hi

Human: Hi

Assistant:""",
    """

Hello.

Human: Hello.

Assistant:""": """

Hello.

Human: Hello.

Assistant:""",
}


def test__human_assistant_format() -> None:
    # Each malformed prompt must warn with the alternation error; every other
    # prompt must be normalized to the expected Human/Assistant layout.
    for input_text, expected_output in TEST_CASES.items():
        if expected_output == ALTERNATION_ERROR:
            with pytest.warns(UserWarning, match=ALTERNATION_ERROR):
                _human_assistant_format(input_text)
        else:
            output = _human_assistant_format(input_text)
            assert output == expected_output


# Sample mock streaming response data
MOCK_STREAMING_RESPONSE = [
    {"chunk": {"bytes": b'{"text": "nice"}'}},
    {"chunk": {"bytes": b'{"text": " to meet"}'}},
    {"chunk": {"bytes": b'{"text": " you"}'}},
]


async def async_gen_mock_streaming_response() -> AsyncGenerator[Dict, None]:
    # Replays the canned streaming chunks as an async generator.
    for item in MOCK_STREAMING_RESPONSE:
        yield item


async def test_bedrock_async_streaming_call() -> None:
    # Mock boto3 import
    mock_boto3 = MagicMock()
    session = MagicMock()
    session.region_name = "region"
    mock_boto3.Session.return_value = session
    mock_boto3.Session.return_value.client.return_value = (
        session  # Mocking the client method of the Session object
    )

    with patch.dict(
        "sys.modules", {"boto3": mock_boto3}
    ):  # Mocking boto3 at the top level using patch.dict
        # Mock the `Bedrock` class's method that invokes the model
        mock_invoke_method = MagicMock(return_value=async_gen_mock_streaming_response())
        with patch.object(
            Bedrock, "_aprepare_input_and_invoke_stream", mock_invoke_method
        ):
            # Instantiate the Bedrock LLM
            llm = Bedrock(
                client=None,
                model_id="anthropic.claude-v2",
                streaming=True,
            )
            # Call the _astream method
            chunks = [
                json.loads(chunk["chunk"]["bytes"])["text"]  # type: ignore
                async for chunk in llm._astream("Hey, how are you?")
            ]

    # Assertions
    assert len(chunks) == 3
    assert chunks[0] == "nice"
    assert chunks[1] == " to meet"
    assert chunks[2] == " you"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_watsonxllm.py
"""Test WatsonxLLM API wrapper.""" from langchain_community.llms import WatsonxLLM def test_initialize_watsonxllm_bad_path_without_url() -> None: try: WatsonxLLM( model_id="google/flan-ul2", ) except ValueError as e: assert "WATSONX_URL" in e.__str__() def test_initialize_watsonxllm_cloud_bad_path() -> None: try: WatsonxLLM(model_id="google/flan-ul2", url="https://us-south.ml.cloud.ibm.com") except ValueError as e: assert "WATSONX_APIKEY" in e.__str__() def test_initialize_watsonxllm_cpd_bad_path_without_all() -> None: try: WatsonxLLM( model_id="google/flan-ul2", url="https://cpd-zen.apps.cpd48.cp.fyre.ibm.com", ) except ValueError as e: assert ( "WATSONX_APIKEY" in e.__str__() and "WATSONX_PASSWORD" in e.__str__() and "WATSONX_TOKEN" in e.__str__() ) def test_initialize_watsonxllm_cpd_bad_path_password_without_username() -> None: try: WatsonxLLM( model_id="google/flan-ul2", url="https://cpd-zen.apps.cpd48.cp.fyre.ibm.com", password="test_password", ) except ValueError as e: assert "WATSONX_USERNAME" in e.__str__() def test_initialize_watsonxllm_cpd_bad_path_apikey_without_username() -> None: try: WatsonxLLM( model_id="google/flan-ul2", url="https://cpd-zen.apps.cpd48.cp.fyre.ibm.com", apikey="test_apikey", ) except ValueError as e: assert "WATSONX_USERNAME" in e.__str__()
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_ollama.py
import requests
from pytest import MonkeyPatch

from langchain_community.llms.ollama import Ollama


def mock_response_stream():  # type: ignore[no-untyped-def]
    """Build a fake ``requests.Response`` that streams a single JSON chunk."""
    mock_response = [b'{ "response": "Response chunk 1" }']

    class MockRaw:
        # Stand-in for the raw stream: pops the one queued chunk, then
        # signals end-of-stream by returning None.
        def read(self, chunk_size):  # type: ignore[no-untyped-def]
            try:
                return mock_response.pop()
            except IndexError:
                return None

    response = requests.Response()
    response.status_code = 200
    response.raw = MockRaw()
    return response


def test_pass_headers_if_provided(monkeypatch: MonkeyPatch) -> None:
    """Headers given at construction are sent alongside the default Content-Type."""
    llm = Ollama(
        base_url="https://ollama-hostname:8000",
        model="foo",
        headers={
            "Authorization": "Bearer TEST-TOKEN-VALUE",
            "Referer": "https://application-host",
        },
        timeout=300,
    )

    def mock_post(url, headers, json, stream, timeout, auth):  # type: ignore[no-untyped-def]
        # Custom headers must be merged on top of the default JSON header.
        assert url == "https://ollama-hostname:8000/api/generate"
        assert headers == {
            "Content-Type": "application/json",
            "Authorization": "Bearer TEST-TOKEN-VALUE",
            "Referer": "https://application-host",
        }
        assert json is not None
        assert stream is True
        assert timeout == 300
        return mock_response_stream()

    monkeypatch.setattr(requests, "post", mock_post)

    llm.invoke("Test prompt")


def test_pass_auth_if_provided(monkeypatch: MonkeyPatch) -> None:
    """Basic-auth credentials given at construction are forwarded to requests."""
    llm = Ollama(
        base_url="https://ollama-hostname:8000",
        model="foo",
        auth=("Test-User", "Test-Password"),
        timeout=300,
    )

    def mock_post(url, headers, json, stream, timeout, auth):  # type: ignore[no-untyped-def]
        assert url == "https://ollama-hostname:8000/api/generate"
        assert headers == {
            "Content-Type": "application/json",
        }
        assert json is not None
        assert stream is True
        assert timeout == 300
        # The auth tuple must pass through unchanged.
        assert auth == ("Test-User", "Test-Password")
        return mock_response_stream()

    monkeypatch.setattr(requests, "post", mock_post)

    llm.invoke("Test prompt")


def test_handle_if_headers_not_provided(monkeypatch: MonkeyPatch) -> None:
    """Without custom headers, only the default Content-Type is sent."""
    llm = Ollama(base_url="https://ollama-hostname:8000", model="foo", timeout=300)

    def mock_post(url, headers, json, stream, timeout, auth):  # type: ignore[no-untyped-def]
        assert url == "https://ollama-hostname:8000/api/generate"
        assert headers == {
            "Content-Type": "application/json",
        }
        assert json is not None
        assert stream is True
        assert timeout == 300
        return mock_response_stream()

    monkeypatch.setattr(requests, "post", mock_post)

    llm.invoke("Test prompt")


def test_handle_kwargs_top_level_parameters(monkeypatch: MonkeyPatch) -> None:
    """Test that top level params are sent to the endpoint as top level params"""
    llm = Ollama(base_url="https://ollama-hostname:8000", model="foo", timeout=300)

    def mock_post(url, headers, json, stream, timeout, auth):  # type: ignore[no-untyped-def]
        assert url == "https://ollama-hostname:8000/api/generate"
        assert headers == {
            "Content-Type": "application/json",
        }
        # `model` and `system` override/augment the payload at the top level;
        # every unset option remains None in the nested "options" mapping.
        assert json == {
            "format": None,
            "images": None,
            "model": "test-model",
            "options": {
                "mirostat": None,
                "mirostat_eta": None,
                "mirostat_tau": None,
                "num_ctx": None,
                "num_gpu": None,
                "num_thread": None,
                "num_predict": None,
                "repeat_last_n": None,
                "repeat_penalty": None,
                "stop": None,
                "temperature": None,
                "tfs_z": None,
                "top_k": None,
                "top_p": None,
            },
            "prompt": "Test prompt",
            "system": "Test system prompt",
            "template": None,
            "keep_alive": None,
            "raw": None,
        }
        assert stream is True
        assert timeout == 300
        return mock_response_stream()

    monkeypatch.setattr(requests, "post", mock_post)

    llm.invoke("Test prompt", model="test-model", system="Test system prompt")


def test_handle_kwargs_with_unknown_param(monkeypatch: MonkeyPatch) -> None:
    """
    Test that params that are not top level params will be sent to the endpoint
    as options
    """
    llm = Ollama(base_url="https://ollama-hostname:8000", model="foo", timeout=300)

    def mock_post(url, headers, json, stream, timeout, auth):  # type: ignore[no-untyped-def]
        assert url == "https://ollama-hostname:8000/api/generate"
        assert headers == {
            "Content-Type": "application/json",
        }
        # Unrecognized kwargs ("unknown") and known options ("temperature")
        # both land inside the nested "options" mapping.
        assert json == {
            "format": None,
            "images": None,
            "model": "foo",
            "options": {
                "mirostat": None,
                "mirostat_eta": None,
                "mirostat_tau": None,
                "num_ctx": None,
                "num_gpu": None,
                "num_thread": None,
                "num_predict": None,
                "repeat_last_n": None,
                "repeat_penalty": None,
                "stop": None,
                "temperature": 0.8,
                "tfs_z": None,
                "top_k": None,
                "top_p": None,
                "unknown": "Unknown parameter value",
            },
            "prompt": "Test prompt",
            "system": None,
            "template": None,
            "keep_alive": None,
            "raw": None,
        }
        assert stream is True
        assert timeout == 300
        return mock_response_stream()

    monkeypatch.setattr(requests, "post", mock_post)

    llm.invoke("Test prompt", unknown="Unknown parameter value", temperature=0.8)


def test_handle_kwargs_with_options(monkeypatch: MonkeyPatch) -> None:
    """
    Test that if options provided it will be sent to the endpoint as options,
    ignoring other params that are not top level params.
    """
    llm = Ollama(base_url="https://ollama-hostname:8000", model="foo", timeout=300)

    def mock_post(url, headers, json, stream, timeout, auth):  # type: ignore[no-untyped-def]
        assert url == "https://ollama-hostname:8000/api/generate"
        assert headers == {
            "Content-Type": "application/json",
        }
        # An explicit `options` dict replaces the built-in options wholesale;
        # the stray `unknown`/`temperature` kwargs are dropped.
        assert json == {
            "format": None,
            "images": None,
            "model": "test-another-model",
            "options": {"unknown_option": "Unknown option value"},
            "prompt": "Test prompt",
            "system": None,
            "template": None,
            "keep_alive": None,
            "raw": None,
        }
        assert stream is True
        assert timeout == 300
        return mock_response_stream()

    monkeypatch.setattr(requests, "post", mock_post)

    llm.invoke(
        "Test prompt",
        model="test-another-model",
        options={"unknown_option": "Unknown option value"},
        unknown="Unknown parameter value",
        temperature=0.8,
    )
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_friendli.py
"""Test Friendli LLM.""" from unittest.mock import AsyncMock, MagicMock, Mock import pytest from pydantic import SecretStr from pytest import CaptureFixture, MonkeyPatch from langchain_community.adapters.openai import aenumerate from langchain_community.llms.friendli import Friendli @pytest.fixture def mock_friendli_client() -> Mock: """Mock instance of Friendli client.""" return Mock() @pytest.fixture def mock_friendli_async_client() -> AsyncMock: """Mock instance of Friendli async client.""" return AsyncMock() @pytest.fixture def friendli_llm( mock_friendli_client: Mock, mock_friendli_async_client: AsyncMock ) -> Friendli: """Friendli LLM with mock clients.""" return Friendli( friendli_token=SecretStr("personal-access-token"), client=mock_friendli_client, async_client=mock_friendli_async_client, ) @pytest.mark.requires("friendli") def test_friendli_token_is_secret_string(capsys: CaptureFixture) -> None: """Test if friendli token is stored as a SecretStr.""" fake_token_value = "personal-access-token" chat = Friendli(friendli_token=fake_token_value) # type: ignore[arg-type] assert isinstance(chat.friendli_token, SecretStr) assert chat.friendli_token.get_secret_value() == fake_token_value print(chat.friendli_token, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" @pytest.mark.requires("friendli") def test_friendli_token_read_from_env( monkeypatch: MonkeyPatch, capsys: CaptureFixture ) -> None: """Test if friendli token can be parsed from environment.""" fake_token_value = "personal-access-token" monkeypatch.setenv("FRIENDLI_TOKEN", fake_token_value) chat = Friendli() assert isinstance(chat.friendli_token, SecretStr) assert chat.friendli_token.get_secret_value() == fake_token_value print(chat.friendli_token, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" @pytest.mark.requires("friendli") def test_friendli_invoke(mock_friendli_client: Mock, friendli_llm: Friendli) -> None: """Test 
invocation with friendli.""" mock_choice = Mock() mock_choice.text = "Hello Friendli" mock_response = Mock() mock_response.choices = [mock_choice] mock_friendli_client.completions.create.return_value = mock_response result = friendli_llm.invoke("Hello langchain") assert result == "Hello Friendli" mock_friendli_client.completions.create.assert_called_once_with( model=friendli_llm.model, prompt="Hello langchain", stream=False, frequency_penalty=None, presence_penalty=None, max_tokens=None, stop=None, temperature=None, top_p=None, ) @pytest.mark.requires("friendli") async def test_friendli_ainvoke( mock_friendli_async_client: AsyncMock, friendli_llm: Friendli ) -> None: """Test async invocation with friendli.""" mock_choice = Mock() mock_choice.text = "Hello Friendli" mock_response = Mock() mock_response.choices = [mock_choice] mock_friendli_async_client.completions.create.return_value = mock_response result = await friendli_llm.ainvoke("Hello langchain") assert result == "Hello Friendli" mock_friendli_async_client.completions.create.assert_awaited_once_with( model=friendli_llm.model, prompt="Hello langchain", stream=False, frequency_penalty=None, presence_penalty=None, max_tokens=None, stop=None, temperature=None, top_p=None, ) @pytest.mark.requires("friendli") def test_friendli_stream(mock_friendli_client: Mock, friendli_llm: Friendli) -> None: """Test stream with friendli.""" mock_chunk_0 = Mock() mock_chunk_0.event = "token_sampled" mock_chunk_0.text = "Hello " mock_chunk_0.token = 0 mock_chunk_1 = Mock() mock_chunk_1.event = "token_sampled" mock_chunk_1.text = "Friendli" mock_chunk_1.token = 1 mock_stream = MagicMock() mock_chunks = [mock_chunk_0, mock_chunk_1] mock_stream.__iter__.return_value = mock_chunks mock_friendli_client.completions.create.return_value = mock_stream stream = friendli_llm.stream("Hello langchain") for i, chunk in enumerate(stream): assert chunk == mock_chunks[i].text mock_friendli_client.completions.create.assert_called_once_with( 
model=friendli_llm.model, prompt="Hello langchain", stream=True, frequency_penalty=None, presence_penalty=None, max_tokens=None, stop=None, temperature=None, top_p=None, ) @pytest.mark.requires("friendli") async def test_friendli_astream( mock_friendli_async_client: AsyncMock, friendli_llm: Friendli ) -> None: """Test async stream with friendli.""" mock_chunk_0 = Mock() mock_chunk_0.event = "token_sampled" mock_chunk_0.text = "Hello " mock_chunk_0.token = 0 mock_chunk_1 = Mock() mock_chunk_1.event = "token_sampled" mock_chunk_1.text = "Friendli" mock_chunk_1.token = 1 mock_stream = AsyncMock() mock_chunks = [mock_chunk_0, mock_chunk_1] mock_stream.__aiter__.return_value = mock_chunks mock_friendli_async_client.completions.create.return_value = mock_stream stream = friendli_llm.astream("Hello langchain") async for i, chunk in aenumerate(stream): assert chunk == mock_chunks[i].text mock_friendli_async_client.completions.create.assert_awaited_once_with( model=friendli_llm.model, prompt="Hello langchain", stream=True, frequency_penalty=None, presence_penalty=None, max_tokens=None, stop=None, temperature=None, top_p=None, )
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_fireworks.py
"""Test Fireworks chat model""" import sys import pytest from pydantic import SecretStr from pytest import CaptureFixture from langchain_community.llms import Fireworks if sys.version_info < (3, 9): pytest.skip("fireworks-ai requires Python > 3.8", allow_module_level=True) @pytest.mark.requires("fireworks") def test_api_key_is_string() -> None: llm = Fireworks(fireworks_api_key="secret-api-key") assert isinstance(llm.fireworks_api_key, SecretStr) @pytest.mark.requires("fireworks") def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: llm = Fireworks(fireworks_api_key="secret-api-key") print(llm.fireworks_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_predibase.py
from pydantic import SecretStr
from pytest import CaptureFixture

from langchain_community.llms.predibase import Predibase


def _build(**kwargs):  # type: ignore[no-untyped-def]
    """Construct a Predibase LLM with the standard test model and credentials."""
    return Predibase(  # type: ignore[call-arg]
        model="my_llm",
        predibase_api_key="secret-api-key",  # type: ignore[arg-type]
        **kwargs,
    )


def test_api_key_is_string() -> None:
    """The API key must be stored as a ``SecretStr``."""
    assert isinstance(_build().predibase_api_key, SecretStr)


def test_api_key_masked_when_passed_via_constructor(
    capsys: CaptureFixture,
) -> None:
    """Printing the stored key must emit only the mask."""
    print(_build().predibase_api_key, end="")  # noqa: T201
    assert capsys.readouterr().out == "**********"


def test_specifying_predibase_sdk_version_argument() -> None:
    """``predibase_sdk_version`` defaults to falsy and is stored verbatim."""
    assert not _build().predibase_sdk_version

    legacy_predibase_sdk_version = "2024.4.8"
    pinned = _build(predibase_sdk_version=legacy_predibase_sdk_version)
    assert pinned.predibase_sdk_version == legacy_predibase_sdk_version


def test_specifying_adapter_id_argument() -> None:
    """``adapter_id`` defaults to falsy; setting it leaves the version unset."""
    assert not _build().adapter_id

    for adapter in ("my-hf-adapter", "my-other-hf-adapter"):
        model = _build(adapter_id=adapter)
        assert model.adapter_id == adapter
        assert model.adapter_version is None


def test_specifying_adapter_id_and_adapter_version_arguments() -> None:
    """``adapter_version`` may be given explicitly as ``None`` or an integer."""
    assert not _build().adapter_id

    unversioned = _build(adapter_id="my-hf-adapter", adapter_version=None)
    assert unversioned.adapter_id == "my-hf-adapter"
    assert unversioned.adapter_version is None

    versioned = _build(adapter_id="my-other-hf-adapter", adapter_version=3)
    assert versioned.adapter_id == "my-other-hf-adapter"
    assert versioned.adapter_version == 3
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_yandex.py
import os
from unittest import mock
from unittest.mock import MagicMock

import pytest

from langchain_community.llms.yandex import YandexGPT


def test_yandexgpt_initialization() -> None:
    """Defaults: model_name is yandexgpt-lite and model_uri is derived from it."""
    llm = YandexGPT(
        iam_token="your_iam_token",  # type: ignore[arg-type]
        api_key="your_api_key",  # type: ignore[arg-type]
        folder_id="your_folder_id",
    )
    assert llm.model_name == "yandexgpt-lite"
    assert llm.model_uri.startswith("gpt://your_folder_id/yandexgpt-lite/")


def test_yandexgpt_model_params() -> None:
    """Explicit model name/version propagate into the computed model_uri."""
    llm = YandexGPT(
        model_name="custom-model",
        model_version="v1",
        iam_token="your_iam_token",  # type: ignore[arg-type]
        api_key="your_api_key",  # type: ignore[arg-type]
        folder_id="your_folder_id",
    )
    assert llm.model_name == "custom-model"
    assert llm.model_version == "v1"
    assert llm.iam_token.get_secret_value() == "your_iam_token"
    assert llm.model_uri == "gpt://your_folder_id/custom-model/v1"


def test_yandexgpt_invalid_model_params() -> None:
    """An empty model_uri (or empty credentials) must be rejected."""
    with pytest.raises(ValueError):
        YandexGPT(model_uri="", iam_token="your_iam_token")  # type: ignore[arg-type]
    with pytest.raises(ValueError):
        YandexGPT(
            iam_token="",  # type: ignore[arg-type]
            api_key="your_api_key",  # type: ignore[arg-type]
            model_uri="",
        )


@pytest.mark.parametrize(
    "api_key_or_token", [dict(api_key="bogus"), dict(iam_token="bogus")]
)
@pytest.mark.parametrize(
    "disable_logging",
    [dict(), dict(disable_request_logging=True), dict(disable_request_logging=False)],
)
@mock.patch.dict(os.environ, {}, clear=True)
def test_completion_call(api_key_or_token: dict, disable_logging: dict) -> None:
    """End-to-end invoke() against fully stubbed Yandex gRPC modules.

    The generated yandex.cloud protobuf modules and ``grpc`` are replaced in
    ``sys.modules`` so no real SDK is required. The adjacent string literals
    below are implicit concatenations forming full dotted module names.
    """
    absent_yandex_module_stub = MagicMock()
    grpc_mock = MagicMock()
    with mock.patch.dict(
        "sys.modules",
        {
            "yandex.cloud.ai.foundation_models.v1."
            "text_common_pb2": absent_yandex_module_stub,
            "yandex.cloud.ai.foundation_models.v1.text_generation."
            "text_generation_service_pb2": absent_yandex_module_stub,
            "yandex.cloud.ai.foundation_models.v1.text_generation."
            "text_generation_service_pb2_grpc": absent_yandex_module_stub,
            "grpc": grpc_mock,
        },
    ):
        # The client code catches grpc.RpcError; make it a real exception type.
        grpc_mock.RpcError = Exception
        stub = absent_yandex_module_stub.TextGenerationServiceStub
        request_stub = absent_yandex_module_stub.CompletionRequest
        msg_constructor_stub = absent_yandex_module_stub.Message
        args = {"folder_id": "fldr", **api_key_or_token, **disable_logging}
        ygpt = YandexGPT(**args)
        grpc_call_mock = stub.return_value.Completion
        # Fake a single completion alternative with text "cmpltn".
        msg_mock = mock.Mock()
        msg_mock.message.text = "cmpltn"
        res_mock = mock.Mock()
        res_mock.alternatives = [msg_mock]
        grpc_call_mock.return_value = [res_mock]
        act_emb = ygpt.invoke("nomatter")
        assert act_emb == "cmpltn"
        assert len(grpc_call_mock.call_args_list) == 1
        once_called_args = grpc_call_mock.call_args_list[0]
        # Inspect what was passed into the stubbed request/message constructors.
        act_model_uri = request_stub.call_args_list[0].kwargs["model_uri"]
        act_text = msg_constructor_stub.call_args_list[0].kwargs["text"]
        act_metadata = once_called_args.kwargs["metadata"]
        assert "fldr" in act_model_uri
        assert act_text == "nomatter"
        assert act_metadata
        assert len(act_metadata) > 0
        # When request logging is disabled, the opt-out header must be present.
        if disable_logging.get("disable_request_logging"):
            assert ("x-data-logging-enabled", "false") in act_metadata
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/fake_chat_model.py
"""Fake Chat Model wrapper for testing purposes.""" from typing import Any, Dict, List, Optional from langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain_core.language_models.chat_models import SimpleChatModel from langchain_core.messages import AIMessage, BaseMessage from langchain_core.outputs import ChatGeneration, ChatResult class FakeChatModel(SimpleChatModel): """Fake Chat Model wrapper for testing purposes.""" def _call( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: return "fake response" async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: output_str = "fake response" message = AIMessage(content=output_str) generation = ChatGeneration(message=message) return ChatResult(generations=[generation]) @property def _llm_type(self) -> str: return "fake-chat-model" @property def _identifying_params(self) -> Dict[str, Any]: return {"key": "fake"}
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_databricks.py
"""test Databricks LLM""" from pathlib import Path from typing import Any, Dict import pytest from pytest import MonkeyPatch from langchain_community.llms.databricks import ( Databricks, _load_pickled_fn_from_hex_string, ) from langchain_community.llms.loading import load_llm from tests.integration_tests.llms.utils import assert_llm_equality class MockDatabricksServingEndpointClient: def __init__( self, host: str, api_token: str, endpoint_name: str, databricks_uri: str, task: str, ): self.host = host self.api_token = api_token self.endpoint_name = endpoint_name self.databricks_uri = databricks_uri self.task = task def transform_input(**request: Any) -> Dict[str, Any]: request["messages"] = [{"role": "user", "content": request["prompt"]}] del request["prompt"] return request @pytest.mark.requires("cloudpickle") def test_serde_transform_input_fn(monkeypatch: MonkeyPatch) -> None: import cloudpickle monkeypatch.setattr( "langchain_community.llms.databricks._DatabricksServingEndpointClient", MockDatabricksServingEndpointClient, ) monkeypatch.setenv("DATABRICKS_HOST", "my-default-host") monkeypatch.setenv("DATABRICKS_TOKEN", "my-default-token") llm = Databricks( endpoint_name="some_end_point_name", # Value should not matter for this test transform_input_fn=transform_input, allow_dangerous_deserialization=True, ) params = llm._default_params pickled_string = cloudpickle.dumps(transform_input).hex() assert params["transform_input_fn"] == pickled_string request = {"prompt": "What is the meaning of life?"} fn = _load_pickled_fn_from_hex_string( data=params["transform_input_fn"], allow_dangerous_deserialization=True, ) assert fn(**request) == transform_input(**request) def test_saving_loading_llm(monkeypatch: MonkeyPatch, tmp_path: Path) -> None: monkeypatch.setattr( "langchain_community.llms.databricks._DatabricksServingEndpointClient", MockDatabricksServingEndpointClient, ) monkeypatch.setenv("DATABRICKS_HOST", "my-default-host") monkeypatch.setenv("DATABRICKS_TOKEN", 
"my-default-token") llm = Databricks( endpoint_name="chat", temperature=0.1, ) llm.save(file_path=tmp_path / "databricks.yaml") loaded_llm = load_llm(tmp_path / "databricks.yaml") assert_llm_equality(llm, loaded_llm) @pytest.mark.requires("cloudpickle") def test_saving_loading_llm_dangerous_serde_check( monkeypatch: MonkeyPatch, tmp_path: Path ) -> None: monkeypatch.setattr( "langchain_community.llms.databricks._DatabricksServingEndpointClient", MockDatabricksServingEndpointClient, ) monkeypatch.setenv("DATABRICKS_HOST", "my-default-host") monkeypatch.setenv("DATABRICKS_TOKEN", "my-default-token") llm1 = Databricks( endpoint_name="chat", temperature=0.1, transform_input_fn=lambda x, y, **kwargs: {}, ) llm1.save(file_path=tmp_path / "databricks1.yaml") with pytest.raises(ValueError, match="This code relies on the pickle module."): load_llm(tmp_path / "databricks1.yaml") load_llm(tmp_path / "databricks1.yaml", allow_dangerous_deserialization=True) llm2 = Databricks( endpoint_name="chat", temperature=0.1, transform_output_fn=lambda x: "test" ) llm2.save(file_path=tmp_path / "databricks2.yaml") with pytest.raises(ValueError, match="This code relies on the pickle module."): load_llm(tmp_path / "databricks2.yaml") load_llm(tmp_path / "databricks2.yaml", allow_dangerous_deserialization=True)
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_anyscale.py
"""Test Anyscale llm""" import pytest from pydantic import SecretStr from pytest import CaptureFixture, MonkeyPatch from langchain_community.llms.anyscale import Anyscale @pytest.mark.requires("openai") def test_api_key_is_secret_string() -> None: llm = Anyscale(anyscale_api_key="secret-api-key", anyscale_api_base="test") # type: ignore[arg-type] assert isinstance(llm.anyscale_api_key, SecretStr) @pytest.mark.requires("openai") def test_api_key_masked_when_passed_from_env( monkeypatch: MonkeyPatch, capsys: CaptureFixture ) -> None: """Test initialization with an API key provided via an env variable""" monkeypatch.setenv("ANYSCALE_API_KEY", "secret-api-key") llm = Anyscale(anyscale_api_base="test") print(llm.anyscale_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" @pytest.mark.requires("openai") def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: """Test initialization with an API key provided via the initializer""" llm = Anyscale(anyscale_api_key="secret-api-key", anyscale_api_base="test") # type: ignore[arg-type] print(llm.anyscale_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_bananadev.py
"""Test Banana llm""" from typing import cast from pydantic import SecretStr from pytest import CaptureFixture, MonkeyPatch from langchain_community.llms.bananadev import Banana def test_api_key_is_secret_string() -> None: llm = Banana(banana_api_key="secret-api-key") # type: ignore[arg-type] assert isinstance(llm.banana_api_key, SecretStr) def test_api_key_masked_when_passed_from_env( monkeypatch: MonkeyPatch, capsys: CaptureFixture ) -> None: """Test initialization with an API key provided via an env variable""" monkeypatch.setenv("BANANA_API_KEY", "secret-api-key") llm = Banana() print(llm.banana_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: """Test initialization with an API key provided via the initializer""" llm = Banana(banana_api_key="secret-api-key") # type: ignore[arg-type] print(llm.banana_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" def test_uses_actual_secret_value_from_secretstr() -> None: """Test that actual secret is retrieved using `.get_secret_value()`.""" llm = Banana(banana_api_key="secret-api-key") # type: ignore[arg-type] assert cast(SecretStr, llm.banana_api_key).get_secret_value() == "secret-api-key"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_oci_model_deployment_endpoint.py
# Copyright (c) 2023, 2024, Oracle and/or its affiliates.
"""Test LLM for OCI Data Science Model Deployment Endpoint."""

import sys
from typing import Any, AsyncGenerator, Dict, Generator
from unittest import mock

import pytest
from requests.exceptions import HTTPError

from langchain_community.llms.oci_data_science_model_deployment_endpoint import (
    OCIModelDeploymentTGI,
    OCIModelDeploymentVLLM,
)

# Shared fixtures: fake endpoint/model identifiers plus a canned
# prompt/completion pair used by every test below.
CONST_MODEL_NAME = "odsc-vllm"
CONST_ENDPOINT = "https://oci.endpoint/ocid/predict"
CONST_PROMPT = "This is a prompt."
CONST_COMPLETION = "This is a completion."
# OpenAI/vLLM-spec completion payload.
CONST_COMPLETION_RESPONSE = {
    "choices": [
        {
            "index": 0,
            "text": CONST_COMPLETION,
            "logprobs": 0.1,
            "finish_reason": "length",
        }
    ],
}
# TGI generate-spec payload carrying the same completion text.
CONST_COMPLETION_RESPONSE_TGI = {"generated_text": CONST_COMPLETION}
CONST_STREAM_TEMPLATE = (
    'data: {"id":"","object":"text_completion","created":123456,'
    + '"choices":[{"index":0,"text":"<TOKEN>","finish_reason":""}]}'
)
# NOTE(review): these are generator expressions, so each streaming fixture can
# be consumed only once — fine while exactly one test streams from each, but a
# factory function would be needed if more streaming tests are added.
CONST_STREAM_RESPONSE = (
    CONST_STREAM_TEMPLATE.replace("<TOKEN>", " " + word).encode()
    for word in CONST_COMPLETION.split(" ")
)

CONST_ASYNC_STREAM_TEMPLATE = (
    '{"id":"","object":"text_completion","created":123456,'
    + '"choices":[{"index":0,"text":"<TOKEN>","finish_reason":""}]}'
)
CONST_ASYNC_STREAM_RESPONSE = (
    CONST_ASYNC_STREAM_TEMPLATE.replace("<TOKEN>", " " + word).encode()
    for word in CONST_COMPLETION.split(" ")
)

pytestmark = pytest.mark.skipif(
    sys.version_info < (3, 9), reason="Requires Python 3.9 or higher"
)


class MockResponse:
    """Represents a mocked response."""

    def __init__(self, json_data: Dict, status_code: int = 200) -> None:
        self.json_data = json_data
        self.status_code = status_code

    def raise_for_status(self) -> None:
        """Mocked raise for status."""
        # Mirror requests' behavior: 4xx/5xx raise HTTPError.
        if 400 <= self.status_code < 600:
            raise HTTPError()

    def json(self) -> Dict:
        """Returns mocked json data."""
        return self.json_data

    def iter_lines(self, chunk_size: int = 4096) -> Generator[bytes, None, None]:
        """Returns a generator of mocked streaming response."""
        # Always hands back the shared module-level generator; see the NOTE on
        # CONST_STREAM_RESPONSE about single consumption.
        return CONST_STREAM_RESPONSE

    @property
    def text(self) -> str:
        """Returns the mocked text representation."""
        return ""


def mocked_requests_post(url: str, **kwargs: Any) -> MockResponse:
    """Method to mock post requests"""

    payload: dict = kwargs.get("json", {})
    if "inputs" in payload:
        # TGI generate spec sends the prompt under "inputs";
        # the OpenAI/vLLM spec sends it under "prompt".
        prompt = payload.get("inputs")
        is_tgi = True
    else:
        prompt = payload.get("prompt")
        is_tgi = False

    if prompt == CONST_PROMPT:
        if is_tgi:
            return MockResponse(json_data=CONST_COMPLETION_RESPONSE_TGI)
        return MockResponse(json_data=CONST_COMPLETION_RESPONSE)

    # Unknown prompt: simulate an endpoint 404.
    return MockResponse(
        json_data={},
        status_code=404,
    )


async def mocked_async_streaming_response(
    *args: Any, **kwargs: Any
) -> AsyncGenerator[bytes, None]:
    """Returns mocked response for async streaming."""
    for item in CONST_ASYNC_STREAM_RESPONSE:
        yield item


@pytest.mark.requires("ads")
@mock.patch("ads.common.auth.default_signer", return_value=dict(signer=None))
@mock.patch("requests.post", side_effect=mocked_requests_post)
def test_invoke_vllm(*args: Any) -> None:
    """Tests invoking vLLM endpoint."""
    llm = OCIModelDeploymentVLLM(endpoint=CONST_ENDPOINT, model=CONST_MODEL_NAME)
    output = llm.invoke(CONST_PROMPT)
    assert output == CONST_COMPLETION


@pytest.mark.requires("ads")
@mock.patch("ads.common.auth.default_signer", return_value=dict(signer=None))
@mock.patch("requests.post", side_effect=mocked_requests_post)
def test_stream_tgi(*args: Any) -> None:
    """Tests streaming with TGI endpoint using OpenAI spec."""
    llm = OCIModelDeploymentTGI(
        endpoint=CONST_ENDPOINT, model=CONST_MODEL_NAME, streaming=True
    )
    output = ""
    count = 0
    # One streamed chunk per word of CONST_COMPLETION ("This is a completion."
    # splits into 4 words).
    for chunk in llm.stream(CONST_PROMPT):
        output += chunk
        count += 1
    assert count == 4
    assert output.strip() == CONST_COMPLETION


@pytest.mark.requires("ads")
@mock.patch("ads.common.auth.default_signer", return_value=dict(signer=None))
@mock.patch("requests.post", side_effect=mocked_requests_post)
def test_generate_tgi(*args: Any) -> None:
    """Tests invoking TGI endpoint using TGI generate spec."""
    llm = OCIModelDeploymentTGI(
        endpoint=CONST_ENDPOINT, api="/generate", model=CONST_MODEL_NAME
    )
    output = llm.invoke(CONST_PROMPT)
    assert output == CONST_COMPLETION


@pytest.mark.asyncio
@pytest.mark.requires("ads")
@mock.patch(
    "ads.common.auth.default_signer", return_value=dict(signer=mock.MagicMock())
)
@mock.patch(
    "langchain_community.utilities.requests.Requests.apost",
    mock.MagicMock(),
)
async def test_stream_async(*args: Any) -> None:
    """Tests async streaming."""
    llm = OCIModelDeploymentTGI(
        endpoint=CONST_ENDPOINT, model=CONST_MODEL_NAME, streaming=True
    )
    # Bypass the SSE transport entirely and feed pre-encoded events.
    with mock.patch.object(
        llm,
        "_aiter_sse",
        mock.MagicMock(return_value=mocked_async_streaming_response()),
    ):
        chunks = [chunk async for chunk in llm.astream(CONST_PROMPT)]
    assert "".join(chunks).strip() == CONST_COMPLETION
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_gooseai.py
"""Test GooseAI""" import pytest from pydantic import SecretStr from pytest import MonkeyPatch from langchain_community.llms.gooseai import GooseAI from langchain_community.utils.openai import is_openai_v1 def _openai_v1_installed() -> bool: try: return is_openai_v1() except Exception as _: return False @pytest.mark.requires("openai") def test_api_key_is_secret_string() -> None: llm = GooseAI(gooseai_api_key="secret-api-key") # type: ignore[arg-type, call-arg] assert isinstance(llm.gooseai_api_key, SecretStr) assert llm.gooseai_api_key.get_secret_value() == "secret-api-key" @pytest.mark.skipif( _openai_v1_installed(), reason="GooseAI currently only works with openai<1" ) @pytest.mark.requires("openai") def test_api_key_masked_when_passed_via_constructor() -> None: llm = GooseAI(gooseai_api_key="secret-api-key") # type: ignore[arg-type, call-arg] assert str(llm.gooseai_api_key) == "**********" assert "secret-api-key" not in repr(llm.gooseai_api_key) assert "secret-api-key" not in repr(llm) @pytest.mark.skipif( _openai_v1_installed(), reason="GooseAI currently only works with openai<1" ) @pytest.mark.requires("openai") def test_api_key_masked_when_passed_from_env() -> None: with MonkeyPatch.context() as mp: mp.setenv("GOOSEAI_API_KEY", "secret-api-key") llm = GooseAI() # type: ignore[call-arg] assert str(llm.gooseai_api_key) == "**********" assert "secret-api-key" not in repr(llm.gooseai_api_key) assert "secret-api-key" not in repr(llm)
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_loading.py
"""Test LLM saving and loading functions.""" from pathlib import Path from unittest.mock import patch from langchain_community.llms.loading import load_llm from tests.unit_tests.llms.fake_llm import FakeLLM @patch( "langchain_community.llms.loading.get_type_to_cls_dict", lambda: {"fake": lambda: FakeLLM}, ) def test_saving_loading_round_trip(tmp_path: Path) -> None: """Test saving/loading a Fake LLM.""" fake_llm = FakeLLM() fake_llm.save(file_path=tmp_path / "fake_llm.yaml") loaded_llm = load_llm(tmp_path / "fake_llm.yaml") assert loaded_llm == fake_llm
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_forefrontai.py
"""Test ForeFrontAI LLM""" from typing import cast from pydantic import SecretStr from pytest import CaptureFixture, MonkeyPatch from langchain_community.llms.forefrontai import ForefrontAI def test_forefrontai_api_key_is_secret_string() -> None: """Test that the API key is stored as a SecretStr.""" llm = ForefrontAI(forefrontai_api_key="secret-api-key", temperature=0.2) # type: ignore[arg-type] assert isinstance(llm.forefrontai_api_key, SecretStr) def test_forefrontai_api_key_masked_when_passed_from_env( monkeypatch: MonkeyPatch, capsys: CaptureFixture ) -> None: """Test that the API key is masked when passed from an environment variable.""" monkeypatch.setenv("FOREFRONTAI_API_KEY", "secret-api-key") llm = ForefrontAI(temperature=0.2) # type: ignore[call-arg] print(llm.forefrontai_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" def test_forefrontai_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: """Test that the API key is masked when passed via the constructor.""" llm = ForefrontAI( forefrontai_api_key="secret-api-key", # type: ignore[arg-type] temperature=0.2, ) print(llm.forefrontai_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" def test_forefrontai_uses_actual_secret_value_from_secretstr() -> None: """Test that the actual secret value is correctly retrieved.""" llm = ForefrontAI( forefrontai_api_key="secret-api-key", # type: ignore[arg-type] temperature=0.2, ) assert ( cast(SecretStr, llm.forefrontai_api_key).get_secret_value() == "secret-api-key" )
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/konko.py
"""Test Konko API wrapper. In order to run this test, you need to have an Konko api key. You'll then need to set KONKO_API_KEY environment variable to your api key. """ import pytest as pytest from langchain_community.llms import Konko def test_konko_call() -> None: """Test simple call to konko.""" llm = Konko( model="mistralai/mistral-7b-v0.1", temperature=0.2, max_tokens=250, ) output = llm.invoke("Say foo:") assert llm._llm_type == "konko" assert isinstance(output, str) async def test_konko_acall() -> None: """Test simple call to konko.""" llm = Konko( model="mistralai/mistral-7b-v0.1", temperature=0.2, max_tokens=250, ) output = await llm.agenerate(["Say foo:"], stop=["bar"]) assert llm._llm_type == "konko" output_text = output.generations[0][0].text assert isinstance(output_text, str) assert output_text.count("bar") <= 1
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/__init__.py
"""All unit tests for LLM objects."""
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_moonshot.py
import os import pytest from langchain_community.llms.moonshot import Moonshot os.environ["MOONSHOT_API_KEY"] = "key" @pytest.mark.requires("openai") def test_moonshot_model_param() -> None: llm = Moonshot(model="foo") # type: ignore[call-arg] assert llm.model_name == "foo" llm = Moonshot(model_name="bar") # type: ignore[call-arg] assert llm.model_name == "bar"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_outlines.py
import pytest
from _pytest.monkeypatch import MonkeyPatch

from langchain_community.llms.outlines import Outlines


def test_outlines_initialization(monkeypatch: MonkeyPatch) -> None:
    """Basic fields and the default backend are set on construction."""
    # build_client is stubbed out everywhere below so no model is downloaded.
    monkeypatch.setattr(Outlines, "build_client", lambda self: self)

    llm = Outlines(
        model="microsoft/Phi-3-mini-4k-instruct",
        max_tokens=42,
        stop=["\n"],
    )
    assert llm.model == "microsoft/Phi-3-mini-4k-instruct"
    assert llm.max_tokens == 42
    assert llm.backend == "transformers"
    assert llm.stop == ["\n"]


def test_outlines_backend_llamacpp(monkeypatch: MonkeyPatch) -> None:
    """The llamacpp backend is accepted."""
    monkeypatch.setattr(Outlines, "build_client", lambda self: self)
    llm = Outlines(
        model="TheBloke/Llama-2-7B-Chat-GGUF/llama-2-7b-chat.Q4_K_M.gguf",
        backend="llamacpp",
    )
    assert llm.backend == "llamacpp"


def test_outlines_backend_vllm(monkeypatch: MonkeyPatch) -> None:
    """The vllm backend is accepted."""
    monkeypatch.setattr(Outlines, "build_client", lambda self: self)
    llm = Outlines(model="microsoft/Phi-3-mini-4k-instruct", backend="vllm")
    assert llm.backend == "vllm"


def test_outlines_backend_mlxlm(monkeypatch: MonkeyPatch) -> None:
    """The mlxlm backend is accepted."""
    monkeypatch.setattr(Outlines, "build_client", lambda self: self)
    llm = Outlines(model="microsoft/Phi-3-mini-4k-instruct", backend="mlxlm")
    assert llm.backend == "mlxlm"


def test_outlines_with_regex(monkeypatch: MonkeyPatch) -> None:
    """A regex output constraint is stored verbatim."""
    monkeypatch.setattr(Outlines, "build_client", lambda self: self)
    regex = r"\d{3}-\d{3}-\d{4}"
    llm = Outlines(model="microsoft/Phi-3-mini-4k-instruct", regex=regex)
    assert llm.regex == regex


def test_outlines_with_type_constraints(monkeypatch: MonkeyPatch) -> None:
    """A Python-type output constraint is stored verbatim."""
    monkeypatch.setattr(Outlines, "build_client", lambda self: self)
    llm = Outlines(model="microsoft/Phi-3-mini-4k-instruct", type_constraints=int)
    assert llm.type_constraints == int  # noqa


def test_outlines_with_json_schema(monkeypatch: MonkeyPatch) -> None:
    """A pydantic-model JSON-schema constraint is stored verbatim."""
    monkeypatch.setattr(Outlines, "build_client", lambda self: self)
    from pydantic import BaseModel, Field

    class TestSchema(BaseModel):
        name: str = Field(description="A person's name")
        age: int = Field(description="A person's age")

    llm = Outlines(model="microsoft/Phi-3-mini-4k-instruct", json_schema=TestSchema)
    assert llm.json_schema == TestSchema


def test_outlines_with_grammar(monkeypatch: MonkeyPatch) -> None:
    """A Lark-grammar constraint is stored verbatim."""
    monkeypatch.setattr(Outlines, "build_client", lambda self: self)
    grammar = """
    ?start: expression
    ?expression: term (("+" | "-") term)*
    ?term: factor (("*" | "/") factor)*
    ?factor: NUMBER | "-" factor | "(" expression ")"
    %import common.NUMBER
    """
    llm = Outlines(model="microsoft/Phi-3-mini-4k-instruct", grammar=grammar)
    assert llm.grammar == grammar


def test_raise_for_multiple_output_constraints(monkeypatch: MonkeyPatch) -> None:
    """Supplying more than one output constraint raises ValueError."""
    monkeypatch.setattr(Outlines, "build_client", lambda self: self)
    # Fix: the original duplicated this constructor call after the raises
    # block; the duplicate either never ran (dead code inside the context
    # manager) or raised unguarded and failed the test. One call suffices.
    with pytest.raises(ValueError):
        Outlines(
            model="microsoft/Phi-3-mini-4k-instruct",
            type_constraints=int,
            regex=r"\d{3}-\d{3}-\d{4}",
        )
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_utils.py
"""Test LLM utility functions.""" from langchain_community.llms.utils import enforce_stop_tokens def test_enforce_stop_tokens() -> None: """Test removing stop tokens when they occur.""" text = "foo bar baz" output = enforce_stop_tokens(text, ["moo", "baz"]) assert output == "foo bar " text = "foo bar baz" output = enforce_stop_tokens(text, ["moo", "baz", "bar"]) assert output == "foo " text = "foo bar baz" output = enforce_stop_tokens(text, ["moo", "bar"]) assert output == "foo " def test_enforce_stop_tokens_none() -> None: """Test removing stop tokens when they do not occur.""" text = "foo bar baz" output = enforce_stop_tokens(text, ["moo"]) assert output == "foo bar baz"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/llms/test_minimax.py
"""Test Minimax llm""" from typing import cast from pydantic import SecretStr from pytest import CaptureFixture, MonkeyPatch from langchain_community.llms.minimax import Minimax def test_api_key_is_secret_string() -> None: llm = Minimax(minimax_api_key="secret-api-key", minimax_group_id="group_id") # type: ignore[arg-type, call-arg] assert isinstance(llm.minimax_api_key, SecretStr) def test_api_key_masked_when_passed_from_env( monkeypatch: MonkeyPatch, capsys: CaptureFixture ) -> None: """Test initialization with an API key provided via an env variable""" monkeypatch.setenv("MINIMAX_API_KEY", "secret-api-key") monkeypatch.setenv("MINIMAX_GROUP_ID", "group_id") llm = Minimax() # type: ignore[call-arg] print(llm.minimax_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: """Test initialization with an API key provided via the initializer""" llm = Minimax(minimax_api_key="secret-api-key", minimax_group_id="group_id") # type: ignore[arg-type, call-arg] print(llm.minimax_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" def test_uses_actual_secret_value_from_secretstr() -> None: """Test that actual secret is retrieved using `.get_secret_value()`.""" llm = Minimax(minimax_api_key="secret-api-key", minimax_group_id="group_id") # type: ignore[arg-type, call-arg] assert cast(SecretStr, llm.minimax_api_key).get_secret_value() == "secret-api-key"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/tools/test_zapier.py
"""Test building the Zapier tool, not running it.""" from unittest.mock import MagicMock, patch import pytest import requests from langchain_community.tools.zapier.prompt import BASE_ZAPIER_TOOL_PROMPT from langchain_community.tools.zapier.tool import ZapierNLARunAction from langchain_community.utilities.zapier import ZapierNLAWrapper def test_default_base_prompt() -> None: """Test that the default prompt is being inserted.""" tool = ZapierNLARunAction( action_id="test", zapier_description="test", params_schema={"test": "test"}, api_wrapper=ZapierNLAWrapper( zapier_nla_api_key="test", zapier_nla_oauth_access_token="" ), ) # Test that the base prompt was successfully assigned to the default prompt assert tool.base_prompt == BASE_ZAPIER_TOOL_PROMPT assert tool.description == BASE_ZAPIER_TOOL_PROMPT.format( zapier_description="test", params=str(list({"test": "test"}.keys())), ) def test_custom_base_prompt() -> None: """Test that a custom prompt is being inserted.""" base_prompt = "Test. {zapier_description} and {params}." tool = ZapierNLARunAction( action_id="test", zapier_description="test", params_schema={"test": "test"}, base_prompt=base_prompt, api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"), # type: ignore[call-arg] ) # Test that the base prompt was successfully assigned to the default prompt assert tool.base_prompt == base_prompt assert tool.description == "Test. test and ['test']." def test_custom_base_prompt_fail() -> None: """Test validating an invalid custom prompt.""" base_prompt = "Test. {zapier_description}." 
with pytest.raises(ValueError): ZapierNLARunAction( action_id="test", zapier_description="test", params={"test": "test"}, base_prompt=base_prompt, api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"), # type: ignore[call-arg] ) def test_format_headers_api_key() -> None: """Test that the action headers is being created correctly.""" tool = ZapierNLARunAction( action_id="test", zapier_description="test", params_schema={"test": "test"}, api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"), # type: ignore[call-arg] ) headers = tool.api_wrapper._format_headers() assert headers["Content-Type"] == "application/json" assert headers["Accept"] == "application/json" assert headers["X-API-Key"] == "test" def test_format_headers_access_token() -> None: """Test that the action headers is being created correctly.""" tool = ZapierNLARunAction( action_id="test", zapier_description="test", params_schema={"test": "test"}, api_wrapper=ZapierNLAWrapper(zapier_nla_oauth_access_token="test"), # type: ignore[call-arg] ) headers = tool.api_wrapper._format_headers() assert headers["Content-Type"] == "application/json" assert headers["Accept"] == "application/json" assert headers["Authorization"] == "Bearer test" def test_create_action_payload() -> None: """Test that the action payload is being created correctly.""" tool = ZapierNLARunAction( action_id="test", zapier_description="test", params_schema={"test": "test"}, api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"), # type: ignore[call-arg] ) payload = tool.api_wrapper._create_action_payload("some instructions") assert payload["instructions"] == "some instructions" assert payload.get("preview_only") is None def test_create_action_payload_preview() -> None: """Test that the action payload with preview is being created correctly.""" tool = ZapierNLARunAction( action_id="test", zapier_description="test", params_schema={"test": "test"}, api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"), # type: ignore[call-arg] ) payload = 
tool.api_wrapper._create_action_payload( "some instructions", preview_only=True, ) assert payload["instructions"] == "some instructions" assert payload["preview_only"] is True def test_create_action_payload_with_params() -> None: """Test that the action payload with params is being created correctly.""" tool = ZapierNLARunAction( action_id="test", zapier_description="test", params_schema={"test": "test"}, api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"), # type: ignore[call-arg] ) payload = tool.api_wrapper._create_action_payload( "some instructions", {"test": "test"}, preview_only=True, ) assert payload["instructions"] == "some instructions" assert payload["preview_only"] is True assert payload["test"] == "test" async def test_apreview(mocker) -> None: # type: ignore[no-untyped-def] """Test that the action payload with params is being created correctly.""" tool = ZapierNLARunAction( action_id="test", zapier_description="test", params_schema={"test": "test"}, api_wrapper=ZapierNLAWrapper( # type: ignore[call-arg] zapier_nla_api_key="test", zapier_nla_api_base="http://localhost:8080/v1/", ), ) mockObj = mocker.patch.object(ZapierNLAWrapper, "_arequest") await tool.api_wrapper.apreview( "random_action_id", "some instructions", {"test": "test"}, ) mockObj.assert_called_once_with( "POST", "http://localhost:8080/v1/exposed/random_action_id/execute/", json={ "instructions": "some instructions", "preview_only": True, "test": "test", }, ) async def test_arun(mocker) -> None: # type: ignore[no-untyped-def] """Test that the action payload with params is being created correctly.""" tool = ZapierNLARunAction( action_id="test", zapier_description="test", params_schema={"test": "test"}, api_wrapper=ZapierNLAWrapper( # type: ignore[call-arg] zapier_nla_api_key="test", zapier_nla_api_base="http://localhost:8080/v1/", ), ) mockObj = mocker.patch.object(ZapierNLAWrapper, "_arequest") await tool.api_wrapper.arun( "random_action_id", "some instructions", {"test": "test"}, ) 
mockObj.assert_called_once_with( "POST", "http://localhost:8080/v1/exposed/random_action_id/execute/", json={"instructions": "some instructions", "test": "test"}, ) async def test_alist(mocker) -> None: # type: ignore[no-untyped-def] """Test that the action payload with params is being created correctly.""" tool = ZapierNLARunAction( action_id="test", zapier_description="test", params_schema={"test": "test"}, api_wrapper=ZapierNLAWrapper( # type: ignore[call-arg] zapier_nla_api_key="test", zapier_nla_api_base="http://localhost:8080/v1/", ), ) mockObj = mocker.patch.object(ZapierNLAWrapper, "_arequest") await tool.api_wrapper.alist() mockObj.assert_called_once_with( "GET", "http://localhost:8080/v1/exposed/", ) def test_wrapper_fails_no_api_key_or_access_token_initialization() -> None: """Test Wrapper requires either an API Key or OAuth Access Token.""" with pytest.raises(ValueError): ZapierNLAWrapper() # type: ignore[call-arg] def test_wrapper_api_key_initialization() -> None: """Test Wrapper initializes with an API Key.""" ZapierNLAWrapper(zapier_nla_api_key="test") # type: ignore[call-arg] def test_wrapper_access_token_initialization() -> None: """Test Wrapper initializes with an API Key.""" ZapierNLAWrapper(zapier_nla_oauth_access_token="test") # type: ignore[call-arg] def test_list_raises_401_invalid_api_key() -> None: """Test that a valid error is raised when the API Key is invalid.""" mock_response = MagicMock() mock_response.status_code = 401 mock_response.raise_for_status.side_effect = requests.HTTPError( "401 Client Error: Unauthorized for url: " "https://nla.zapier.com/api/v1/exposed/", response=mock_response, ) mock_session = MagicMock() mock_session.get.return_value = mock_response with patch("requests.Session", return_value=mock_session): wrapper = ZapierNLAWrapper(zapier_nla_api_key="test") # type: ignore[call-arg] with pytest.raises(requests.HTTPError) as err: wrapper.list() assert str(err.value).startswith( "An unauthorized response occurred. 
Check that your api key is correct. " "Err:" ) def test_list_raises_401_invalid_access_token() -> None: """Test that a valid error is raised when the API Key is invalid.""" mock_response = MagicMock() mock_response.status_code = 401 mock_response.raise_for_status.side_effect = requests.HTTPError( "401 Client Error: Unauthorized for url: " "https://nla.zapier.com/api/v1/exposed/", response=mock_response, ) mock_session = MagicMock() mock_session.get.return_value = mock_response with patch("requests.Session", return_value=mock_session): wrapper = ZapierNLAWrapper(zapier_nla_oauth_access_token="test") # type: ignore[call-arg] with pytest.raises(requests.HTTPError) as err: wrapper.list() assert str(err.value).startswith( "An unauthorized response occurred. Check that your access token is " "correct and doesn't need to be refreshed. Err:" ) def test_list_raises_other_error() -> None: """Test that a valid error is raised when an unknown HTTP Error occurs.""" mock_response = MagicMock() mock_response.status_code = 404 mock_response.raise_for_status.side_effect = requests.HTTPError( "404 Client Error: Not found for url", response=mock_response, ) mock_session = MagicMock() mock_session.get.return_value = mock_response with patch("requests.Session", return_value=mock_session): wrapper = ZapierNLAWrapper(zapier_nla_oauth_access_token="test") # type: ignore[call-arg] with pytest.raises(requests.HTTPError) as err: wrapper.list() assert str(err.value) == "404 Client Error: Not found for url"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/tools/test_signatures.py
"""Test base tool child implementations.""" import inspect import re from typing import List, Type import pytest from langchain_core.tools import BaseTool from langchain_community.tools.amadeus.base import AmadeusBaseTool from langchain_community.tools.gmail.base import GmailBaseTool from langchain_community.tools.office365.base import O365BaseTool from langchain_community.tools.playwright.base import BaseBrowserTool from langchain_community.tools.slack.base import SlackBaseTool def get_non_abstract_subclasses(cls: Type[BaseTool]) -> List[Type[BaseTool]]: to_skip = { AmadeusBaseTool, BaseBrowserTool, GmailBaseTool, O365BaseTool, SlackBaseTool, } # Abstract but not recognized subclasses = [] for subclass in cls.__subclasses__(): if ( not getattr(subclass, "__abstract__", None) and not subclass.__name__.startswith("_") and subclass not in to_skip ): subclasses.append(subclass) sc = get_non_abstract_subclasses(subclass) subclasses.extend(sc) return subclasses @pytest.mark.parametrize("cls", get_non_abstract_subclasses(BaseTool)) # type: ignore def test_all_subclasses_accept_run_manager(cls: Type[BaseTool]) -> None: """Test that tools defined in this repo accept a run manager argument.""" # This wouldn't be necessary if the BaseTool had a strict API. if cls._run is not BaseTool._run: run_func = cls._run params = inspect.signature(run_func).parameters assert "run_manager" in params pattern = re.compile(r"(?!Async)CallbackManagerForToolRun") assert bool(re.search(pattern, str(params["run_manager"].annotation))) assert params["run_manager"].default is None if cls._arun is not BaseTool._arun: run_func = cls._arun params = inspect.signature(run_func).parameters assert "run_manager" in params assert "AsyncCallbackManagerForToolRun" in str(params["run_manager"].annotation) assert params["run_manager"].default is None
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/tools/test_imports.py
from langchain_community.tools import __all__, _module_lookup

# The frozen public API of langchain_community.tools. Any addition or removal
# in the package's __all__ must be mirrored here deliberately.
EXPECTED_ALL = [
    "AINAppOps",
    "AINOwnerOps",
    "AINRuleOps",
    "AINTransfer",
    "AINValueOps",
    "AIPluginTool",
    "APIOperation",
    "ArxivQueryRun",
    "AskNewsSearch",
    "AzureAiServicesDocumentIntelligenceTool",
    "AzureAiServicesImageAnalysisTool",
    "AzureAiServicesSpeechToTextTool",
    "AzureAiServicesTextToSpeechTool",
    "AzureAiServicesTextAnalyticsForHealthTool",
    "AzureCogsFormRecognizerTool",
    "AzureCogsImageAnalysisTool",
    "AzureCogsSpeech2TextTool",
    "AzureCogsText2SpeechTool",
    "AzureCogsTextAnalyticsHealthTool",
    "BalanceSheets",
    "BaseGraphQLTool",
    "BaseRequestsTool",
    "BaseSQLDatabaseTool",
    "BaseSparkSQLTool",
    "BaseTool",
    "BearlyInterpreterTool",
    "BingSearchResults",
    "BingSearchRun",
    "BraveSearch",
    "CashFlowStatements",
    "ClickTool",
    "CogniswitchKnowledgeSourceFile",
    "CogniswitchKnowledgeSourceURL",
    "CogniswitchKnowledgeRequest",
    "CogniswitchKnowledgeStatus",
    "ConneryAction",
    "CopyFileTool",
    "CurrentWebPageTool",
    "DataheraldTextToSQL",
    "DeleteFileTool",
    "DuckDuckGoSearchResults",
    "DuckDuckGoSearchRun",
    "E2BDataAnalysisTool",
    "EdenAiExplicitImageTool",
    "EdenAiObjectDetectionTool",
    "EdenAiParsingIDTool",
    "EdenAiParsingInvoiceTool",
    "EdenAiSpeechToTextTool",
    "EdenAiTextModerationTool",
    "EdenAiTextToSpeechTool",
    "EdenaiTool",
    "ElevenLabsText2SpeechTool",
    "ExtractHyperlinksTool",
    "ExtractTextTool",
    "FileSearchTool",
    "GetElementsTool",
    "GmailCreateDraft",
    "GmailGetMessage",
    "GmailGetThread",
    "GmailSearch",
    "GmailSendMessage",
    "GoogleBooksQueryRun",
    "GoogleCloudTextToSpeechTool",
    "GooglePlacesTool",
    "GoogleSearchResults",
    "GoogleSearchRun",
    "GoogleSerperResults",
    "GoogleSerperRun",
    "HumanInputRun",
    "IFTTTWebhook",
    "IncomeStatements",
    "InfoPowerBITool",
    "InfoSQLDatabaseTool",
    "InfoSparkSQLTool",
    "JinaSearch",
    "JiraAction",
    "JsonGetValueTool",
    "JsonListKeysTool",
    "ListDirectoryTool",
    "ListPowerBITool",
    "ListSQLDatabaseTool",
    "ListSparkSQLTool",
    "MetaphorSearchResults",
    "MoveFileTool",
    "NasaAction",
    "NavigateBackTool",
    "NavigateTool",
    "O365CreateDraftMessage",
    "O365SearchEmails",
    "O365SearchEvents",
    "O365SendEvent",
    "O365SendMessage",
    "OpenAPISpec",
    "OpenWeatherMapQueryRun",
    "PubmedQueryRun",
    "PolygonAggregates",
    "PolygonFinancials",
    "PolygonLastQuote",
    "PolygonTickerNews",
    "RedditSearchRun",
    "RedditSearchSchema",
    "QueryCheckerTool",
    "QueryPowerBITool",
    "QuerySQLCheckerTool",
    "QuerySQLDataBaseTool",
    "QuerySparkSQLTool",
    "ReadFileTool",
    "RequestsDeleteTool",
    "RequestsGetTool",
    "RequestsPatchTool",
    "RequestsPostTool",
    "RequestsPutTool",
    "SceneXplainTool",
    "SearchAPIRun",
    "SearchAPIResults",
    "SearxSearchResults",
    "SearxSearchRun",
    "ShellTool",
    "SlackGetChannel",
    "SlackGetMessage",
    "SlackScheduleMessage",
    "SlackSendMessage",
    "SleepTool",
    "StackExchangeTool",
    "StdInInquireTool",
    "SteamWebAPIQueryRun",
    "SteamshipImageGenerationTool",
    "StructuredTool",
    "TavilyAnswer",
    "TavilySearchResults",
    "Tool",
    "VectorStoreQATool",
    "VectorStoreQAWithSourcesTool",
    "WikipediaQueryRun",
    "WolframAlphaQueryRun",
    "WriteFileTool",
    "YahooFinanceNewsTool",
    "YouSearchTool",
    "YouTubeSearchTool",
    "ZapierNLAListActions",
    "ZapierNLARunAction",
    "Detector",
    "ZenGuardInput",
    "ZenGuardTool",
    "authenticate",
    "format_tool_to_openai_function",
    "tool",
    "MerriamWebsterQueryRun",
    "MojeekSearch",
]


def test_all_imports() -> None:
    """``__all__`` matches both the frozen list and the lazy-import table."""
    assert set(__all__) == set(EXPECTED_ALL)
    # Every public name must also be resolvable via the lazy module lookup.
    assert set(__all__) == set(_module_lookup.keys())
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/tools/test_you.py
from unittest.mock import AsyncMock, patch

import responses

from langchain_community.tools.you import YouSearchTool
from langchain_community.utilities.you import YouSearchAPIWrapper

from ..utilities.test_you import (
    LIMITED_PARSED_OUTPUT,
    MOCK_PARSED_OUTPUT,
    MOCK_RESPONSE_RAW,
    NEWS_RESPONSE_PARSED,
    NEWS_RESPONSE_RAW,
    TEST_ENDPOINT,
)


class TestYouSearchTool:
    """Unit tests for ``YouSearchTool`` with the HTTP layer mocked out."""

    @responses.activate
    def test_invoke(self) -> None:
        """A default wrapper returns the fully parsed search output."""
        responses.add(
            responses.GET, f"{TEST_ENDPOINT}/search", json=MOCK_RESPONSE_RAW, status=200
        )

        query = "Test query text"
        you_tool = YouSearchTool(api_wrapper=YouSearchAPIWrapper(ydc_api_key="test"))  # type: ignore[call-arg]
        results = you_tool.invoke(query)
        expected_result = MOCK_PARSED_OUTPUT
        assert results == expected_result

    @responses.activate
    def test_invoke_max_docs(self) -> None:
        """``k=2`` caps the number of returned documents."""
        responses.add(
            responses.GET, f"{TEST_ENDPOINT}/search", json=MOCK_RESPONSE_RAW, status=200
        )

        query = "Test query text"
        you_tool = YouSearchTool(  # type: ignore[call-arg]
            api_wrapper=YouSearchAPIWrapper(ydc_api_key="test", k=2)
        )
        results = you_tool.invoke(query)
        expected_result = [MOCK_PARSED_OUTPUT[0], MOCK_PARSED_OUTPUT[1]]
        assert results == expected_result

    @responses.activate
    def test_invoke_limit_snippets(self) -> None:
        """``n_snippets_per_hit=1`` trims snippets per search hit."""
        responses.add(
            responses.GET, f"{TEST_ENDPOINT}/search", json=MOCK_RESPONSE_RAW, status=200
        )

        query = "Test query text"
        you_tool = YouSearchTool(  # type: ignore[call-arg]
            api_wrapper=YouSearchAPIWrapper(ydc_api_key="test", n_snippets_per_hit=1)
        )
        results = you_tool.invoke(query)
        expected_result = LIMITED_PARSED_OUTPUT
        assert results == expected_result

    @responses.activate
    def test_invoke_news(self) -> None:
        """``endpoint_type="news"`` hits /news and parses its payload."""
        responses.add(
            responses.GET, f"{TEST_ENDPOINT}/news", json=NEWS_RESPONSE_RAW, status=200
        )

        query = "Test news text"
        you_tool = YouSearchTool(  # type: ignore[call-arg]
            api_wrapper=YouSearchAPIWrapper(ydc_api_key="test", endpoint_type="news")
        )
        results = you_tool.invoke(query)
        expected_result = NEWS_RESPONSE_PARSED
        assert results == expected_result

    async def test_ainvoke(self) -> None:
        """Async path parses the same payload via a mocked aiohttp session."""
        you_tool = YouSearchTool(api_wrapper=YouSearchAPIWrapper(ydc_api_key="test"))  # type: ignore[call-arg]

        # Mock response object to simulate aiohttp response
        mock_response = AsyncMock()
        mock_response.__aenter__.return_value = (
            mock_response  # Make the context manager return itself
        )
        mock_response.__aexit__.return_value = None  # No value needed for exit
        mock_response.status = 200
        mock_response.json = AsyncMock(return_value=MOCK_RESPONSE_RAW)

        # Patch the aiohttp.ClientSession object
        with patch("aiohttp.ClientSession.get", return_value=mock_response):
            results = await you_tool.ainvoke("test query")
            assert results == MOCK_PARSED_OUTPUT
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/tools/__init__.py
"""Test suite for the tools module."""
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/tools/test_exported.py
from typing import List, Type

from langchain_core.tools import BaseTool, StructuredTool
from langchain_core.utils.pydantic import get_fields

import langchain_community.tools
from langchain_community.tools import _DEPRECATED_TOOLS
from langchain_community.tools import __all__ as tools_all

# Base classes that are re-exported but are not concrete tools themselves.
_EXCLUDE = {
    BaseTool,
    StructuredTool,
}


def _get_tool_classes(skip_tools_without_default_names: bool) -> List[Type[BaseTool]]:
    """Collect every concrete BaseTool subclass exported by the tools package.

    Args:
        skip_tools_without_default_names: when True, drop classes whose
            default ``name`` field is None or the empty string.

    Returns:
        The exported tool classes, excluding deprecated names and the
        abstract bases in ``_EXCLUDE``.
    """
    results = []
    for tool_class_name in tools_all:
        if tool_class_name in _DEPRECATED_TOOLS:
            continue
        # Resolve the str to the class
        tool_class = getattr(langchain_community.tools, tool_class_name)
        if isinstance(tool_class, type) and issubclass(tool_class, BaseTool):
            if tool_class in _EXCLUDE:
                continue
            default_name = get_fields(tool_class)["name"].default
            if skip_tools_without_default_names and default_name in [  # type: ignore
                None,
                "",
            ]:
                continue
            if not isinstance(default_name, str):
                continue
            results.append(tool_class)
    return results


def test_tool_names_unique() -> None:
    """Test that the default names for our core tools are unique."""
    from collections import Counter

    tool_classes = _get_tool_classes(skip_tools_without_default_names=True)
    # Use get_fields for consistency with _get_tool_classes (portable across
    # pydantic versions) instead of reaching into model_fields directly.
    names = [get_fields(tool_cls)["name"].default for tool_cls in tool_classes]
    # Counter gives an O(n) duplicate scan; the previous names.count(...) loop
    # was O(n^2) and also sorted the list for no observable benefit.
    duplicated_names = [name for name, count in Counter(names).items() if count > 1]
    assert not duplicated_names
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/tools/test_json.py
"""Test functionality of JSON tools.""" from pathlib import Path from langchain_community.tools.json.tool import JsonSpec def test_json_spec_from_file(tmp_path: Path) -> None: """Test JsonSpec can be constructed from a file.""" path = tmp_path / "test.json" path.write_text('{"foo": "bar"}') spec = JsonSpec.from_file(path) assert spec.dict_ == {"foo": "bar"} def test_json_spec_keys() -> None: """Test JsonSpec can return keys of a dict at given path.""" spec = JsonSpec(dict_={"foo": "bar", "baz": {"test": {"foo": [1, 2, 3]}}}) assert spec.keys("data") == "['foo', 'baz']" assert "ValueError" in spec.keys('data["foo"]') assert spec.keys('data["baz"]') == "['test']" assert spec.keys('data["baz"]["test"]') == "['foo']" assert "ValueError" in spec.keys('data["baz"]["test"]["foo"]') def test_json_spec_value() -> None: """Test JsonSpec can return value of a dict at given path.""" spec = JsonSpec(dict_={"foo": "bar", "baz": {"test": {"foo": [1, 2, 3]}}}) assert spec.value("data") == "{'foo': 'bar', 'baz': {'test': {'foo': [1, 2, 3]}}}" assert spec.value('data["foo"]') == "bar" assert spec.value('data["baz"]') == "{'test': {'foo': [1, 2, 3]}}" assert spec.value('data["baz"]["test"]') == "{'foo': [1, 2, 3]}" assert spec.value('data["baz"]["test"]["foo"]') == "[1, 2, 3]" assert spec.value("data['foo']") == "bar" assert spec.value("data['baz']") == "{'test': {'foo': [1, 2, 3]}}" assert spec.value("data['baz']['test']") == "{'foo': [1, 2, 3]}" assert spec.value("data['baz']['test']['foo']") == "[1, 2, 3]" def test_json_spec_value_max_length() -> None: """Test JsonSpec can return value of a dict at given path.""" spec = JsonSpec( dict_={"foo": "bar", "baz": {"test": {"foo": [1, 2, 3]}}}, max_value_length=5 ) assert spec.value('data["foo"]') == "bar" assert ( spec.value('data["baz"]') == "Value is a large dictionary, should explore its keys directly" ) assert ( spec.value('data["baz"]["test"]') == "Value is a large dictionary, should explore its keys directly" ) assert 
spec.value('data["baz"]["test"]["foo"]') == "[1, 2..."
0
lc_public_repos/langchain/libs/community/tests/unit_tests/tools
lc_public_repos/langchain/libs/community/tests/unit_tests/tools/powerbi/test_powerbi.py
def test_power_bi_can_be_imported() -> None:
    """Importing PowerBI tooling must not require optional dependencies.

    Users without the optional PowerBI extras installed should still be able
    to import these modules without an ImportError being raised.
    """
    from langchain_community.agent_toolkits import (  # noqa: F401
        PowerBIToolkit,
        create_pbi_agent,
    )
    from langchain_community.tools.powerbi.tool import QueryPowerBITool  # noqa: F401
    from langchain_community.utilities.powerbi import PowerBIDataset  # noqa: F401
0
lc_public_repos/langchain/libs/community/tests/unit_tests/tools
lc_public_repos/langchain/libs/community/tests/unit_tests/tools/eden_ai/test_tools.py
from collections.abc import Generator
from unittest.mock import MagicMock, patch

import pytest

from langchain_community.tools.edenai import EdenAiTextModerationTool

# One shared tool instance: the API key is fake and `requests.post` is
# patched in every test, so no real request is ever made.
tool = EdenAiTextModerationTool(  # type: ignore[call-arg]
    providers=["openai"],
    language="en",
    edenai_api_key="fake_key",  # type: ignore[arg-type]
)


@pytest.fixture
def mock_post() -> Generator:
    """Patch requests.post inside the EdenAI base tool for one test."""
    with patch(
        "langchain_community.tools.edenai.edenai_base_tool.requests.post"
    ) as mocked:
        yield mocked


def _fake_response(status_code: int, payload: object) -> MagicMock:
    """Build a response double with the given HTTP status and JSON body."""
    resp = MagicMock()
    resp.status_code = status_code
    resp.json.return_value = payload
    return resp


def test_provider_not_available(mock_post: MagicMock) -> None:
    """A provider-side failure entry makes the tool raise ValueError."""
    mock_post.return_value = _fake_response(
        200,
        [
            {
                "error": {
                    "message": """Amazon has returned an error: An error occurred (TextSizeLimitExceededException) when calling the DetectTargetedSentiment operation: Input text size exceeds limit. Max length of request text allowed is 5000 bytes while in this request the text size is 47380 bytes""",
                    "type": "ProviderInvalidInputTextLengthError",
                },
                "status": "fail",
                "provider": "amazon",
                "provider_status_code": 400,
                "cost": 0.0,
            }
        ],
    )
    with pytest.raises(ValueError):
        tool._run("some query")


def test_unexpected_response(mock_post: MagicMock) -> None:
    """A success entry missing the expected fields raises RuntimeError."""
    mock_post.return_value = _fake_response(200, [{"status": "success"}])
    with pytest.raises(RuntimeError):
        tool._run("some query")


def test_incomplete_response(mock_post: MagicMock) -> None:
    """A success entry without per-label likelihoods raises RuntimeError."""
    mock_post.return_value = _fake_response(
        200,
        [
            {
                "status": "success",
                "provider": "microsoft",
                "nsfw_likelihood": 5,
                "cost": 0.001,
                "label": ["sexually explicit", "sexually suggestive", "offensive"],
            }
        ],
    )
    with pytest.raises(RuntimeError):
        tool._run("some query")


def test_invalid_payload(mock_post: MagicMock) -> None:
    """A non-200 HTTP status raises ValueError."""
    mock_post.return_value = _fake_response(400, {})
    with pytest.raises(ValueError):
        tool._run("some query")


def test_parse_response_format(mock_post: MagicMock) -> None:
    """A complete response is rendered into the expected text layout."""
    mock_post.return_value = _fake_response(
        200,
        [
            {
                "status": "success",
                "provider": "microsoft",
                "nsfw_likelihood": 5,
                "cost": 0.001,
                "label": ["offensive", "hate_speech"],
                "likelihood": [4, 5],
            }
        ],
    )
    result = tool.invoke("some query")
    assert result == 'nsfw_likelihood: 5\n"offensive": 4\n"hate_speech": 5'
0
lc_public_repos/langchain/libs/community/tests/unit_tests/tools
lc_public_repos/langchain/libs/community/tests/unit_tests/tools/openai_dalle_image_generation/test_image_generation.py
from unittest.mock import MagicMock

from langchain_community.tools.openai_dalle_image_generation import (
    OpenAIDALLEImageGenerationTool,
)


def test_generate_image() -> None:
    """OpenAI DALL-E image generation returns an image URL."""
    # `construct` bypasses pydantic validation because the real openai
    # client is not a package dependency of this test suite.
    tool = OpenAIDALLEImageGenerationTool.construct(api_wrapper=MagicMock())
    result = tool.run({"query": "parrot on a branch"})
    assert result.startswith("https://")
0
lc_public_repos/langchain/libs/community/tests/unit_tests/tools
lc_public_repos/langchain/libs/community/tests/unit_tests/tools/shell/test_shell.py
import warnings
from typing import List
from unittest.mock import patch

from langchain_community.tools.shell.tool import ShellInput, ShellTool

# Commands used as fixtures throughout the tests below.
test_commands = ["echo 'Hello, World!'", "echo 'Another command'"]


def test_shell_input_validation() -> None:
    """ShellInput accepts a command list and warns about missing safeguards."""
    shell_input = ShellInput(commands=test_commands)
    assert isinstance(shell_input.commands, list)
    assert len(shell_input.commands) == 2
    with warnings.catch_warnings(record=True) as caught:
        ShellInput(commands=test_commands)
        assert len(caught) == 1
        assert (
            str(caught[-1].message)
            == "The shell tool has no safeguards by default. Use at your own risk."
        )


class PlaceholderProcess:
    """Stand-in for the shell process: records commands, returns canned output."""

    def __init__(self, output: str = "") -> None:
        self._commands: List[str] = []
        self.output = output

    def _run(self, commands: List[str]) -> str:
        self._commands = commands
        return self.output

    def run(self, commands: List[str]) -> str:
        return self._run(commands)

    async def arun(self, commands: List[str]) -> str:
        return self._run(commands)


def test_shell_tool_init() -> None:
    """ShellTool exposes the expected name, schema, and process."""
    tool = ShellTool(process=PlaceholderProcess())
    assert tool.name == "terminal"
    assert isinstance(tool.description, str)
    assert tool.args_schema == ShellInput
    assert tool.process is not None


def test_shell_tool_run() -> None:
    """Synchronous run forwards commands and returns the process output."""
    tool = ShellTool(process=PlaceholderProcess(output="hello"))
    assert tool._run(commands=test_commands).strip() == "hello"


async def test_shell_tool_arun() -> None:
    """Asynchronous run behaves like the synchronous one."""
    tool = ShellTool(process=PlaceholderProcess(output="hello"))
    assert (await tool._arun(commands=test_commands)).strip() == "hello"


def test_shell_tool_run_str() -> None:
    """A single command string is accepted in place of a list."""
    tool = ShellTool(process=PlaceholderProcess(output="hello"))
    assert tool._run(commands="echo 'Hello, World!'").strip() == "hello"


async def test_shell_tool_arun_with_user_confirmation() -> None:
    """With ask_human_input, 'y' runs the command and 'n' returns None."""
    tool = ShellTool(process=PlaceholderProcess(output="hello"), ask_human_input=True)

    with patch("builtins.input", return_value="y"):
        assert (await tool._arun(commands=test_commands)).strip() == "hello"

    with patch("builtins.input", return_value="n"):
        assert await tool._arun(commands=test_commands) is None


def test_shell_tool_run_with_user_confirmation() -> None:
    """Synchronous confirmation mirrors the async behavior."""
    tool = ShellTool(process=PlaceholderProcess(output="hello"), ask_human_input=True)

    with patch("builtins.input", return_value="y"):
        assert tool._run(commands="echo 'Hello, World!'").strip() == "hello"

    with patch("builtins.input", return_value="n"):
        assert tool._run(commands="echo 'Hello, World!'") is None
0
lc_public_repos/langchain/libs/community/tests/unit_tests/tools
lc_public_repos/langchain/libs/community/tests/unit_tests/tools/gmail/test_send.py
from unittest.mock import MagicMock

from langchain_community.tools.gmail.send_message import GmailSendMessage


def test_send() -> None:
    """Test gmail send."""
    # `construct` bypasses pydantic validation because the google API client
    # is not a package dependency of this test suite.
    tool = GmailSendMessage.construct(api_resource=MagicMock())
    result = tool.run(
        {
            "to": "fake123@email.com",
            "subject": "subject line",
            "message": "message body",
        }
    )
    assert result.startswith("Message sent. Message Id:")
    assert tool.args_schema is not None
0
lc_public_repos/langchain/libs/community/tests/unit_tests/tools
lc_public_repos/langchain/libs/community/tests/unit_tests/tools/databricks/test_tools.py
from unittest import mock

import pytest

from langchain_community.tools.databricks._execution import (
    DEFAULT_EXECUTE_FUNCTION_ARGS,
    EXECUTE_FUNCTION_ARG_NAME,
    execute_function,
)


@pytest.mark.requires("databricks.sdk")
@pytest.mark.parametrize(
    ("parameters", "execute_params"),
    [
        # No override: the default execute args are forwarded untouched.
        ({"a": 1, "b": 2}, DEFAULT_EXECUTE_FUNCTION_ARGS),
        # A reserved-key dict overrides individual defaults.
        (
            {"a": 1, EXECUTE_FUNCTION_ARG_NAME: {"wait_timeout": "10s"}},
            {**DEFAULT_EXECUTE_FUNCTION_ARGS, "wait_timeout": "10s"},
        ),
        (
            {EXECUTE_FUNCTION_ARG_NAME: {"row_limit": "1000"}},
            {**DEFAULT_EXECUTE_FUNCTION_ARGS, "row_limit": "1000"},
        ),
    ],
)
def test_execute_function(parameters: dict, execute_params: dict) -> None:
    """execute_function forwards the expected kwargs to statement_execution."""
    workspace_client = mock.Mock()

    def mock_execute_statement(  # type: ignore
        statement,
        warehouse_id,
        *,
        byte_limit=None,
        catalog=None,
        disposition=None,
        format=None,
        on_wait_timeout=None,
        parameters=None,
        row_limit=None,
        schema=None,
        wait_timeout=None,
    ):
        # Each expected kwarg is compared against the locally bound parameter
        # of the same name; the parameter names above must stay in sync with
        # the keys of execute_params for this locals() lookup to work.
        for key, value in execute_params.items():
            assert locals()[key] == value
        return mock.Mock()

    workspace_client.statement_execution.execute_statement = mock_execute_statement
    function = mock.Mock()
    function.data_type = "TABLE_TYPE"
    function.input_params.parameters = []

    execute_function(
        workspace_client, warehouse_id="id", function=function, parameters=parameters
    )


@pytest.mark.requires("databricks.sdk")
def test_execute_function_error() -> None:
    """Unknown keys inside the reserved execute-args dict raise ValueError."""
    workspace_client = mock.Mock()

    def mock_execute_statement(  # type: ignore
        statement,
        warehouse_id,
        *,
        byte_limit=None,
        catalog=None,
        disposition=None,
        format=None,
        on_wait_timeout=None,
        parameters=None,
        row_limit=None,
        schema=None,
        wait_timeout=None,
    ):
        return mock.Mock()

    workspace_client.statement_execution.execute_statement = mock_execute_statement
    function = mock.Mock()
    function.data_type = "TABLE_TYPE"
    function.input_params.parameters = []
    # 'invalid_param' is not an accepted execute-statement argument.
    parameters = {EXECUTE_FUNCTION_ARG_NAME: {"invalid_param": "123"}}

    with pytest.raises(
        ValueError,
        match=r"Invalid parameters for executing functions: {'invalid_param'}. ",
    ):
        execute_function(
            workspace_client,
            warehouse_id="id",
            function=function,
            parameters=parameters,
        )
0
lc_public_repos/langchain/libs/community/tests/unit_tests/tools
lc_public_repos/langchain/libs/community/tests/unit_tests/tools/file_management/test_move.py
"""Test the FileMove tool.""" from pathlib import Path from tempfile import TemporaryDirectory from langchain_community.tools.file_management.move import MoveFileTool from langchain_community.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, ) def test_move_file_with_root_dir() -> None: """Test the FileMove tool when a root dir is specified.""" with TemporaryDirectory() as temp_dir: tool = MoveFileTool(root_dir=temp_dir) source_file = Path(temp_dir) / "source.txt" destination_file = Path(temp_dir) / "destination.txt" source_file.write_text("Hello, world!") tool.run({"source_path": "source.txt", "destination_path": "destination.txt"}) assert not source_file.exists() assert destination_file.exists() assert destination_file.read_text() == "Hello, world!" def test_move_file_errs_outside_root_dir() -> None: """Test the FileMove tool when a root dir is specified.""" with TemporaryDirectory() as temp_dir: tool = MoveFileTool(root_dir=temp_dir) result = tool.run( { "source_path": "../source.txt", "destination_path": "../destination.txt", } ) assert result == INVALID_PATH_TEMPLATE.format( arg_name="source_path", value="../source.txt" ) def test_move_file() -> None: """Test the FileMove tool.""" with TemporaryDirectory() as temp_dir: tool = MoveFileTool() source_file = Path(temp_dir) / "source.txt" destination_file = Path(temp_dir) / "destination.txt" source_file.write_text("Hello, world!") tool.run( {"source_path": str(source_file), "destination_path": str(destination_file)} ) assert not source_file.exists() assert destination_file.exists() assert destination_file.read_text() == "Hello, world!"
0
lc_public_repos/langchain/libs/community/tests/unit_tests/tools
lc_public_repos/langchain/libs/community/tests/unit_tests/tools/file_management/test_file_search.py
"""Test the FileSearch tool.""" from pathlib import Path from tempfile import TemporaryDirectory from langchain_community.tools.file_management.file_search import FileSearchTool from langchain_community.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, ) def test_file_search_with_root_dir() -> None: """Test the FileSearch tool when a root dir is specified.""" with TemporaryDirectory() as temp_dir: tool = FileSearchTool(root_dir=temp_dir) file_1 = Path(temp_dir) / "file1.txt" file_2 = Path(temp_dir) / "file2.log" file_1.write_text("File 1 content") file_2.write_text("File 2 content") matches = tool.run({"dir_path": ".", "pattern": "*.txt"}).split("\n") assert len(matches) == 1 assert Path(matches[0]).name == "file1.txt" def test_file_search_errs_outside_root_dir() -> None: """Test the FileSearch tool when a root dir is specified.""" with TemporaryDirectory() as temp_dir: tool = FileSearchTool(root_dir=temp_dir) result = tool.run({"dir_path": "..", "pattern": "*.txt"}) assert result == INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value="..") def test_file_search() -> None: """Test the FileSearch tool.""" with TemporaryDirectory() as temp_dir: tool = FileSearchTool() file_1 = Path(temp_dir) / "file1.txt" file_2 = Path(temp_dir) / "file2.log" file_1.write_text("File 1 content") file_2.write_text("File 2 content") matches = tool.run({"dir_path": temp_dir, "pattern": "*.txt"}).split("\n") assert len(matches) == 1 assert Path(matches[0]).name == "file1.txt"
0
lc_public_repos/langchain/libs/community/tests/unit_tests/tools
lc_public_repos/langchain/libs/community/tests/unit_tests/tools/file_management/test_read.py
"""Test the ReadFile tool.""" from pathlib import Path from tempfile import TemporaryDirectory from langchain_community.tools.file_management.read import ReadFileTool def test_read_file_with_root_dir() -> None: """Test the ReadFile tool.""" with TemporaryDirectory() as temp_dir: with (Path(temp_dir) / "file.txt").open("w") as f: f.write("Hello, world!") tool = ReadFileTool(root_dir=temp_dir) result = tool.run("file.txt") assert result == "Hello, world!" # Check absolute files can still be passed if they lie within the root dir. result = tool.run(str(Path(temp_dir) / "file.txt")) assert result == "Hello, world!" def test_read_file() -> None: """Test the ReadFile tool.""" with TemporaryDirectory() as temp_dir: with (Path(temp_dir) / "file.txt").open("w") as f: f.write("Hello, world!") tool = ReadFileTool() result = tool.run(str(Path(temp_dir) / "file.txt")) assert result == "Hello, world!"
0
lc_public_repos/langchain/libs/community/tests/unit_tests/tools
lc_public_repos/langchain/libs/community/tests/unit_tests/tools/file_management/test_write.py
"""Test the WriteFile tool.""" from pathlib import Path from tempfile import TemporaryDirectory from langchain_community.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, ) from langchain_community.tools.file_management.write import WriteFileTool def test_write_file_with_root_dir() -> None: """Test the WriteFile tool when a root dir is specified.""" with TemporaryDirectory() as temp_dir: tool = WriteFileTool(root_dir=temp_dir) tool.run({"file_path": "file.txt", "text": "Hello, world!"}) assert (Path(temp_dir) / "file.txt").exists() assert (Path(temp_dir) / "file.txt").read_text() == "Hello, world!" def test_write_file_errs_outside_root_dir() -> None: """Test the WriteFile tool when a root dir is specified.""" with TemporaryDirectory() as temp_dir: tool = WriteFileTool(root_dir=temp_dir) result = tool.run({"file_path": "../file.txt", "text": "Hello, world!"}) assert result == INVALID_PATH_TEMPLATE.format( arg_name="file_path", value="../file.txt" ) def test_write_file() -> None: """Test the WriteFile tool.""" with TemporaryDirectory() as temp_dir: file_path = str(Path(temp_dir) / "file.txt") tool = WriteFileTool() tool.run({"file_path": file_path, "text": "Hello, world!"}) assert (Path(temp_dir) / "file.txt").exists() assert (Path(temp_dir) / "file.txt").read_text() == "Hello, world!"
0
lc_public_repos/langchain/libs/community/tests/unit_tests/tools
lc_public_repos/langchain/libs/community/tests/unit_tests/tools/file_management/test_copy.py
"""Test the FileCopy tool.""" from pathlib import Path from tempfile import TemporaryDirectory from langchain_community.tools.file_management.copy import CopyFileTool from langchain_community.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, ) def test_copy_file_with_root_dir() -> None: """Test the FileCopy tool when a root dir is specified.""" with TemporaryDirectory() as temp_dir: tool = CopyFileTool(root_dir=temp_dir) source_file = Path(temp_dir) / "source.txt" destination_file = Path(temp_dir) / "destination.txt" source_file.write_text("Hello, world!") tool.run({"source_path": "source.txt", "destination_path": "destination.txt"}) assert source_file.exists() assert destination_file.exists() assert source_file.read_text() == "Hello, world!" assert destination_file.read_text() == "Hello, world!" def test_copy_file_errs_outside_root_dir() -> None: """Test the FileCopy tool when a root dir is specified.""" with TemporaryDirectory() as temp_dir: tool = CopyFileTool(root_dir=temp_dir) result = tool.run( { "source_path": "../source.txt", "destination_path": "../destination.txt", } ) assert result == INVALID_PATH_TEMPLATE.format( arg_name="source_path", value="../source.txt" ) def test_copy_file() -> None: """Test the FileCopy tool.""" with TemporaryDirectory() as temp_dir: tool = CopyFileTool() source_file = Path(temp_dir) / "source.txt" destination_file = Path(temp_dir) / "destination.txt" source_file.write_text("Hello, world!") tool.run( {"source_path": str(source_file), "destination_path": str(destination_file)} ) assert source_file.exists() assert destination_file.exists() assert source_file.read_text() == "Hello, world!" assert destination_file.read_text() == "Hello, world!"
0
lc_public_repos/langchain/libs/community/tests/unit_tests/tools
lc_public_repos/langchain/libs/community/tests/unit_tests/tools/file_management/test_toolkit.py
"""Test the FileManagementToolkit.""" from tempfile import TemporaryDirectory import pytest from langchain_core.tools import BaseTool from langchain_community.agent_toolkits.file_management.toolkit import ( FileManagementToolkit, ) def test_file_toolkit_get_tools() -> None: """Test the get_tools method of FileManagementToolkit.""" with TemporaryDirectory() as temp_dir: toolkit = FileManagementToolkit(root_dir=temp_dir) tools = toolkit.get_tools() assert len(tools) > 0 assert all(isinstance(tool, BaseTool) for tool in tools) def test_file_toolkit_get_tools_with_selection() -> None: """Test the get_tools method of FileManagementToolkit with selected_tools.""" with TemporaryDirectory() as temp_dir: toolkit = FileManagementToolkit( root_dir=temp_dir, selected_tools=["read_file", "write_file"] ) tools = toolkit.get_tools() assert len(tools) == 2 tool_names = [tool.name for tool in tools] assert "read_file" in tool_names assert "write_file" in tool_names def test_file_toolkit_invalid_tool() -> None: """Test the FileManagementToolkit with an invalid tool.""" with TemporaryDirectory() as temp_dir: with pytest.raises(ValueError): FileManagementToolkit(root_dir=temp_dir, selected_tools=["invalid_tool"]) def test_file_toolkit_root_dir() -> None: """Test the FileManagementToolkit root_dir handling.""" with TemporaryDirectory() as temp_dir: toolkit = FileManagementToolkit(root_dir=temp_dir) tools = toolkit.get_tools() root_dirs = [tool.root_dir for tool in tools if hasattr(tool, "root_dir")] assert all(root_dir == temp_dir for root_dir in root_dirs)
0
lc_public_repos/langchain/libs/community/tests/unit_tests/tools
lc_public_repos/langchain/libs/community/tests/unit_tests/tools/file_management/test_list_dir.py
"""Test the DirectoryListing tool.""" from pathlib import Path from tempfile import TemporaryDirectory from langchain_community.tools.file_management.list_dir import ListDirectoryTool from langchain_community.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, ) def test_list_directory_with_root_dir() -> None: """Test the DirectoryListing tool when a root dir is specified.""" with TemporaryDirectory() as temp_dir: tool = ListDirectoryTool(root_dir=temp_dir) file_1 = Path(temp_dir) / "file1.txt" file_2 = Path(temp_dir) / "file2.txt" file_1.write_text("File 1 content") file_2.write_text("File 2 content") entries = tool.run({"dir_path": "."}).split("\n") assert set(entries) == {"file1.txt", "file2.txt"} def test_list_directory_errs_outside_root_dir() -> None: """Test the DirectoryListing tool when a root dir is specified.""" with TemporaryDirectory() as temp_dir: tool = ListDirectoryTool(root_dir=temp_dir) result = tool.run({"dir_path": ".."}) assert result == INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value="..") def test_list_directory() -> None: """Test the DirectoryListing tool.""" with TemporaryDirectory() as temp_dir: tool = ListDirectoryTool() file_1 = Path(temp_dir) / "file1.txt" file_2 = Path(temp_dir) / "file2.txt" file_1.write_text("File 1 content") file_2.write_text("File 2 content") entries = tool.run({"dir_path": temp_dir}).split("\n") assert set(entries) == {"file1.txt", "file2.txt"}
0
lc_public_repos/langchain/libs/community/tests/unit_tests/tools
lc_public_repos/langchain/libs/community/tests/unit_tests/tools/file_management/test_utils.py
"""Test the File Management utils.""" import re from pathlib import Path from tempfile import TemporaryDirectory import pytest from langchain_community.tools.file_management.utils import ( FileValidationError, get_validated_relative_path, ) def test_get_validated_relative_path_errs_on_absolute() -> None: """Safely resolve a path.""" root = Path(__file__).parent user_path = "/bin/bash" match = re.escape(f"Path {user_path} is outside of the allowed directory {root}") with pytest.raises(FileValidationError, match=match): get_validated_relative_path(root, user_path) def test_get_validated_relative_path_errs_on_parent_dir() -> None: """Safely resolve a path.""" root = Path(__file__).parent user_path = "data/sub/../../../sibling" match = re.escape(f"Path {user_path} is outside of the allowed directory {root}") with pytest.raises(FileValidationError, match=match): get_validated_relative_path(root, user_path) def test_get_validated_relative_path() -> None: """Safely resolve a path.""" root = Path(__file__).parent user_path = "data/sub/file.txt" expected = root / user_path result = get_validated_relative_path(root, user_path) assert result == expected def test_get_validated_relative_path_errs_for_symlink_outside_root() -> None: """Test that symlink pointing outside of root directory is not allowed.""" with TemporaryDirectory() as temp_dir: root = Path(temp_dir) user_path = "symlink_outside_root" outside_path = Path("/bin/bash") symlink_path = root / user_path symlink_path.symlink_to(outside_path) match = re.escape( f"Path {user_path} is outside of the allowed directory {root.resolve()}" ) with pytest.raises(FileValidationError, match=match): get_validated_relative_path(root, user_path) symlink_path.unlink() def test_get_validated_relative_path_for_symlink_inside_root() -> None: """Test that symlink pointing inside the root directory is allowed.""" with TemporaryDirectory() as temp_dir: root = Path(temp_dir) user_path = "symlink_inside_root" target_path = "data/sub/file.txt" 
symlink_path = root / user_path target_path_ = root / target_path symlink_path.symlink_to(target_path_) expected = target_path_.resolve() result = get_validated_relative_path(root, user_path) assert result == expected symlink_path.unlink()