index | repo_id | file_path | content |
|---|---|---|---|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/cross_encoders/__init__.py | """Test cross encoder integrations."""
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/prompts/test_ngram_overlap_example_selector.py | """Test functionality related to ngram overlap based selector."""
import pytest
from langchain_core.prompts import PromptTemplate
from langchain_community.example_selectors import (
NGramOverlapExampleSelector,
ngram_overlap_score,
)
EXAMPLES = [
{"input": "See Spot run.", "output": "foo1"},
{"input": "My dog barks.", "output": "foo2"},
{"input": "Spot can run.", "output": "foo3"},
]
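# The selector returns examples sorted by descending n-gram overlap with the
# input; examples scoring at or below `threshold` are excluded (exercised by
# the threshold tests below).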
@pytest.fixture
def selector() -> NGramOverlapExampleSelector:
"""Get ngram overlap based selector to use in tests."""
prompts = PromptTemplate(
input_variables=["input", "output"], template="Input: {input}\nOutput: {output}"
)
selector = NGramOverlapExampleSelector(
examples=EXAMPLES,
example_prompt=prompts,
)
return selector
def test_selector_valid(selector: NGramOverlapExampleSelector) -> None:
"""Test NGramOverlapExampleSelector can select examples."""
sentence = "Spot can run."
output = selector.select_examples({"input": sentence})
assert output == [EXAMPLES[2], EXAMPLES[0], EXAMPLES[1]]
def test_selector_add_example(selector: NGramOverlapExampleSelector) -> None:
"""Test NGramOverlapExampleSelector can add an example."""
new_example = {"input": "Spot plays fetch.", "output": "foo4"}
selector.add_example(new_example)
sentence = "Spot can run."
output = selector.select_examples({"input": sentence})
assert output == [EXAMPLES[2], EXAMPLES[0]] + [new_example] + [EXAMPLES[1]]
def test_selector_threshold_zero(selector: NGramOverlapExampleSelector) -> None:
"""Tests NGramOverlapExampleSelector threshold set to 0.0."""
selector.threshold = 0.0
sentence = "Spot can run."
output = selector.select_examples({"input": sentence})
assert output == [EXAMPLES[2], EXAMPLES[0]]
def test_selector_threshold_more_than_one(
selector: NGramOverlapExampleSelector,
) -> None:
"""Tests NGramOverlapExampleSelector threshold greater than 1.0."""
selector.threshold = 1.0 + 1e-9
sentence = "Spot can run."
output = selector.select_examples({"input": sentence})
assert output == []
def test_ngram_overlap_score(selector: NGramOverlapExampleSelector) -> None:
"""Tests that ngram_overlap_score returns correct values."""
selector.threshold = 1.0 + 1e-9
none = ngram_overlap_score(["Spot can run."], ["My dog barks."])
some = ngram_overlap_score(["Spot can run."], ["See Spot run."])
complete = ngram_overlap_score(["Spot can run."], ["Spot can run."])
check = [abs(none - 0.0) < 1e-9, 0.0 < some < 1.0, abs(complete - 1.0) < 1e-9]
assert check == [True, True, True]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/cache/test_opensearch_cache.py | from langchain.globals import get_llm_cache, set_llm_cache
from langchain_core.outputs import Generation
from langchain_community.cache import OpenSearchSemanticCache
from tests.integration_tests.cache.fake_embeddings import (
FakeEmbeddings,
)
from tests.unit_tests.llms.fake_llm import FakeLLM
DEFAULT_OPENSEARCH_URL = "http://localhost:9200"
def test_opensearch_semantic_cache() -> None:
"""Test opensearch semantic cache functionality."""
set_llm_cache(
OpenSearchSemanticCache(
embedding=FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL,
score_threshold=0.0,
)
)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
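    # Serialize the sorted LLM params into the string used as the cache key.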
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update("foo", llm_string, [Generation(text="fizz")])
cache_output = get_llm_cache().lookup("bar", llm_string)
assert cache_output == [Generation(text="fizz")]
get_llm_cache().clear(llm_string=llm_string)
output = get_llm_cache().lookup("bar", llm_string)
assert output != [Generation(text="fizz")]
def test_opensearch_semantic_cache_multi() -> None:
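    """Test opensearch semantic cache with multiple generations per prompt."""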
set_llm_cache(
OpenSearchSemanticCache(
embedding=FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL,
score_threshold=0.0,
)
)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update(
"foo", llm_string, [Generation(text="fizz"), Generation(text="Buzz")]
)
# foo and bar will have the same embedding produced by FakeEmbeddings
cache_output = get_llm_cache().lookup("bar", llm_string)
assert cache_output == [Generation(text="fizz"), Generation(text="Buzz")]
# clear the cache
get_llm_cache().clear(llm_string=llm_string)
output = get_llm_cache().lookup("bar", llm_string)
assert output != [Generation(text="fizz"), Generation(text="Buzz")]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/cache/test_azure_cosmosdb_cache.py | """Test Azure CosmosDB cache functionality.
Required to run this test:
- a recent 'pymongo' Python package available
- an Azure CosmosDB Mongo vCore instance
- one environment variable set:
export MONGODB_VCORE_URI="connection string for azure cosmos db mongo vCore"
"""
import os
import uuid
import pytest
from langchain.globals import get_llm_cache, set_llm_cache
from langchain_core.outputs import Generation
from langchain_community.cache import AzureCosmosDBSemanticCache
from langchain_community.vectorstores.azure_cosmos_db import (
CosmosDBSimilarityType,
CosmosDBVectorSearchType,
)
from tests.integration_tests.cache.fake_embeddings import (
FakeEmbeddings,
)
from tests.unit_tests.llms.fake_llm import FakeLLM
INDEX_NAME = "langchain-test-index"
NAMESPACE = "langchain_test_db.langchain_test_collection"
CONNECTION_STRING: str = os.environ.get("MONGODB_VCORE_URI", "")
DB_NAME, COLLECTION_NAME = NAMESPACE.split(".")
num_lists = 3
dimensions = 10
similarity_algorithm = CosmosDBSimilarityType.COS
kind = CosmosDBVectorSearchType.VECTOR_IVF
m = 16
ef_construction = 64
ef_search = 40
score_threshold = 0.1
application_name = "LANGCHAIN_CACHING_PYTHON"
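# num_lists applies to IVF indexes, while m, ef_construction and ef_search
# apply to HNSW indexes; each test passes both sets, on the assumption that
# parameters for the other index kind are ignored.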
def _has_env_vars() -> bool:
    return "MONGODB_VCORE_URI" in os.environ
def random_string() -> str:
return str(uuid.uuid4())
@pytest.mark.requires("pymongo")
@pytest.mark.skipif(
not _has_env_vars(), reason="Missing Azure CosmosDB Mongo vCore env. vars"
)
def test_azure_cosmos_db_semantic_cache() -> None:
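    """Test semantic cache hit with the default COS similarity and IVF index."""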
set_llm_cache(
AzureCosmosDBSemanticCache(
cosmosdb_connection_string=CONNECTION_STRING,
cosmosdb_client=None,
embedding=FakeEmbeddings(),
database_name=DB_NAME,
collection_name=COLLECTION_NAME,
num_lists=num_lists,
similarity=similarity_algorithm,
kind=kind,
dimensions=dimensions,
m=m,
ef_construction=ef_construction,
ef_search=ef_search,
score_threshold=score_threshold,
application_name=application_name,
)
)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update("foo", llm_string, [Generation(text="fizz")])
# foo and bar will have the same embedding produced by FakeEmbeddings
cache_output = get_llm_cache().lookup("bar", llm_string)
assert cache_output == [Generation(text="fizz")]
# clear the cache
get_llm_cache().clear(llm_string=llm_string)
@pytest.mark.requires("pymongo")
@pytest.mark.skipif(
not _has_env_vars(), reason="Missing Azure CosmosDB Mongo vCore env. vars"
)
def test_azure_cosmos_db_semantic_cache_inner_product() -> None:
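    """Test semantic cache hit with inner-product (IP) similarity."""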
set_llm_cache(
AzureCosmosDBSemanticCache(
cosmosdb_connection_string=CONNECTION_STRING,
cosmosdb_client=None,
embedding=FakeEmbeddings(),
database_name=DB_NAME,
collection_name=COLLECTION_NAME,
num_lists=num_lists,
similarity=CosmosDBSimilarityType.IP,
kind=kind,
dimensions=dimensions,
m=m,
ef_construction=ef_construction,
ef_search=ef_search,
score_threshold=score_threshold,
application_name=application_name,
)
)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update("foo", llm_string, [Generation(text="fizz")])
# foo and bar will have the same embedding produced by FakeEmbeddings
cache_output = get_llm_cache().lookup("bar", llm_string)
assert cache_output == [Generation(text="fizz")]
# clear the cache
get_llm_cache().clear(llm_string=llm_string)
@pytest.mark.requires("pymongo")
@pytest.mark.skipif(
not _has_env_vars(), reason="Missing Azure CosmosDB Mongo vCore env. vars"
)
def test_azure_cosmos_db_semantic_cache_multi() -> None:
set_llm_cache(
AzureCosmosDBSemanticCache(
cosmosdb_connection_string=CONNECTION_STRING,
cosmosdb_client=None,
embedding=FakeEmbeddings(),
database_name=DB_NAME,
collection_name=COLLECTION_NAME,
num_lists=num_lists,
similarity=similarity_algorithm,
kind=kind,
dimensions=dimensions,
m=m,
ef_construction=ef_construction,
ef_search=ef_search,
score_threshold=score_threshold,
application_name=application_name,
)
)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update(
"foo", llm_string, [Generation(text="fizz"), Generation(text="Buzz")]
)
# foo and bar will have the same embedding produced by FakeEmbeddings
cache_output = get_llm_cache().lookup("bar", llm_string)
assert cache_output == [Generation(text="fizz"), Generation(text="Buzz")]
# clear the cache
get_llm_cache().clear(llm_string=llm_string)
@pytest.mark.requires("pymongo")
@pytest.mark.skipif(
not _has_env_vars(), reason="Missing Azure CosmosDB Mongo vCore env. vars"
)
def test_azure_cosmos_db_semantic_cache_multi_inner_product() -> None:
set_llm_cache(
AzureCosmosDBSemanticCache(
cosmosdb_connection_string=CONNECTION_STRING,
cosmosdb_client=None,
embedding=FakeEmbeddings(),
database_name=DB_NAME,
collection_name=COLLECTION_NAME,
num_lists=num_lists,
similarity=CosmosDBSimilarityType.IP,
kind=kind,
dimensions=dimensions,
m=m,
ef_construction=ef_construction,
ef_search=ef_search,
score_threshold=score_threshold,
application_name=application_name,
)
)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update(
"foo", llm_string, [Generation(text="fizz"), Generation(text="Buzz")]
)
# foo and bar will have the same embedding produced by FakeEmbeddings
cache_output = get_llm_cache().lookup("bar", llm_string)
assert cache_output == [Generation(text="fizz"), Generation(text="Buzz")]
# clear the cache
get_llm_cache().clear(llm_string=llm_string)
@pytest.mark.requires("pymongo")
@pytest.mark.skipif(
not _has_env_vars(), reason="Missing Azure CosmosDB Mongo vCore env. vars"
)
def test_azure_cosmos_db_semantic_cache_hnsw() -> None:
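    """Test semantic cache hit with an HNSW index instead of IVF."""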
set_llm_cache(
AzureCosmosDBSemanticCache(
cosmosdb_connection_string=CONNECTION_STRING,
cosmosdb_client=None,
embedding=FakeEmbeddings(),
database_name=DB_NAME,
collection_name=COLLECTION_NAME,
num_lists=num_lists,
similarity=similarity_algorithm,
kind=CosmosDBVectorSearchType.VECTOR_HNSW,
dimensions=dimensions,
m=m,
ef_construction=ef_construction,
ef_search=ef_search,
score_threshold=score_threshold,
application_name=application_name,
)
)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update("foo", llm_string, [Generation(text="fizz")])
# foo and bar will have the same embedding produced by FakeEmbeddings
cache_output = get_llm_cache().lookup("bar", llm_string)
assert cache_output == [Generation(text="fizz")]
# clear the cache
get_llm_cache().clear(llm_string=llm_string)
@pytest.mark.requires("pymongo")
@pytest.mark.skipif(
not _has_env_vars(), reason="Missing Azure CosmosDB Mongo vCore env. vars"
)
def test_azure_cosmos_db_semantic_cache_inner_product_hnsw() -> None:
set_llm_cache(
AzureCosmosDBSemanticCache(
cosmosdb_connection_string=CONNECTION_STRING,
cosmosdb_client=None,
embedding=FakeEmbeddings(),
database_name=DB_NAME,
collection_name=COLLECTION_NAME,
num_lists=num_lists,
similarity=CosmosDBSimilarityType.IP,
kind=CosmosDBVectorSearchType.VECTOR_HNSW,
dimensions=dimensions,
m=m,
ef_construction=ef_construction,
ef_search=ef_search,
score_threshold=score_threshold,
application_name=application_name,
)
)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update("foo", llm_string, [Generation(text="fizz")])
# foo and bar will have the same embedding produced by FakeEmbeddings
cache_output = get_llm_cache().lookup("bar", llm_string)
assert cache_output == [Generation(text="fizz")]
# clear the cache
get_llm_cache().clear(llm_string=llm_string)
@pytest.mark.requires("pymongo")
@pytest.mark.skipif(
not _has_env_vars(), reason="Missing Azure CosmosDB Mongo vCore env. vars"
)
def test_azure_cosmos_db_semantic_cache_multi_hnsw() -> None:
set_llm_cache(
AzureCosmosDBSemanticCache(
cosmosdb_connection_string=CONNECTION_STRING,
cosmosdb_client=None,
embedding=FakeEmbeddings(),
database_name=DB_NAME,
collection_name=COLLECTION_NAME,
num_lists=num_lists,
similarity=similarity_algorithm,
kind=CosmosDBVectorSearchType.VECTOR_HNSW,
dimensions=dimensions,
m=m,
ef_construction=ef_construction,
ef_search=ef_search,
score_threshold=score_threshold,
application_name=application_name,
)
)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update(
"foo", llm_string, [Generation(text="fizz"), Generation(text="Buzz")]
)
# foo and bar will have the same embedding produced by FakeEmbeddings
cache_output = get_llm_cache().lookup("bar", llm_string)
assert cache_output == [Generation(text="fizz"), Generation(text="Buzz")]
# clear the cache
get_llm_cache().clear(llm_string=llm_string)
@pytest.mark.requires("pymongo")
@pytest.mark.skipif(
not _has_env_vars(), reason="Missing Azure CosmosDB Mongo vCore env. vars"
)
def test_azure_cosmos_db_semantic_cache_multi_inner_product_hnsw() -> None:
set_llm_cache(
AzureCosmosDBSemanticCache(
cosmosdb_connection_string=CONNECTION_STRING,
cosmosdb_client=None,
embedding=FakeEmbeddings(),
database_name=DB_NAME,
collection_name=COLLECTION_NAME,
num_lists=num_lists,
similarity=CosmosDBSimilarityType.IP,
kind=CosmosDBVectorSearchType.VECTOR_HNSW,
dimensions=dimensions,
m=m,
ef_construction=ef_construction,
ef_search=ef_search,
score_threshold=score_threshold,
application_name=application_name,
)
)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update(
"foo", llm_string, [Generation(text="fizz"), Generation(text="Buzz")]
)
# foo and bar will have the same embedding produced by FakeEmbeddings
cache_output = get_llm_cache().lookup("bar", llm_string)
assert cache_output == [Generation(text="fizz"), Generation(text="Buzz")]
# clear the cache
get_llm_cache().clear(llm_string=llm_string)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/cache/test_upstash_redis_cache.py | """Test Upstash Redis cache functionality."""
import uuid
import langchain
import pytest
from langchain_core.outputs import Generation, LLMResult
from langchain_community.cache import UpstashRedisCache
from tests.unit_tests.llms.fake_chat_model import FakeChatModel
from tests.unit_tests.llms.fake_llm import FakeLLM
URL = "<UPSTASH_REDIS_REST_URL>"
TOKEN = "<UPSTASH_REDIS_REST_TOKEN>"
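# Placeholder credentials: substitute real Upstash Redis REST values before
# running these tests.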
def random_string() -> str:
return str(uuid.uuid4())
@pytest.mark.requires("upstash_redis")
def test_redis_cache_ttl() -> None:
from upstash_redis import Redis
langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN), ttl=1)
langchain.llm_cache.update("foo", "bar", [Generation(text="fizz")])
key = langchain.llm_cache._key("foo", "bar")
assert langchain.llm_cache.redis.pttl(key) > 0
@pytest.mark.requires("upstash_redis")
def test_redis_cache() -> None:
from upstash_redis import Redis
langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN), ttl=1)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
langchain.llm_cache.update("foo", llm_string, [Generation(text="fizz")])
output = llm.generate(["foo"])
expected_output = LLMResult(
generations=[[Generation(text="fizz")]],
llm_output={},
)
assert output == expected_output
lookup_output = langchain.llm_cache.lookup("foo", llm_string)
    if lookup_output:
assert lookup_output == expected_output.generations[0]
langchain.llm_cache.clear()
output = llm.generate(["foo"])
assert output != expected_output
langchain.llm_cache.redis.flushall()
@pytest.mark.requires("upstash_redis")
def test_redis_cache_multi() -> None:
from upstash_redis import Redis
langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN), ttl=1)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
langchain.llm_cache.update(
"foo", llm_string, [Generation(text="fizz"), Generation(text="Buzz")]
)
    output = llm.generate(["foo"])
expected_output = LLMResult(
generations=[[Generation(text="fizz"), Generation(text="Buzz")]],
llm_output={},
)
assert output == expected_output
# clear the cache
langchain.llm_cache.clear()
@pytest.mark.requires("upstash_redis")
def test_redis_cache_chat() -> None:
from upstash_redis import Redis
langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN), ttl=1)
llm = FakeChatModel()
params = llm.dict()
params["stop"] = None
llm.invoke("foo")
langchain.llm_cache.redis.flushall()
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/cache/test_astradb.py | """
Test AstraDB caches. Requires an Astra DB vector instance.
Required to run this test:
- a recent `astrapy` Python package available
- an Astra DB instance;
- the two environment variables set:
export ASTRA_DB_API_ENDPOINT="https://<DB-ID>-us-east1.apps.astra.datastax.com"
export ASTRA_DB_APPLICATION_TOKEN="AstraCS:........."
- optionally this as well (otherwise defaults are used):
export ASTRA_DB_KEYSPACE="my_keyspace"
"""
import os
from typing import AsyncIterator, Iterator
import pytest
from langchain.globals import get_llm_cache, set_llm_cache
from langchain_core.caches import BaseCache
from langchain_core.language_models import LLM
from langchain_core.outputs import Generation, LLMResult
from langchain_community.cache import AstraDBCache, AstraDBSemanticCache
from langchain_community.utilities.astradb import SetupMode
from tests.integration_tests.cache.fake_embeddings import FakeEmbeddings
from tests.unit_tests.llms.fake_llm import FakeLLM
def _has_env_vars() -> bool:
return all(
[
"ASTRA_DB_APPLICATION_TOKEN" in os.environ,
"ASTRA_DB_API_ENDPOINT" in os.environ,
]
)
@pytest.fixture(scope="module")
def astradb_cache() -> Iterator[AstraDBCache]:
cache = AstraDBCache(
collection_name="lc_integration_test_cache",
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
)
yield cache
cache.collection.astra_db.delete_collection("lc_integration_test_cache")
@pytest.fixture
async def async_astradb_cache() -> AsyncIterator[AstraDBCache]:
cache = AstraDBCache(
collection_name="lc_integration_test_cache_async",
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
setup_mode=SetupMode.ASYNC,
)
yield cache
await cache.async_collection.astra_db.delete_collection(
"lc_integration_test_cache_async"
)
@pytest.fixture(scope="module")
def astradb_semantic_cache() -> Iterator[AstraDBSemanticCache]:
    fake_embeddings = FakeEmbeddings()
sem_cache = AstraDBSemanticCache(
collection_name="lc_integration_test_sem_cache",
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
        embedding=fake_embeddings,
)
yield sem_cache
sem_cache.collection.astra_db.delete_collection("lc_integration_test_sem_cache")
@pytest.fixture
async def async_astradb_semantic_cache() -> AsyncIterator[AstraDBSemanticCache]:
    fake_embeddings = FakeEmbeddings()
sem_cache = AstraDBSemanticCache(
collection_name="lc_integration_test_sem_cache_async",
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
        embedding=fake_embeddings,
setup_mode=SetupMode.ASYNC,
)
yield sem_cache
sem_cache.collection.astra_db.delete_collection(
"lc_integration_test_sem_cache_async"
)
@pytest.mark.requires("astrapy")
@pytest.mark.skipif(not _has_env_vars(), reason="Missing Astra DB env. vars")
class TestAstraDBCaches:
def test_astradb_cache(self, astradb_cache: AstraDBCache) -> None:
self.do_cache_test(FakeLLM(), astradb_cache, "foo")
    async def test_astradb_cache_async(
        self, async_astradb_cache: AstraDBCache
    ) -> None:
await self.ado_cache_test(FakeLLM(), async_astradb_cache, "foo")
def test_astradb_semantic_cache(
self, astradb_semantic_cache: AstraDBSemanticCache
) -> None:
llm = FakeLLM()
self.do_cache_test(llm, astradb_semantic_cache, "bar")
        output = llm.generate(["bar"])  # the cache was cleared, so 'fizz' is gone
assert output != LLMResult(
generations=[[Generation(text="fizz")]],
llm_output={},
)
astradb_semantic_cache.clear()
async def test_astradb_semantic_cache_async(
self, async_astradb_semantic_cache: AstraDBSemanticCache
) -> None:
llm = FakeLLM()
await self.ado_cache_test(llm, async_astradb_semantic_cache, "bar")
        output = await llm.agenerate(["bar"])  # the cache was cleared, so 'fizz' is gone
assert output != LLMResult(
generations=[[Generation(text="fizz")]],
llm_output={},
)
await async_astradb_semantic_cache.aclear()
@staticmethod
def do_cache_test(llm: LLM, cache: BaseCache, prompt: str) -> None:
set_llm_cache(cache)
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update("foo", llm_string, [Generation(text="fizz")])
output = llm.generate([prompt])
expected_output = LLMResult(
generations=[[Generation(text="fizz")]],
llm_output={},
)
assert output == expected_output
# clear the cache
cache.clear()
@staticmethod
async def ado_cache_test(llm: LLM, cache: BaseCache, prompt: str) -> None:
set_llm_cache(cache)
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
await get_llm_cache().aupdate("foo", llm_string, [Generation(text="fizz")])
output = await llm.agenerate([prompt])
expected_output = LLMResult(
generations=[[Generation(text="fizz")]],
llm_output={},
)
assert output == expected_output
# clear the cache
await cache.aclear()
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/cache/fake_embeddings.py | """Fake Embedding class for testing purposes."""
import math
from typing import List
from langchain_core.embeddings import Embeddings
fake_texts = ["foo", "bar", "baz"]
class FakeEmbeddings(Embeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Return simple embeddings.
Embeddings encode each text as its index."""
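        # Nine constant 1.0 components plus the text's index as the last
        # component, so document i lies at distance i from the query vector.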
return [[float(1.0)] * 9 + [float(i)] for i in range(len(texts))]
async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
return self.embed_documents(texts)
def embed_query(self, text: str) -> List[float]:
"""Return constant query embeddings.
Embeddings are identical to embed_documents(texts)[0].
Distance to each text will be that text's index,
as it was passed to embed_documents."""
return [float(1.0)] * 9 + [float(0.0)]
async def aembed_query(self, text: str) -> List[float]:
return self.embed_query(text)
class ConsistentFakeEmbeddings(FakeEmbeddings):
"""Fake embeddings which remember all the texts seen so far to return consistent
vectors for the same texts."""
def __init__(self, dimensionality: int = 10) -> None:
self.known_texts: List[str] = []
self.dimensionality = dimensionality
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Return consistent embeddings for each text seen so far."""
out_vectors = []
for text in texts:
if text not in self.known_texts:
self.known_texts.append(text)
vector = [float(1.0)] * (self.dimensionality - 1) + [
float(self.known_texts.index(text))
]
out_vectors.append(vector)
return out_vectors
    def embed_query(self, text: str) -> List[float]:
        """Return the embedding the text would receive from embed_documents;
        a previously unseen text is recorded and assigned the next index."""
return self.embed_documents([text])[0]
class AngularTwoDimensionalEmbeddings(Embeddings):
"""
From angles (as strings in units of pi) to unit embedding vectors on a circle.
"""
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""
Make a list of texts into a list of embedding vectors.
"""
return [self.embed_query(text) for text in texts]
def embed_query(self, text: str) -> List[float]:
"""
Convert input text to a 'vector' (list of floats).
If the text is a number, use it as the angle for the
unit vector in units of pi.
Any other input text becomes the singular result [0, 0] !
"""
try:
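            # e.g. text "0.5" -> [cos(pi/2), sin(pi/2)] ~= [0.0, 1.0]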
angle = float(text)
return [math.cos(angle * math.pi), math.sin(angle * math.pi)]
except ValueError:
# Assume: just test string, no attention is paid to values.
return [0.0, 0.0]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/cache/test_redis_cache.py | """Test Redis cache functionality."""
import uuid
from contextlib import asynccontextmanager, contextmanager
from typing import AsyncGenerator, Generator, List, Optional, cast
import pytest
from langchain.globals import get_llm_cache, set_llm_cache
from langchain_core.embeddings import Embeddings
from langchain_core.load.dump import dumps
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.outputs import ChatGeneration, Generation, LLMResult
from langchain_community.cache import AsyncRedisCache, RedisCache, RedisSemanticCache
from tests.integration_tests.cache.fake_embeddings import (
ConsistentFakeEmbeddings,
FakeEmbeddings,
)
from tests.unit_tests.llms.fake_chat_model import FakeChatModel
from tests.unit_tests.llms.fake_llm import FakeLLM
# Using a non-standard port to avoid conflicts with potentially local running
# redis instances
# You can spin up a local redis using docker compose
# cd [repository-root]/docker
# docker-compose up redis
REDIS_TEST_URL = "redis://localhost:6020"
def random_string() -> str:
return str(uuid.uuid4())
@contextmanager
def get_sync_redis(*, ttl: Optional[int] = 1) -> Generator[RedisCache, None, None]:
"""Get a sync RedisCache instance."""
import redis
cache = RedisCache(redis_=redis.Redis.from_url(REDIS_TEST_URL), ttl=ttl)
try:
yield cache
finally:
cache.clear()
@asynccontextmanager
async def get_async_redis(
*, ttl: Optional[int] = 1
) -> AsyncGenerator[AsyncRedisCache, None]:
"""Get an async RedisCache instance."""
from redis.asyncio import Redis
cache = AsyncRedisCache(redis_=Redis.from_url(REDIS_TEST_URL), ttl=ttl)
try:
yield cache
finally:
await cache.aclear()
def test_redis_cache_ttl() -> None:
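    """Test that cached entries are written with a TTL."""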
from redis import Redis
with get_sync_redis() as llm_cache:
set_llm_cache(llm_cache)
llm_cache.update("foo", "bar", [Generation(text="fizz")])
key = llm_cache._key("foo", "bar")
assert isinstance(llm_cache.redis, Redis)
assert llm_cache.redis.pttl(key) > 0
async def test_async_redis_cache_ttl() -> None:
from redis.asyncio import Redis as AsyncRedis
async with get_async_redis() as redis_cache:
set_llm_cache(redis_cache)
llm_cache = cast(RedisCache, get_llm_cache())
await llm_cache.aupdate("foo", "bar", [Generation(text="fizz")])
key = llm_cache._key("foo", "bar")
assert isinstance(llm_cache.redis, AsyncRedis)
assert await llm_cache.redis.pttl(key) > 0
def test_sync_redis_cache() -> None:
with get_sync_redis() as llm_cache:
set_llm_cache(llm_cache)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
llm_cache.update("prompt", llm_string, [Generation(text="fizz0")])
output = llm.generate(["prompt"])
expected_output = LLMResult(
generations=[[Generation(text="fizz0")]],
llm_output={},
)
assert output == expected_output
async def test_sync_in_async_redis_cache() -> None:
"""Test the sync RedisCache invoked with async methods"""
with get_sync_redis() as llm_cache:
set_llm_cache(llm_cache)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
await llm_cache.aupdate("prompt", llm_string, [Generation(text="fizz1")])
output = await llm.agenerate(["prompt"])
expected_output = LLMResult(
generations=[[Generation(text="fizz1")]],
llm_output={},
)
assert output == expected_output
async def test_async_redis_cache() -> None:
async with get_async_redis() as redis_cache:
set_llm_cache(redis_cache)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
llm_cache = cast(RedisCache, get_llm_cache())
await llm_cache.aupdate("prompt", llm_string, [Generation(text="fizz2")])
output = await llm.agenerate(["prompt"])
expected_output = LLMResult(
generations=[[Generation(text="fizz2")]],
llm_output={},
)
assert output == expected_output
async def test_async_in_sync_redis_cache() -> None:
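    """Test that the async-only AsyncRedisCache rejects the sync update call."""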
async with get_async_redis() as redis_cache:
set_llm_cache(redis_cache)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
llm_cache = cast(RedisCache, get_llm_cache())
with pytest.raises(NotImplementedError):
llm_cache.update("foo", llm_string, [Generation(text="fizz")])
def test_redis_cache_chat() -> None:
with get_sync_redis() as redis_cache:
set_llm_cache(redis_cache)
llm = FakeChatModel()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
prompt: List[BaseMessage] = [HumanMessage(content="foo")]
llm_cache = cast(RedisCache, get_llm_cache())
llm_cache.update(
dumps(prompt),
llm_string,
[ChatGeneration(message=AIMessage(content="fizz"))],
)
output = llm.generate([prompt])
expected_output = LLMResult(
generations=[[ChatGeneration(message=AIMessage(content="fizz"))]],
llm_output={},
)
assert output == expected_output
async def test_async_redis_cache_chat() -> None:
async with get_async_redis() as redis_cache:
set_llm_cache(redis_cache)
llm = FakeChatModel()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
prompt: List[BaseMessage] = [HumanMessage(content="foo")]
llm_cache = cast(RedisCache, get_llm_cache())
await llm_cache.aupdate(
dumps(prompt),
llm_string,
[ChatGeneration(message=AIMessage(content="fizz"))],
)
output = await llm.agenerate([prompt])
expected_output = LLMResult(
generations=[[ChatGeneration(message=AIMessage(content="fizz"))]],
llm_output={},
)
assert output == expected_output
def test_redis_semantic_cache() -> None:
"""Test redis semantic cache functionality."""
set_llm_cache(
RedisSemanticCache(
embedding=FakeEmbeddings(), redis_url=REDIS_TEST_URL, score_threshold=0.1
)
)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
llm_cache = cast(RedisSemanticCache, get_llm_cache())
llm_cache.update("foo", llm_string, [Generation(text="fizz")])
output = llm.generate(
["bar"]
) # foo and bar will have the same embedding produced by FakeEmbeddings
expected_output = LLMResult(
generations=[[Generation(text="fizz")]],
llm_output={},
)
assert output == expected_output
# clear the cache
llm_cache.clear(llm_string=llm_string)
output = llm.generate(
["bar"]
) # foo and bar will have the same embedding produced by FakeEmbeddings
# expect different output now without cached result
assert output != expected_output
llm_cache.clear(llm_string=llm_string)
def test_redis_semantic_cache_multi() -> None:
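    """Test redis semantic cache with multiple generations per prompt."""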
set_llm_cache(
RedisSemanticCache(
embedding=FakeEmbeddings(), redis_url=REDIS_TEST_URL, score_threshold=0.1
)
)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
llm_cache = cast(RedisSemanticCache, get_llm_cache())
llm_cache.update(
"foo", llm_string, [Generation(text="fizz"), Generation(text="Buzz")]
)
output = llm.generate(
["bar"]
) # foo and bar will have the same embedding produced by FakeEmbeddings
expected_output = LLMResult(
generations=[[Generation(text="fizz"), Generation(text="Buzz")]],
llm_output={},
)
assert output == expected_output
# clear the cache
llm_cache.clear(llm_string=llm_string)
def test_redis_semantic_cache_chat() -> None:
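    """Test redis semantic cache with a chat model and ChatGeneration values."""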
set_llm_cache(
RedisSemanticCache(
embedding=FakeEmbeddings(), redis_url=REDIS_TEST_URL, score_threshold=0.1
)
)
llm = FakeChatModel()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
prompt: List[BaseMessage] = [HumanMessage(content="foo")]
llm_cache = cast(RedisSemanticCache, get_llm_cache())
llm_cache.update(
dumps(prompt), llm_string, [ChatGeneration(message=AIMessage(content="fizz"))]
)
output = llm.generate([prompt])
expected_output = LLMResult(
generations=[[ChatGeneration(message=AIMessage(content="fizz"))]],
llm_output={},
)
assert output == expected_output
llm_cache.clear(llm_string=llm_string)
@pytest.mark.parametrize("embedding", [ConsistentFakeEmbeddings()])
@pytest.mark.parametrize(
"prompts, generations",
[
# Single prompt, single generation
([random_string()], [[random_string()]]),
        # Single prompt, two generations
        ([random_string()], [[random_string(), random_string()]]),
        # Single prompt, three generations
        ([random_string()], [[random_string(), random_string(), random_string()]]),
# Multiple prompts, multiple generations
(
[random_string(), random_string()],
[[random_string()], [random_string(), random_string()]],
),
],
    ids=[
        "single_prompt_single_generation",
        "single_prompt_two_generations",
        "single_prompt_three_generations",
        "multiple_prompts_multiple_generations",
    ],
)
def test_redis_semantic_cache_hit(
embedding: Embeddings, prompts: List[str], generations: List[List[str]]
) -> None:
set_llm_cache(RedisSemanticCache(embedding=embedding, redis_url=REDIS_TEST_URL))
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
llm_generations = [
[
Generation(text=generation, generation_info=params)
for generation in prompt_i_generations
]
for prompt_i_generations in generations
]
llm_cache = cast(RedisSemanticCache, get_llm_cache())
for prompt_i, llm_generations_i in zip(prompts, llm_generations):
print(prompt_i) # noqa: T201
print(llm_generations_i) # noqa: T201
llm_cache.update(prompt_i, llm_string, llm_generations_i)
llm.generate(prompts)
assert llm.generate(prompts) == LLMResult(
generations=llm_generations, llm_output={}
)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/cache/test_singlestoredb_cache.py | """Test SingleStoreDB semantic cache. Requires a SingleStore DB database.
Required to run this test:
- a recent `singlestoredb` Python package available
- a SingleStore DB instance;
"""
from importlib.util import find_spec
import pytest
from langchain_core.globals import get_llm_cache, set_llm_cache
from langchain_core.outputs import Generation
from langchain_community.cache import SingleStoreDBSemanticCache
from tests.integration_tests.cache.fake_embeddings import FakeEmbeddings
from tests.unit_tests.llms.fake_llm import FakeLLM
TEST_SINGLESTOREDB_URL = "root:pass@localhost:3306/db"
singlestoredb_installed = find_spec("singlestoredb") is not None
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_semantic_cache() -> None:
    """Test SingleStoreDB semantic cache functionality."""
set_llm_cache(
SingleStoreDBSemanticCache(
embedding=FakeEmbeddings(),
host=TEST_SINGLESTOREDB_URL,
search_threshold=0.0,
)
)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update("foo", llm_string, [Generation(text="fizz")])
cache_output = get_llm_cache().lookup("bar", llm_string)
assert cache_output == [Generation(text="fizz")]
get_llm_cache().clear(llm_string=llm_string)
output = get_llm_cache().lookup("bar", llm_string)
assert output != [Generation(text="fizz")]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/cache/test_gptcache.py | import os
from typing import Any, Callable, Union
import pytest
from langchain.globals import get_llm_cache, set_llm_cache
from langchain_core.outputs import Generation
from langchain_community.cache import GPTCache
from tests.unit_tests.llms.fake_llm import FakeLLM
try:
from gptcache import Cache # noqa: F401
from gptcache.manager.factory import get_data_manager
from gptcache.processor.pre import get_prompt
gptcache_installed = True
except ImportError:
gptcache_installed = False
def init_gptcache_map(cache_obj: Any) -> None:
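    # Point GPTCache at a fresh file-backed data map on every invocation so
    # parametrized test runs don't share cached state.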
i = getattr(init_gptcache_map, "_i", 0)
cache_path = f"data_map_{i}.txt"
if os.path.isfile(cache_path):
os.remove(cache_path)
cache_obj.init(
pre_embedding_func=get_prompt,
data_manager=get_data_manager(data_path=cache_path),
)
init_gptcache_map._i = i + 1 # type: ignore
def init_gptcache_map_with_llm(cache_obj: Any, llm: str) -> None:
cache_path = f"data_map_{llm}.txt"
if os.path.isfile(cache_path):
os.remove(cache_path)
cache_obj.init(
pre_embedding_func=get_prompt,
data_manager=get_data_manager(data_path=cache_path),
)
@pytest.mark.skipif(not gptcache_installed, reason="gptcache not installed")
@pytest.mark.parametrize(
"init_func", [None, init_gptcache_map, init_gptcache_map_with_llm]
)
def test_gptcache_caching(
init_func: Union[Callable[[Any, str], None], Callable[[Any], None], None],
) -> None:
"""Test gptcache default caching behavior."""
set_llm_cache(GPTCache(init_func))
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update("foo", llm_string, [Generation(text="fizz")])
_ = llm.generate(["foo", "bar", "foo"])
cache_output = get_llm_cache().lookup("foo", llm_string)
assert cache_output == [Generation(text="fizz")]
get_llm_cache().clear()
assert get_llm_cache().lookup("bar", llm_string) is None
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/cache/test_cassandra.py | """Test Cassandra caches. Requires a running vector-capable Cassandra cluster."""
import asyncio
import os
import time
from typing import Any, Iterator, Tuple
import pytest
from langchain.globals import get_llm_cache, set_llm_cache
from langchain_core.outputs import Generation, LLMResult
from langchain_community.cache import CassandraCache, CassandraSemanticCache
from langchain_community.utilities.cassandra import SetupMode
from tests.integration_tests.cache.fake_embeddings import FakeEmbeddings
from tests.unit_tests.llms.fake_llm import FakeLLM
@pytest.fixture(scope="module")
def cassandra_connection() -> Iterator[Tuple[Any, str]]:
from cassandra.cluster import Cluster
keyspace = "langchain_cache_test_keyspace"
# get db connection
if "CASSANDRA_CONTACT_POINTS" in os.environ:
contact_points = os.environ["CASSANDRA_CONTACT_POINTS"].split(",")
cluster = Cluster(contact_points)
else:
cluster = Cluster()
session = cluster.connect()
# ensure keyspace exists
session.execute(
(
f"CREATE KEYSPACE IF NOT EXISTS {keyspace} "
f"WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 1}}"
)
)
yield (session, keyspace)
def test_cassandra_cache(cassandra_connection: Tuple[Any, str]) -> None:
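    """Test the exact-match Cassandra cache."""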
session, keyspace = cassandra_connection
cache = CassandraCache(session=session, keyspace=keyspace)
set_llm_cache(cache)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update("foo", llm_string, [Generation(text="fizz")])
output = llm.generate(["foo"])
expected_output = LLMResult(
generations=[[Generation(text="fizz")]],
llm_output={},
)
assert output == expected_output
cache.clear()
async def test_cassandra_cache_async(cassandra_connection: Tuple[Any, str]) -> None:
session, keyspace = cassandra_connection
cache = CassandraCache(
session=session, keyspace=keyspace, setup_mode=SetupMode.ASYNC
)
set_llm_cache(cache)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
await get_llm_cache().aupdate("foo", llm_string, [Generation(text="fizz")])
output = await llm.agenerate(["foo"])
expected_output = LLMResult(
generations=[[Generation(text="fizz")]],
llm_output={},
)
assert output == expected_output
await cache.aclear()
def test_cassandra_cache_ttl(cassandra_connection: Tuple[Any, str]) -> None:
session, keyspace = cassandra_connection
cache = CassandraCache(session=session, keyspace=keyspace, ttl_seconds=2)
set_llm_cache(cache)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update("foo", llm_string, [Generation(text="fizz")])
expected_output = LLMResult(
generations=[[Generation(text="fizz")]],
llm_output={},
)
output = llm.generate(["foo"])
assert output == expected_output
time.sleep(2.5)
    # the entry has expired by now.
output = llm.generate(["foo"])
assert output != expected_output
cache.clear()
async def test_cassandra_cache_ttl_async(cassandra_connection: Tuple[Any, str]) -> None:
session, keyspace = cassandra_connection
cache = CassandraCache(
session=session, keyspace=keyspace, ttl_seconds=2, setup_mode=SetupMode.ASYNC
)
set_llm_cache(cache)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
await get_llm_cache().aupdate("foo", llm_string, [Generation(text="fizz")])
expected_output = LLMResult(
generations=[[Generation(text="fizz")]],
llm_output={},
)
output = await llm.agenerate(["foo"])
assert output == expected_output
await asyncio.sleep(2.5)
    # the entry has expired by now.
output = await llm.agenerate(["foo"])
assert output != expected_output
await cache.aclear()
def test_cassandra_semantic_cache(cassandra_connection: Tuple[Any, str]) -> None:
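    """Test the Cassandra semantic cache: 'bar' hits the entry stored for 'foo'."""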
session, keyspace = cassandra_connection
sem_cache = CassandraSemanticCache(
session=session,
keyspace=keyspace,
embedding=FakeEmbeddings(),
)
set_llm_cache(sem_cache)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update("foo", llm_string, [Generation(text="fizz")])
output = llm.generate(["bar"]) # same embedding as 'foo'
expected_output = LLMResult(
generations=[[Generation(text="fizz")]],
llm_output={},
)
assert output == expected_output
# clear the cache
sem_cache.clear()
    output = llm.generate(["bar"])  # the cache was cleared, so 'fizz' is gone
assert output != expected_output
sem_cache.clear()
async def test_cassandra_semantic_cache_async(
cassandra_connection: Tuple[Any, str],
) -> None:
session, keyspace = cassandra_connection
sem_cache = CassandraSemanticCache(
session=session,
keyspace=keyspace,
embedding=FakeEmbeddings(),
setup_mode=SetupMode.ASYNC,
)
set_llm_cache(sem_cache)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
await get_llm_cache().aupdate("foo", llm_string, [Generation(text="fizz")])
output = await llm.agenerate(["bar"]) # same embedding as 'foo'
expected_output = LLMResult(
generations=[[Generation(text="fizz")]],
llm_output={},
)
assert output == expected_output
# clear the cache
await sem_cache.aclear()
    output = await llm.agenerate(["bar"])  # the cache was cleared, so 'fizz' is gone
assert output != expected_output
await sem_cache.aclear()
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/cache/test_memcached_cache.py | """
Test Memcached llm cache functionality. Requires running instance of Memcached on
localhost default port (11211) and pymemcache
"""
import pytest
from langchain.globals import get_llm_cache, set_llm_cache
from langchain_core.outputs import Generation, LLMResult
from langchain_community.cache import MemcachedCache
from tests.unit_tests.llms.fake_llm import FakeLLM
DEFAULT_MEMCACHED_URL = "localhost"
@pytest.mark.requires("pymemcache")
def test_memcached_cache() -> None:
"""Test general Memcached caching"""
from pymemcache import Client
set_llm_cache(MemcachedCache(Client(DEFAULT_MEMCACHED_URL)))
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update("foo", llm_string, [Generation(text="fizz")])
output = llm.generate(["foo"])
expected_output = LLMResult(
generations=[[Generation(text="fizz")]],
llm_output={},
)
assert output == expected_output
# clear the cache
get_llm_cache().clear()
@pytest.mark.requires("pymemcache")
def test_memcached_cache_flush() -> None:
"""Test flushing Memcached cache"""
from pymemcache import Client
set_llm_cache(MemcachedCache(Client(DEFAULT_MEMCACHED_URL)))
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update("foo", llm_string, [Generation(text="fizz")])
output = llm.generate(["foo"])
expected_output = LLMResult(
generations=[[Generation(text="fizz")]],
llm_output={},
)
assert output == expected_output
# clear the cache
get_llm_cache().clear(delay=0, noreply=False)
# After cache has been cleared, the result shouldn't be the same
output = llm.generate(["foo"])
assert output != expected_output
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/cache/test_momento_cache.py | """Test Momento cache functionality.
To run tests, set the environment variable MOMENTO_API_KEY to a valid
Momento API key. This can be obtained by signing up for a free
Momento account at https://gomomento.com/.
"""
from __future__ import annotations
import uuid
from datetime import timedelta
from typing import Iterator
import pytest
from langchain.globals import set_llm_cache
from langchain_core.outputs import Generation, LLMResult
from langchain_community.cache import MomentoCache
from tests.unit_tests.llms.fake_llm import FakeLLM
def random_string() -> str:
return str(uuid.uuid4())
@pytest.fixture(scope="module")
def momento_cache() -> Iterator[MomentoCache]:
from momento import CacheClient, Configurations, CredentialProvider
cache_name = f"langchain-test-cache-{random_string()}"
client = CacheClient(
Configurations.Laptop.v1(),
CredentialProvider.from_environment_variable("MOMENTO_API_KEY"),
default_ttl=timedelta(seconds=30),
)
try:
llm_cache = MomentoCache(client, cache_name)
set_llm_cache(llm_cache)
yield llm_cache
finally:
client.delete_cache(cache_name)
def test_invalid_ttl() -> None:
from momento import CacheClient, Configurations, CredentialProvider
client = CacheClient(
Configurations.Laptop.v1(),
CredentialProvider.from_environment_variable("MOMENTO_API_KEY"),
default_ttl=timedelta(seconds=30),
)
with pytest.raises(ValueError):
MomentoCache(client, cache_name=random_string(), ttl=timedelta(seconds=-1))
def test_momento_cache_miss(momento_cache: MomentoCache) -> None:
llm = FakeLLM()
stub_llm_output = LLMResult(generations=[[Generation(text="foo")]])
assert llm.generate([random_string()]) == stub_llm_output
@pytest.mark.parametrize(
"prompts, generations",
[
# Single prompt, single generation
([random_string()], [[random_string()]]),
        # Single prompt, two generations
        ([random_string()], [[random_string(), random_string()]]),
        # Single prompt, three generations
        ([random_string()], [[random_string(), random_string(), random_string()]]),
# Multiple prompts, multiple generations
(
[random_string(), random_string()],
[[random_string()], [random_string(), random_string()]],
),
],
)
def test_momento_cache_hit(
momento_cache: MomentoCache, prompts: list[str], generations: list[list[str]]
) -> None:
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
llm_generations = [
[
Generation(text=generation, generation_info=params)
for generation in prompt_i_generations
]
for prompt_i_generations in generations
]
for prompt_i, llm_generations_i in zip(prompts, llm_generations):
momento_cache.update(prompt_i, llm_string, llm_generations_i)
assert llm.generate(prompts) == LLMResult(
generations=llm_generations, llm_output={}
)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_cerebriumai.py | """Test CerebriumAI API wrapper."""
from langchain_community.llms.cerebriumai import CerebriumAI
def test_cerebriumai_call() -> None:
"""Test valid call to cerebriumai."""
llm = CerebriumAI(max_length=10) # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_sparkllm.py | """Test SparkLLM."""
from langchain_core.outputs import LLMResult
from langchain_community.llms.sparkllm import SparkLLM
def test_call() -> None:
"""Test valid call to sparkllm."""
llm = SparkLLM()
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_generate() -> None:
"""Test valid call to sparkllm."""
llm = SparkLLM()
output = llm.generate(["Say foo:"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
def test_spark_llm_with_param_alias() -> None:
"""Test SparkLLM with parameters alias."""
llm = SparkLLM( # type: ignore[call-arg]
app_id="your-app-id",
api_key="your-api-key",
api_secret="your-api-secret",
model="Spark4.0 Ultra",
api_url="your-api-url",
timeout=20,
)
assert llm.spark_app_id == "your-app-id"
assert llm.spark_api_key == "your-api-key"
assert llm.spark_api_secret == "your-api-secret"
assert llm.spark_llm_domain == "Spark4.0 Ultra"
assert llm.spark_api_url == "your-api-url"
assert llm.request_timeout == 20
def test_spark_llm_with_stream() -> None:
"""Test SparkLLM with stream."""
llm = SparkLLM() # type: ignore[call-arg]
    for chunk in llm.stream("你好呀"):  # "Hello there!" in Chinese
assert isinstance(chunk, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_confident.py | """Test Confident."""
def test_confident_deepeval() -> None:
    """Test valid call to Confident AI via the DeepEval callback."""
from deepeval.metrics.answer_relevancy import AnswerRelevancy
from langchain_community.callbacks.confident_callback import DeepEvalCallbackHandler
from langchain_community.llms import OpenAI
answer_relevancy = AnswerRelevancy(minimum_score=0.3)
deepeval_callback = DeepEvalCallbackHandler(
implementation_name="exampleImplementation", metrics=[answer_relevancy]
)
llm = OpenAI(
temperature=0,
callbacks=[deepeval_callback],
verbose=True,
openai_api_key="<YOUR_API_KEY>",
)
llm.generate(
[
"What is the best evaluation tool out there? (no bias at all)",
]
)
assert answer_relevancy.is_successful(), "Answer not relevant"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_vertexai.py | """Test Vertex AI API wrapper.
In order to run this test, you need to install VertexAI SDK:
pip install "google-cloud-aiplatform>=1.36.0"
Your end-user credentials would be used to make the calls (make sure you've run
`gcloud auth login` first).
"""
import os
from typing import Optional
import pytest
from langchain_core.outputs import LLMResult
from langchain_community.llms import VertexAI, VertexAIModelGarden
model_names_to_test = ["text-bison@001", "gemini-pro"]
model_names_to_test_with_default = [None] + model_names_to_test
@pytest.mark.parametrize(
"model_name",
model_names_to_test_with_default,
)
def test_vertex_initialization(model_name: str) -> None:
llm = VertexAI(model_name=model_name) if model_name else VertexAI()
assert llm._llm_type == "vertexai"
try:
assert llm.model_name == llm.client._model_id
except AttributeError:
assert llm.model_name == llm.client._model_name.split("/")[-1]
@pytest.mark.parametrize(
"model_name",
model_names_to_test_with_default,
)
def test_vertex_call(model_name: str) -> None:
llm = (
VertexAI(model_name=model_name, temperature=0)
if model_name
else VertexAI(temperature=0.0)
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
@pytest.mark.scheduled
def test_vertex_generate() -> None:
llm = VertexAI(temperature=0.3, n=2, model_name="text-bison@001")
output = llm.generate(["Say foo:"])
assert isinstance(output, LLMResult)
assert len(output.generations) == 1
assert len(output.generations[0]) == 2
@pytest.mark.scheduled
def test_vertex_generate_code() -> None:
llm = VertexAI(temperature=0.3, n=2, model_name="code-bison@001")
output = llm.generate(["generate a python method that says foo:"])
assert isinstance(output, LLMResult)
assert len(output.generations) == 1
assert len(output.generations[0]) == 2
@pytest.mark.scheduled
async def test_vertex_agenerate() -> None:
llm = VertexAI(temperature=0)
output = await llm.agenerate(["Please say foo:"])
assert isinstance(output, LLMResult)
@pytest.mark.scheduled
@pytest.mark.parametrize(
"model_name",
model_names_to_test_with_default,
)
def test_vertex_stream(model_name: str) -> None:
llm = (
VertexAI(temperature=0, model_name=model_name)
if model_name
else VertexAI(temperature=0)
)
outputs = list(llm.stream("Please say foo:"))
assert isinstance(outputs[0], str)
async def test_vertex_consistency() -> None:
llm = VertexAI(temperature=0)
output = llm.generate(["Please say foo:"])
streaming_output = llm.generate(["Please say foo:"], stream=True)
async_output = await llm.agenerate(["Please say foo:"])
assert output.generations[0][0].text == streaming_output.generations[0][0].text
assert output.generations[0][0].text == async_output.generations[0][0].text
@pytest.mark.parametrize(
"endpoint_os_variable_name,result_arg",
[("FALCON_ENDPOINT_ID", "generated_text"), ("LLAMA_ENDPOINT_ID", None)],
)
def test_model_garden(
endpoint_os_variable_name: str, result_arg: Optional[str]
) -> None:
"""In order to run this test, you should provide endpoint names.
Example:
export FALCON_ENDPOINT_ID=...
export LLAMA_ENDPOINT_ID=...
export PROJECT=...
"""
endpoint_id = os.environ[endpoint_os_variable_name]
project = os.environ["PROJECT"]
location = "europe-west4"
llm = VertexAIModelGarden(
endpoint_id=endpoint_id,
project=project,
result_arg=result_arg,
location=location,
)
output = llm.invoke("What is the meaning of life?")
assert isinstance(output, str)
assert llm._llm_type == "vertexai_model_garden"
@pytest.mark.parametrize(
"endpoint_os_variable_name,result_arg",
[("FALCON_ENDPOINT_ID", "generated_text"), ("LLAMA_ENDPOINT_ID", None)],
)
def test_model_garden_generate(
endpoint_os_variable_name: str, result_arg: Optional[str]
) -> None:
"""In order to run this test, you should provide endpoint names.
Example:
export FALCON_ENDPOINT_ID=...
export LLAMA_ENDPOINT_ID=...
export PROJECT=...
"""
endpoint_id = os.environ[endpoint_os_variable_name]
project = os.environ["PROJECT"]
location = "europe-west4"
llm = VertexAIModelGarden(
endpoint_id=endpoint_id,
project=project,
result_arg=result_arg,
location=location,
)
output = llm.generate(["What is the meaning of life?", "How much is 2+2"])
assert isinstance(output, LLMResult)
assert len(output.generations) == 2
@pytest.mark.parametrize(
"endpoint_os_variable_name,result_arg",
[("FALCON_ENDPOINT_ID", "generated_text"), ("LLAMA_ENDPOINT_ID", None)],
)
async def test_model_garden_agenerate(
endpoint_os_variable_name: str, result_arg: Optional[str]
) -> None:
endpoint_id = os.environ[endpoint_os_variable_name]
project = os.environ["PROJECT"]
location = "europe-west4"
llm = VertexAIModelGarden(
endpoint_id=endpoint_id,
project=project,
result_arg=result_arg,
location=location,
)
output = await llm.agenerate(["What is the meaning of life?", "How much is 2+2"])
assert isinstance(output, LLMResult)
assert len(output.generations) == 2
@pytest.mark.parametrize(
"model_name",
model_names_to_test,
)
def test_vertex_call_count_tokens(model_name: str) -> None:
llm = VertexAI(model_name=model_name)
output = llm.get_num_tokens("How are you?")
assert output == 4
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_pai_eas_endpoint.py | """Test PaiEasEndpoint API wrapper."""
import os
from typing import Generator
from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint
def test_pai_eas_v1_call() -> None:
"""Test valid call to PAI-EAS Service."""
llm = PaiEasEndpoint(
eas_service_url=os.getenv("EAS_SERVICE_URL"), # type: ignore[arg-type]
eas_service_token=os.getenv("EAS_SERVICE_TOKEN"), # type: ignore[arg-type]
version="1.0",
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_pai_eas_v2_call() -> None:
llm = PaiEasEndpoint(
eas_service_url=os.getenv("EAS_SERVICE_URL"), # type: ignore[arg-type]
eas_service_token=os.getenv("EAS_SERVICE_TOKEN"), # type: ignore[arg-type]
version="2.0",
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_pai_eas_v1_streaming() -> None:
"""Test streaming call to PAI-EAS Service."""
llm = PaiEasEndpoint(
eas_service_url=os.getenv("EAS_SERVICE_URL"), # type: ignore[arg-type]
eas_service_token=os.getenv("EAS_SERVICE_TOKEN"), # type: ignore[arg-type]
version="1.0",
)
generator = llm.stream("Q: How do you say 'hello' in German? A:'", stop=["."])
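    # stop=["."] should truncate generation at the first period; each yielded
    # chunk is a plain string.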
stream_results_string = ""
assert isinstance(generator, Generator)
for chunk in generator:
assert isinstance(chunk, str)
        stream_results_string += chunk
assert len(stream_results_string.strip()) > 1
def test_pai_eas_v2_streaming() -> None:
llm = PaiEasEndpoint(
eas_service_url=os.getenv("EAS_SERVICE_URL"), # type: ignore[arg-type]
eas_service_token=os.getenv("EAS_SERVICE_TOKEN"), # type: ignore[arg-type]
version="2.0",
)
generator = llm.stream("Q: How do you say 'hello' in German? A:'", stop=["."])
stream_results_string = ""
assert isinstance(generator, Generator)
for chunk in generator:
assert isinstance(chunk, str)
        stream_results_string += chunk
assert len(stream_results_string.strip()) > 1
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_baichuan.py | """Test Baichuan LLM Endpoint."""
from langchain_core.outputs import LLMResult
from langchain_community.llms.baichuan import BaichuanLLM
def test_call() -> None:
"""Test valid call to baichuan."""
llm = BaichuanLLM()
output = llm.invoke("Who won the second world war?")
assert isinstance(output, str)
def test_generate() -> None:
"""Test valid call to baichuan."""
llm = BaichuanLLM()
output = llm.generate(["Who won the second world war?"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_pipelineai.py | """Test Pipeline Cloud API wrapper."""
from langchain_community.llms.pipelineai import PipelineAI
def test_pipelineai_call() -> None:
"""Test valid call to Pipeline Cloud."""
llm = PipelineAI()
output = llm.invoke("Say foo:")
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_ai21.py | """Test AI21 API wrapper."""
from pathlib import Path
from langchain_community.llms.ai21 import AI21
from langchain_community.llms.loading import load_llm
def test_ai21_call() -> None:
"""Test valid call to ai21."""
llm = AI21(maxTokens=10)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_ai21_call_experimental() -> None:
"""Test valid call to ai21 with an experimental model."""
llm = AI21(maxTokens=10, model="j1-grande-instruct")
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading an AI21 LLM."""
llm = AI21(maxTokens=10)
llm.save(file_path=tmp_path / "ai21.yaml")
loaded_llm = load_llm(tmp_path / "ai21.yaml")
assert llm == loaded_llm
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_predictionguard.py | """Test Prediction Guard API wrapper."""
from langchain_community.llms.predictionguard import PredictionGuard
def test_predictionguard_call() -> None:
"""Test valid call to prediction guard."""
llm = PredictionGuard(model="OpenAI-text-davinci-003") # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_symblai_nebula.py | """Test Nebula API wrapper."""
from langchain_community.llms.symblai_nebula import Nebula
def test_symblai_nebula_call() -> None:
"""Test valid call to Nebula."""
conversation = """Sam: Good morning, team! Let's keep this standup concise.
We'll go in the usual order: what you did yesterday,
what you plan to do today, and any blockers. Alex, kick us off.
Alex: Morning! Yesterday, I wrapped up the UI for the user dashboard.
The new charts and widgets are now responsive.
I also had a sync with the design team to ensure the final touchups are in
line with the brand guidelines. Today, I'll start integrating the frontend with
the new API endpoints Rhea was working on.
The only blocker is waiting for some final API documentation,
but I guess Rhea can update on that.
Rhea: Hey, all! Yep, about the API documentation - I completed the majority of
the backend work for user data retrieval yesterday.
The endpoints are mostly set up, but I need to do a bit more testing today.
I'll finalize the API documentation by noon, so that should unblock Alex.
After that, I’ll be working on optimizing the database queries
for faster data fetching. No other blockers on my end.
Sam: Great, thanks Rhea. Do reach out if you need any testing assistance
or if there are any hitches with the database.
Now, my update: Yesterday, I coordinated with the client to get clarity
on some feature requirements. Today, I'll be updating our project roadmap
and timelines based on their feedback. Additionally, I'll be sitting with
the QA team in the afternoon for preliminary testing.
Blocker: I might need both of you to be available for a quick call
in case the client wants to discuss the changes live.
Alex: Sounds good, Sam. Just let us know a little in advance for the call.
Rhea: Agreed. We can make time for that.
Sam: Perfect! Let's keep the momentum going. Reach out if there are any
sudden issues or support needed. Have a productive day!
Alex: You too.
Rhea: Thanks, bye!"""
llm = Nebula(nebula_api_key="<your_api_key>") # type: ignore[arg-type]
instruction = """Identify the main objectives mentioned in this
conversation."""
output = llm.invoke(f"{instruction}\n{conversation}")
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_openai.py | """Test OpenAI API wrapper."""
from pathlib import Path
from typing import Generator
import pytest
from langchain_core.callbacks import CallbackManager
from langchain_core.outputs import LLMResult
from langchain_community.chat_models.openai import ChatOpenAI
from langchain_community.llms.loading import load_llm
from langchain_community.llms.openai import OpenAI
from tests.unit_tests.callbacks.fake_callback_handler import (
FakeCallbackHandler,
)
@pytest.mark.scheduled
def test_openai_call() -> None:
"""Test valid call to openai."""
llm = OpenAI()
output = llm.invoke("Say something nice:")
assert isinstance(output, str)
def test_openai_llm_output_contains_model_name() -> None:
"""Test llm_output contains model_name."""
llm = OpenAI(max_tokens=10)
llm_result = llm.generate(["Hello, how are you?"])
assert llm_result.llm_output is not None
assert llm_result.llm_output["model_name"] == llm.model_name
def test_openai_stop_valid() -> None:
"""Test openai stop logic on valid configuration."""
query = "write an ordered list of five items"
first_llm = OpenAI(stop="3", temperature=0) # type: ignore[call-arg]
first_output = first_llm.invoke(query)
second_llm = OpenAI(temperature=0)
second_output = second_llm.invoke(query, stop=["3"])
    # Both runs stop at "3", so the two outputs should be identical.
assert first_output == second_output
def test_openai_stop_error() -> None:
"""Test openai stop logic on bad configuration."""
llm = OpenAI(stop="3", temperature=0) # type: ignore[call-arg]
with pytest.raises(ValueError):
llm.invoke("write an ordered list of five items", stop=["\n"])
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading an OpenAI LLM."""
llm = OpenAI(max_tokens=10)
llm.save(file_path=tmp_path / "openai.yaml")
loaded_llm = load_llm(tmp_path / "openai.yaml")
assert loaded_llm == llm
@pytest.mark.scheduled
def test_openai_streaming() -> None:
"""Test streaming tokens from OpenAI."""
llm = OpenAI(max_tokens=10)
generator = llm.stream("I'm Pickle Rick")
assert isinstance(generator, Generator)
for token in generator:
assert isinstance(token, str)
@pytest.mark.scheduled
async def test_openai_astream() -> None:
"""Test streaming tokens from OpenAI."""
llm = OpenAI(max_tokens=10)
async for token in llm.astream("I'm Pickle Rick"):
assert isinstance(token, str)
@pytest.mark.scheduled
async def test_openai_abatch() -> None:
"""Test streaming tokens from OpenAI."""
llm = OpenAI(max_tokens=10)
result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token, str)
async def test_openai_abatch_tags() -> None:
"""Test streaming tokens from OpenAI."""
llm = OpenAI(max_tokens=10)
result = await llm.abatch(
["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
)
for token in result:
assert isinstance(token, str)
@pytest.mark.scheduled
def test_openai_batch() -> None:
"""Test streaming tokens from OpenAI."""
llm = OpenAI(max_tokens=10)
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token, str)
@pytest.mark.scheduled
async def test_openai_ainvoke() -> None:
"""Test streaming tokens from OpenAI."""
llm = OpenAI(max_tokens=10)
result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
assert isinstance(result, str)
@pytest.mark.scheduled
def test_openai_invoke() -> None:
"""Test streaming tokens from OpenAI."""
llm = OpenAI(max_tokens=10)
result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
assert isinstance(result, str)
@pytest.mark.scheduled
def test_openai_multiple_prompts() -> None:
"""Test completion with multiple prompts."""
llm = OpenAI(max_tokens=10)
output = llm.generate(["I'm Pickle Rick", "I'm Pickle Rick"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
assert len(output.generations) == 2
def test_openai_streaming_best_of_error() -> None:
"""Test validation for streaming fails if best_of is not 1."""
with pytest.raises(ValueError):
OpenAI(best_of=2, streaming=True)
def test_openai_streaming_n_error() -> None:
"""Test validation for streaming fails if n is not 1."""
with pytest.raises(ValueError):
OpenAI(n=2, streaming=True)
def test_openai_streaming_multiple_prompts_error() -> None:
"""Test validation for streaming fails if multiple prompts are given."""
with pytest.raises(ValueError):
OpenAI(streaming=True).generate(["I'm Pickle Rick", "I'm Pickle Rick"])
@pytest.mark.scheduled
def test_openai_streaming_call() -> None:
"""Test valid call to openai."""
llm = OpenAI(max_tokens=10, streaming=True)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_openai_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = OpenAI(
max_tokens=10,
streaming=True,
temperature=0,
callback_manager=callback_manager,
verbose=True,
)
llm.invoke("Write me a sentence with 100 words.")
assert callback_handler.llm_streams == 10
@pytest.mark.scheduled
async def test_openai_async_generate() -> None:
"""Test async generation."""
llm = OpenAI(max_tokens=10)
output = await llm.agenerate(["Hello, how are you?"])
assert isinstance(output, LLMResult)
async def test_openai_async_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = OpenAI(
max_tokens=10,
streaming=True,
temperature=0,
callback_manager=callback_manager,
verbose=True,
)
result = await llm.agenerate(["Write me a sentence with 100 words."])
assert callback_handler.llm_streams == 10
assert isinstance(result, LLMResult)
def test_openai_modelname_to_contextsize_valid() -> None:
"""Test model name to context size on a valid model."""
assert OpenAI().modelname_to_contextsize("davinci") == 2049
def test_openai_modelname_to_contextsize_invalid() -> None:
"""Test model name to context size on an invalid model."""
with pytest.raises(ValueError):
OpenAI().modelname_to_contextsize("foobar")
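# The expected counts below differ by tokenizer family: the legacy completion
# models use the GPT-3 era encoding while the chat models use cl100k_base, so
# the same string tokenizes to 17 vs. 12 tokens.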
_EXPECTED_NUM_TOKENS = {
"ada": 17,
"babbage": 17,
"curie": 17,
"davinci": 17,
"gpt-4": 12,
"gpt-4-32k": 12,
"gpt-3.5-turbo": 12,
}
_MODELS = [
"ada",
"babbage",
"curie",
"davinci",
]
_CHAT_MODELS = [
"gpt-4",
"gpt-4-32k",
"gpt-3.5-turbo",
]
@pytest.mark.parametrize("model", _MODELS)
def test_openai_get_num_tokens(model: str) -> None:
"""Test get_tokens."""
llm = OpenAI(model=model)
assert llm.get_num_tokens("表情符号是\n🦜🔗") == _EXPECTED_NUM_TOKENS[model]
@pytest.mark.parametrize("model", _CHAT_MODELS)
def test_chat_openai_get_num_tokens(model: str) -> None:
"""Test get_tokens."""
llm = ChatOpenAI(model=model)
assert llm.get_num_tokens("表情符号是\n🦜🔗") == _EXPECTED_NUM_TOKENS[model]
@pytest.fixture
def mock_completion() -> dict:
return {
"id": "cmpl-3evkmQda5Hu7fcZavknQda3SQ",
"object": "text_completion",
"created": 1689989000,
"model": "gpt-3.5-turbo-instruct",
"choices": [
{"text": "Bar Baz", "index": 0, "logprobs": None, "finish_reason": "length"}
],
"usage": {"prompt_tokens": 1, "completion_tokens": 2, "total_tokens": 3},
}
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_edenai.py | """Test EdenAI API wrapper.
In order to run this test, you need to have an EdenAI API key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and will return dummy results)
You'll then need to set EDENAI_API_KEY environment variable to your api key.
"""
from pydantic import SecretStr
from langchain_community.llms import EdenAI
def test_edenai_call() -> None:
"""Test simple call to edenai."""
llm = EdenAI(provider="openai", temperature=0.2, max_tokens=250)
output = llm.invoke("Say foo:")
assert llm._llm_type == "edenai"
assert llm.feature == "text"
assert llm.subfeature == "generation"
assert isinstance(output, str)
async def test_edenai_acall() -> None:
"""Test simple call to edenai."""
llm = EdenAI(provider="openai", temperature=0.2, max_tokens=250)
output = await llm.agenerate(["Say foo:"])
assert llm._llm_type == "edenai"
assert llm.feature == "text"
assert llm.subfeature == "generation"
    assert isinstance(output.generations[0][0].text, str)
def test_edenai_call_with_old_params() -> None:
"""
Test simple call to edenai with using `params`
to pass optional parameters to api
"""
llm = EdenAI(provider="openai", params={"temperature": 0.2, "max_tokens": 250})
output = llm.invoke("Say foo:")
assert llm._llm_type == "edenai"
assert llm.feature == "text"
assert llm.subfeature == "generation"
assert isinstance(output, str)
def test_api_key_is_secret_string() -> None:
llm = EdenAI(provider="openai", edenai_api_key="secret-api-key")
assert isinstance(llm.edenai_api_key, SecretStr)
def test_uses_actual_secret_value() -> None:
llm = EdenAI(provider="openai", edenai_api_key="secret-api-key")
assert llm.edenai_api_key.get_secret_value() == "secret-api-key"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_together.py | """Test Together API wrapper.
In order to run this test, you need to have a Together API key.
You can get it by registering for free at https://api.together.xyz/.
A test key can be found at https://api.together.xyz/settings/api-keys
You'll then need to set TOGETHER_API_KEY environment variable to your api key.
"""
import pytest
from langchain_community.llms import Together
def test_together_call() -> None:
"""Test simple call to together."""
llm = Together(
model="togethercomputer/RedPajama-INCITE-7B-Base",
temperature=0.2,
max_tokens=250,
)
output = llm.invoke("Say foo:")
assert llm._llm_type == "together"
assert isinstance(output, str)
async def test_together_acall() -> None:
"""Test simple call to together."""
llm = Together(
model="togethercomputer/RedPajama-INCITE-7B-Base",
temperature=0.2,
max_tokens=250,
)
output = await llm.agenerate(["Say foo:"], stop=["bar"])
assert llm._llm_type == "together"
output_text = output.generations[0][0].text
assert isinstance(output_text, str)
assert output_text.count("bar") <= 1
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_mosaicml.py | """Test MosaicML API wrapper."""
import re
import pytest
from langchain_community.llms.mosaicml import PROMPT_FOR_GENERATION_FORMAT, MosaicML
def test_mosaicml_llm_call() -> None:
"""Test valid call to MosaicML."""
llm = MosaicML(model_kwargs={})
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_mosaicml_endpoint_change() -> None:
"""Test valid call to MosaicML."""
new_url = "https://models.hosted-on.mosaicml.hosting/mpt-30b-instruct/v1/predict"
llm = MosaicML(endpoint_url=new_url)
assert llm.endpoint_url == new_url
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_mosaicml_extra_kwargs() -> None:
llm = MosaicML(model_kwargs={"max_new_tokens": 1})
assert llm.model_kwargs == {"max_new_tokens": 1}
output = llm.invoke("Say foo:")
assert isinstance(output, str)
# should only generate one new token (which might be a new line or whitespace token)
assert len(output.split()) <= 1
def test_instruct_prompt() -> None:
"""Test instruct prompt."""
llm = MosaicML(inject_instruction_format=True, model_kwargs={"max_new_tokens": 10})
instruction = "Repeat the word foo"
prompt = llm._transform_prompt(instruction)
expected_prompt = PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction)
assert prompt == expected_prompt
output = llm.invoke(prompt)
assert isinstance(output, str)
def test_retry_logic() -> None:
"""Tests that two queries (which would usually exceed the rate limit) works"""
llm = MosaicML(inject_instruction_format=True, model_kwargs={"max_new_tokens": 10})
instruction = "Repeat the word foo"
prompt = llm._transform_prompt(instruction)
expected_prompt = PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction)
assert prompt == expected_prompt
output = llm.invoke(prompt)
assert isinstance(output, str)
output = llm.invoke(prompt)
assert isinstance(output, str)
def test_short_retry_does_not_loop() -> None:
"""Tests that two queries with a short retry sleep does not infinite loop"""
llm = MosaicML(
inject_instruction_format=True,
model_kwargs={"do_sample": False},
retry_sleep=0.1,
)
instruction = "Repeat the word foo"
prompt = llm._transform_prompt(instruction)
expected_prompt = PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction)
assert prompt == expected_prompt
with pytest.raises(
ValueError,
match=re.escape(
"Error raised by inference API: rate limit exceeded.\nResponse: You have "
"reached maximum request limit.\n"
),
):
for _ in range(10):
output = llm.invoke(prompt)
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_cloudflare_workersai.py | import responses
from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI
@responses.activate
def test_cloudflare_workersai_call() -> None:
responses.add(
responses.POST,
"https://api.cloudflare.com/client/v4/accounts/my_account_id/ai/run/@cf/meta/llama-2-7b-chat-int8",
json={"result": {"response": "4"}},
status=200,
)
llm = CloudflareWorkersAI(
account_id="my_account_id",
api_token="my_api_token",
model="@cf/meta/llama-2-7b-chat-int8",
)
output = llm.invoke("What is 2 + 2?")
assert output == "4"
@responses.activate
def test_cloudflare_workersai_stream() -> None:
response_body = ['data: {"response": "Hello"}', "data: [DONE]"]
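    # The mocked body mimics the server-sent-events stream: one JSON payload
    # per "data:" line, terminated by a "data: [DONE]" sentinel.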
responses.add(
responses.POST,
"https://api.cloudflare.com/client/v4/accounts/my_account_id/ai/run/@cf/meta/llama-2-7b-chat-int8",
body="\n".join(response_body),
status=200,
)
llm = CloudflareWorkersAI(
account_id="my_account_id",
api_token="my_api_token",
model="@cf/meta/llama-2-7b-chat-int8",
streaming=True,
)
outputs = []
for chunk in llm.stream("Say Hello"):
outputs.append(chunk)
assert "".join(outputs) == "Hello"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_promptlayer_openai.py | """Test PromptLayer OpenAI API wrapper."""
from pathlib import Path
from typing import Generator
import pytest
from langchain_community.llms.loading import load_llm
from langchain_community.llms.promptlayer_openai import PromptLayerOpenAI
def test_promptlayer_openai_call() -> None:
"""Test valid call to promptlayer openai."""
llm = PromptLayerOpenAI(max_tokens=10) # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_promptlayer_openai_extra_kwargs() -> None:
"""Test extra kwargs to promptlayer openai."""
# Check that foo is saved in extra_kwargs.
llm = PromptLayerOpenAI(foo=3, max_tokens=10) # type: ignore[call-arg]
assert llm.max_tokens == 10
assert llm.model_kwargs == {"foo": 3}
# Test that if extra_kwargs are provided, they are added to it.
llm = PromptLayerOpenAI(foo=3, model_kwargs={"bar": 2}) # type: ignore[call-arg]
assert llm.model_kwargs == {"foo": 3, "bar": 2}
# Test that if provided twice it errors
with pytest.raises(ValueError):
PromptLayerOpenAI(foo=3, model_kwargs={"foo": 2}) # type: ignore[call-arg]
def test_promptlayer_openai_stop_valid() -> None:
"""Test promptlayer openai stop logic on valid configuration."""
query = "write an ordered list of five items"
first_llm = PromptLayerOpenAI(stop="3", temperature=0) # type: ignore[call-arg]
first_output = first_llm.invoke(query)
second_llm = PromptLayerOpenAI(temperature=0) # type: ignore[call-arg]
second_output = second_llm.invoke(query, stop=["3"])
    # Both runs stop at "3", so the two outputs should be identical.
assert first_output == second_output
def test_promptlayer_openai_stop_error() -> None:
"""Test promptlayer openai stop logic on bad configuration."""
llm = PromptLayerOpenAI(stop="3", temperature=0) # type: ignore[call-arg]
with pytest.raises(ValueError):
llm.invoke("write an ordered list of five items", stop=["\n"])
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading an promptlayer OpenAPI LLM."""
llm = PromptLayerOpenAI(max_tokens=10) # type: ignore[call-arg]
llm.save(file_path=tmp_path / "openai.yaml")
loaded_llm = load_llm(tmp_path / "openai.yaml")
assert loaded_llm == llm
def test_promptlayer_openai_streaming() -> None:
"""Test streaming tokens from promptalyer OpenAI."""
llm = PromptLayerOpenAI(max_tokens=10) # type: ignore[call-arg]
generator = llm.stream("I'm Pickle Rick")
assert isinstance(generator, Generator)
for token in generator:
assert isinstance(token["choices"][0]["text"], str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_stochasticai.py | """Test StochasticAI API wrapper."""
from langchain_community.llms.stochasticai import StochasticAI
def test_stochasticai_call() -> None:
"""Test valid call to StochasticAI."""
llm = StochasticAI()
output = llm.invoke("Say foo:")
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_petals.py | """Test Petals API wrapper."""
from pydantic import SecretStr
from pytest import CaptureFixture
from langchain_community.llms.petals import Petals
def test_api_key_is_string() -> None:
llm = Petals(huggingface_api_key="secret-api-key") # type: ignore[arg-type, call-arg]
assert isinstance(llm.huggingface_api_key, SecretStr)
def test_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
llm = Petals(huggingface_api_key="secret-api-key") # type: ignore[arg-type, call-arg]
print(llm.huggingface_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
def test_petals_call() -> None:
    """Test valid call to Petals."""
llm = Petals(max_new_tokens=10) # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_openllm.py | """Test OpenLLM wrapper."""
from langchain_community.llms.openllm import OpenLLM
def test_openllm_llm_local() -> None:
llm = OpenLLM(model_name="flan-t5", model_id="google/flan-t5-small")
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_openllm_with_kwargs() -> None:
llm = OpenLLM(
model_name="flan-t5", model_id="google/flan-t5-small", temperature=0.84
)
output = llm.invoke("Say bar:")
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_ipex_llm.py | """Test IPEX LLM"""
import os
from typing import Any
import pytest
from langchain_core.outputs import LLMResult
from langchain_community.llms import IpexLLM
model_ids_to_test = os.getenv("TEST_IPEXLLM_MODEL_IDS") or ""
skip_if_no_model_ids = pytest.mark.skipif(
not model_ids_to_test, reason="TEST_IPEXLLM_MODEL_IDS environment variable not set."
)
model_ids_to_test = [model_id.strip() for model_id in model_ids_to_test.split(",")] # type: ignore
device = os.getenv("TEST_IPEXLLM_MODEL_DEVICE") or "cpu"
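# Example invocation (the model ids below are placeholders):
#   TEST_IPEXLLM_MODEL_IDS="model-a,model-b" TEST_IPEXLLM_MODEL_DEVICE=cpu \
#       pytest test_ipex_llm.py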
def load_model(model_id: str) -> Any:
llm = IpexLLM.from_model_id(
model_id=model_id,
model_kwargs={
"temperature": 0,
"max_length": 16,
"trust_remote_code": True,
"device": device,
},
)
return llm
def load_model_more_types(model_id: str, load_in_low_bit: str) -> Any:
llm = IpexLLM.from_model_id(
model_id=model_id,
load_in_low_bit=load_in_low_bit,
model_kwargs={"temperature": 0, "max_length": 16, "trust_remote_code": True},
)
return llm
@skip_if_no_model_ids
@pytest.mark.parametrize(
"model_id",
model_ids_to_test,
)
def test_call(model_id: str) -> None:
"""Test valid call."""
llm = load_model(model_id)
output = llm.invoke("Hello!")
assert isinstance(output, str)
@skip_if_no_model_ids
@pytest.mark.parametrize(
"model_id",
model_ids_to_test,
)
def test_asym_int4(model_id: str) -> None:
"""Test asym int4 data type."""
llm = load_model_more_types(model_id=model_id, load_in_low_bit="asym_int4")
output = llm.invoke("Hello!")
assert isinstance(output, str)
@skip_if_no_model_ids
@pytest.mark.parametrize(
"model_id",
model_ids_to_test,
)
def test_generate(model_id: str) -> None:
"""Test valid generate."""
llm = load_model(model_id)
output = llm.generate(["Hello!"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
@skip_if_no_model_ids
@pytest.mark.parametrize(
"model_id",
model_ids_to_test,
)
def test_save_load_lowbit(model_id: str) -> None:
"""Test save and load lowbit model."""
saved_lowbit_path = "/tmp/saved_model"
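    # save_low_bit materializes the quantized weights to disk so they can be
    # reloaded below without the original full-precision checkpoint.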
llm = load_model(model_id)
llm.model.save_low_bit(saved_lowbit_path)
del llm
loaded_llm = IpexLLM.from_model_id_low_bit(
model_id=saved_lowbit_path,
tokenizer_id=model_id,
model_kwargs={"temperature": 0, "max_length": 16, "trust_remote_code": True},
)
output = loaded_llm.invoke("Hello!")
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_gradient_ai.py | """Test GradientAI API wrapper.
In order to run this test, you need to have a GradientAI API key.
You can get it by registering for free at https://gradient.ai/.
You'll then need to set:
- `GRADIENT_ACCESS_TOKEN` environment variable to your api key.
- `GRADIENT_WORKSPACE_ID` environment variable to your workspace id.
- `GRADIENT_MODEL` environment variable to your model id.
"""
import os
from langchain_community.llms import GradientLLM
def test_gradient_call() -> None:
    """Test simple call to gradient.ai."""
model = os.environ["GRADIENT_MODEL"]
gradient_access_token = os.environ["GRADIENT_ACCESS_TOKEN"]
gradient_workspace_id = os.environ["GRADIENT_WORKSPACE_ID"]
llm = GradientLLM(
model=model,
gradient_access_token=gradient_access_token,
gradient_workspace_id=gradient_workspace_id,
)
output = llm.invoke("Say hello:", temperature=0.2, max_tokens=250)
assert llm._llm_type == "gradient"
assert isinstance(output, str)
assert len(output)
async def test_gradientai_acall() -> None:
"""Test async call to gradient.ai."""
model = os.environ["GRADIENT_MODEL"]
gradient_access_token = os.environ["GRADIENT_ACCESS_TOKEN"]
gradient_workspace_id = os.environ["GRADIENT_WORKSPACE_ID"]
llm = GradientLLM(
model=model,
gradient_access_token=gradient_access_token,
gradient_workspace_id=gradient_workspace_id,
)
output = await llm.agenerate(["Say hello:"], temperature=0.2, max_tokens=250)
assert llm._llm_type == "gradient"
    output_text = output.generations[0][0].text
    assert isinstance(output_text, str)
    assert len(output_text)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_llamafile.py | import os
from typing import Generator
import pytest
import requests
from requests.exceptions import ConnectionError, HTTPError
from langchain_community.llms.llamafile import Llamafile
LLAMAFILE_SERVER_BASE_URL = os.getenv(
"LLAMAFILE_SERVER_BASE_URL", "http://localhost:8080"
)
def _ping_llamafile_server() -> bool:
try:
response = requests.get(LLAMAFILE_SERVER_BASE_URL)
response.raise_for_status()
except (ConnectionError, HTTPError):
return False
return True
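# The skipif guards below probe the server once at collection time, so the
# tests are skipped (rather than erroring) when no llamafile server is up.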
@pytest.mark.skipif(
not _ping_llamafile_server(),
reason=f"unable to find llamafile server at {LLAMAFILE_SERVER_BASE_URL}, "
f"please start one and re-run this test",
)
def test_llamafile_call() -> None:
llm = Llamafile()
output = llm.invoke("Say foo:")
assert isinstance(output, str)
@pytest.mark.skipif(
not _ping_llamafile_server(),
reason=f"unable to find llamafile server at {LLAMAFILE_SERVER_BASE_URL}, "
f"please start one and re-run this test",
)
def test_llamafile_streaming() -> None:
llm = Llamafile(streaming=True)
generator = llm.stream("Tell me about Roman dodecahedrons.")
assert isinstance(generator, Generator)
for token in generator:
assert isinstance(token, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_layerup_security.py | from typing import Any, List, Optional
import pytest
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_community.llms.layerup_security import LayerupSecurity
class MockLLM(LLM):
@property
def _llm_type(self) -> str:
return "mock_llm"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
return "Hi Bob! How are you?"
def test_layerup_security_with_invalid_api_key() -> None:
mock_llm = MockLLM()
layerup_security = LayerupSecurity( # type: ignore[call-arg]
llm=mock_llm,
layerup_api_key="-- invalid API key --",
layerup_api_base_url="https://api.uselayerup.com/v1",
prompt_guardrails=[],
response_guardrails=["layerup.hallucination"],
mask=False,
metadata={"customer": "example@uselayerup.com"},
handle_response_guardrail_violation=(
lambda violation: (
"Custom canned response with dynamic data! "
"The violation rule was {offending_guardrail}."
).format(offending_guardrail=violation["offending_guardrail"])
),
)
with pytest.raises(Exception):
layerup_security.invoke("My name is Bob Dylan. My SSN is 123-45-6789.")
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_ctransformers.py | """Test C Transformers wrapper."""
from langchain_community.llms import CTransformers
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_ctransformers_call() -> None:
"""Test valid call to C Transformers."""
config = {"max_new_tokens": 5}
callback_handler = FakeCallbackHandler()
llm = CTransformers(
model="marella/gpt-2-ggml",
config=config,
callbacks=[callback_handler],
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
assert len(output) > 1
assert 0 < callback_handler.llm_streams <= config["max_new_tokens"]
async def test_ctransformers_async_inference() -> None:
config = {"max_new_tokens": 5}
callback_handler = FakeCallbackHandler()
llm = CTransformers(
model="marella/gpt-2-ggml",
config=config,
callbacks=[callback_handler],
)
output = await llm._acall(prompt="Say foo:")
assert isinstance(output, str)
assert len(output) > 1
assert 0 < callback_handler.llm_streams <= config["max_new_tokens"]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_gpt4all.py | # flake8: noqa
"""Test Llama.cpp wrapper."""
import os
from urllib.request import urlretrieve
from langchain_community.llms import GPT4All
def _download_model() -> str:
"""Download model."""
model_url = "http://gpt4all.io/models/ggml-gpt4all-l13b-snoozy.bin"
local_filename = model_url.split("/")[-1]
if not os.path.exists(local_filename):
urlretrieve(model_url, local_filename)
return local_filename
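# The snoozy checkpoint is several GB; it is cached in the working directory
# so repeated runs skip the download.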
def test_gpt4all_inference() -> None:
"""Test valid gpt4all inference."""
model_path = _download_model()
llm = GPT4All(model=model_path)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_aleph_alpha.py | """Test Aleph Alpha API wrapper."""
from langchain_community.llms.aleph_alpha import AlephAlpha
def test_aleph_alpha_call() -> None:
"""Test valid call to cohere."""
llm = AlephAlpha(maximum_tokens=10) # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_weight_only_quantization.py | """Test HuggingFace Pipeline wrapper."""
from langchain_community.llms.weight_only_quantization import WeightOnlyQuantPipeline
model_id = "google/flan-t5-large"
def test_weight_only_quantization_with_config() -> None:
"""Test valid call to HuggingFace text2text model."""
from intel_extension_for_transformers.transformers import WeightOnlyQuantConfig
conf = WeightOnlyQuantConfig(weight_dtype="nf4")
llm = WeightOnlyQuantPipeline.from_model_id(
model_id=model_id, task="text2text-generation", quantization_config=conf
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_weight_only_quantization_4bit() -> None:
"""Test valid call to HuggingFace text2text model."""
llm = WeightOnlyQuantPipeline.from_model_id(
model_id=model_id, task="text2text-generation", load_in_4bit=True
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_weight_only_quantization_8bit() -> None:
"""Test valid call to HuggingFace text2text model."""
llm = WeightOnlyQuantPipeline.from_model_id(
model_id=model_id, task="text2text-generation", load_in_8bit=True
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_init_with_pipeline() -> None:
"""Test initialization with a HF pipeline."""
from intel_extension_for_transformers.transformers import AutoModelForSeq2SeqLM
from transformers import AutoTokenizer, pipeline
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(
model_id, load_in_4bit=True, use_llm_runtime=False
)
pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
llm = WeightOnlyQuantPipeline(pipeline=pipe)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_weight_only_pipeline_summarization() -> None:
"""Test valid call to HuggingFace summarization model."""
from intel_extension_for_transformers.transformers import WeightOnlyQuantConfig
conf = WeightOnlyQuantConfig()
llm = WeightOnlyQuantPipeline.from_model_id(
model_id=model_id, task="summarization", quantization_config=conf
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_huggingface_text_gen_inference.py | from langchain_community.llms import HuggingFaceTextGenInference
def test_invocation_params_stop_sequences() -> None:
llm = HuggingFaceTextGenInference()
assert llm._default_params["stop_sequences"] == []
runtime_stop = None
assert llm._invocation_params(runtime_stop)["stop_sequences"] == []
assert llm._default_params["stop_sequences"] == []
runtime_stop = ["stop"]
assert llm._invocation_params(runtime_stop)["stop_sequences"] == ["stop"]
assert llm._default_params["stop_sequences"] == []
llm = HuggingFaceTextGenInference(stop_sequences=["."])
runtime_stop = ["stop"]
assert llm._invocation_params(runtime_stop)["stop_sequences"] == [".", "stop"]
assert llm._default_params["stop_sequences"] == ["."]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_arcee.py | from unittest.mock import MagicMock, patch
from pydantic import SecretStr
from pytest import CaptureFixture, MonkeyPatch
from langchain_community.llms.arcee import Arcee
@patch("langchain_community.utilities.arcee.requests.get")
def test_arcee_api_key_is_secret_string(mock_get: MagicMock) -> None:
mock_response = mock_get.return_value
mock_response.status_code = 200
mock_response.json.return_value = {
"model_id": "",
"status": "training_complete",
}
arcee_without_env_var = Arcee(
model="DALM-PubMed",
arcee_api_key="secret_api_key",
arcee_api_url="https://localhost",
arcee_api_version="version",
)
assert isinstance(arcee_without_env_var.arcee_api_key, SecretStr)
@patch("langchain_community.utilities.arcee.requests.get")
def test_api_key_masked_when_passed_via_constructor(
mock_get: MagicMock, capsys: CaptureFixture
) -> None:
mock_response = mock_get.return_value
mock_response.status_code = 200
mock_response.json.return_value = {
"model_id": "",
"status": "training_complete",
}
arcee_without_env_var = Arcee(
model="DALM-PubMed",
arcee_api_key="secret_api_key",
arcee_api_url="https://localhost",
arcee_api_version="version",
)
print(arcee_without_env_var.arcee_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert "**********" == captured.out
@patch("langchain_community.utilities.arcee.requests.get")
def test_api_key_masked_when_passed_from_env(
mock_get: MagicMock, capsys: CaptureFixture, monkeypatch: MonkeyPatch
) -> None:
mock_response = mock_get.return_value
mock_response.status_code = 200
mock_response.json.return_value = {
"model_id": "",
"status": "training_complete",
}
monkeypatch.setenv("ARCEE_API_KEY", "secret_api_key")
arcee_with_env_var = Arcee(
model="DALM-PubMed",
arcee_api_url="https://localhost",
arcee_api_version="version",
)
print(arcee_with_env_var.arcee_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert "**********" == captured.out
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_octoai_endpoint.py | """Test OctoAI API wrapper."""
from langchain_community.llms.octoai_endpoint import OctoAIEndpoint
def test_octoai_endpoint_call() -> None:
"""Test valid call to OctoAI endpoint."""
llm = OctoAIEndpoint()
output = llm.invoke("Which state is Los Angeles in?")
print(output) # noqa: T201
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_manifest.py | """Test manifest integration."""
from langchain_community.llms.manifest import ManifestWrapper
def test_manifest_wrapper() -> None:
"""Test manifest wrapper."""
from manifest import Manifest
manifest = Manifest(client_name="openai")
llm = ManifestWrapper(client=manifest, llm_kwargs={"temperature": 0})
output = llm.invoke("The capital of New York is:")
assert output == "Albany"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_modal.py | """Test Modal API wrapper."""
from langchain_community.llms.modal import Modal
def test_modal_call() -> None:
"""Test valid call to Modal."""
llm = Modal()
output = llm.invoke("Say foo:")
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_bedrock.py | """
Test Amazon Bedrock API wrapper and services, i.e. 'Guardrails for Amazon Bedrock'.
You can get a list of models from the bedrock client by running 'bedrock_models()'.
"""
import os
from typing import Any
import pytest
from langchain_core.callbacks import AsyncCallbackHandler
from langchain_community.llms.bedrock import Bedrock
# this is the guardrails id for the model you want to test
GUARDRAILS_ID = os.environ.get("GUARDRAILS_ID", "7jarelix77")
# this is the guardrails version for the model you want to test
GUARDRAILS_VERSION = os.environ.get("GUARDRAILS_VERSION", "1")
# this query should trigger the guardrails - change it to any text that your
# configured guardrail is set up to block
GUARDRAILS_TRIGGER = os.environ.get(
"GUARDRAILS_TRIGGERING_QUERY", "I want to talk about politics."
)
class BedrockAsyncCallbackHandler(AsyncCallbackHandler):
"""Async callback handler that can be used to handle callbacks from langchain."""
guardrails_intervened: bool = False
async def on_llm_error(
self,
error: BaseException,
**kwargs: Any,
) -> Any:
reason = kwargs.get("reason")
if reason == "GUARDRAIL_INTERVENED":
self.guardrails_intervened = True
def get_response(self): # type: ignore[no-untyped-def]
return self.guardrails_intervened
@pytest.fixture(autouse=True)
def bedrock_runtime_client(): # type: ignore[no-untyped-def]
import boto3
try:
client = boto3.client(
"bedrock-runtime",
region_name=os.environ.get("AWS_REGION", "us-east-1"),
)
return client
except Exception as e:
pytest.fail(f"can not connect to bedrock-runtime client: {e}", pytrace=False)
@pytest.fixture(autouse=True)
def bedrock_client(): # type: ignore[no-untyped-def]
import boto3
try:
client = boto3.client(
"bedrock",
region_name=os.environ.get("AWS_REGION", "us-east-1"),
)
return client
except Exception as e:
pytest.fail(f"can not connect to bedrock client: {e}", pytrace=False)
@pytest.fixture
def bedrock_models(bedrock_client): # type: ignore[no-untyped-def]
"""List bedrock models."""
response = bedrock_client.list_foundation_models().get("modelSummaries")
models = {}
for model in response:
models[model.get("modelId")] = model.get("modelName")
return models
def test_claude_instant_v1(bedrock_runtime_client, bedrock_models): # type: ignore[no-untyped-def]
try:
llm = Bedrock(
model_id="anthropic.claude-instant-v1",
client=bedrock_runtime_client,
model_kwargs={},
)
output = llm.invoke("Say something positive:")
assert isinstance(output, str)
except Exception as e:
pytest.fail(f"can not instantiate claude-instant-v1: {e}", pytrace=False)
def test_amazon_bedrock_guardrails_no_intervention_for_valid_query( # type: ignore[no-untyped-def]
bedrock_runtime_client, bedrock_models
):
try:
llm = Bedrock(
model_id="anthropic.claude-instant-v1",
client=bedrock_runtime_client,
model_kwargs={},
guardrails={
"id": GUARDRAILS_ID,
"version": GUARDRAILS_VERSION,
"trace": False,
},
)
output = llm.invoke("Say something positive:")
assert isinstance(output, str)
except Exception as e:
pytest.fail(f"can not instantiate claude-instant-v1: {e}", pytrace=False)
def test_amazon_bedrock_guardrails_intervention_for_invalid_query( # type: ignore[no-untyped-def]
bedrock_runtime_client, bedrock_models
):
try:
handler = BedrockAsyncCallbackHandler()
llm = Bedrock(
model_id="anthropic.claude-instant-v1",
client=bedrock_runtime_client,
model_kwargs={},
guardrails={
"id": GUARDRAILS_ID,
"version": GUARDRAILS_VERSION,
"trace": True,
},
callbacks=[handler],
)
except Exception as e:
pytest.fail(f"can not instantiate claude-instant-v1: {e}", pytrace=False)
else:
llm.invoke(GUARDRAILS_TRIGGER)
guardrails_intervened = handler.get_response()
assert guardrails_intervened is True
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_huggingface_pipeline.py | """Test HuggingFace Pipeline wrapper."""
from pathlib import Path
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from langchain_community.llms.loading import load_llm
from tests.integration_tests.llms.utils import assert_llm_equality
def test_huggingface_pipeline_text_generation() -> None:
"""Test valid call to HuggingFace text generation model."""
llm = HuggingFacePipeline.from_model_id(
model_id="gpt2", task="text-generation", pipeline_kwargs={"max_new_tokens": 10}
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_huggingface_pipeline_text2text_generation() -> None:
"""Test valid call to HuggingFace text2text generation model."""
llm = HuggingFacePipeline.from_model_id(
model_id="google/flan-t5-small", task="text2text-generation"
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_huggingface_pipeline_device_map() -> None:
"""Test pipelines specifying the device map parameter."""
llm = HuggingFacePipeline.from_model_id(
model_id="gpt2",
task="text-generation",
device_map="auto",
pipeline_kwargs={"max_new_tokens": 10},
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_huggingface_pipeline_summarization() -> None:
"""Test valid call to HuggingFace summarization model."""
llm = HuggingFacePipeline.from_model_id(
model_id="facebook/bart-large-cnn", task="summarization"
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading an HuggingFaceHub LLM."""
llm = HuggingFacePipeline.from_model_id(
model_id="gpt2", task="text-generation", pipeline_kwargs={"max_new_tokens": 10}
)
llm.save(file_path=tmp_path / "hf.yaml")
loaded_llm = load_llm(tmp_path / "hf.yaml")
assert_llm_equality(llm, loaded_llm)
def test_init_with_pipeline() -> None:
"""Test initialization with a HF pipeline."""
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline(
"text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10
)
llm = HuggingFacePipeline(pipeline=pipe)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_huggingface_pipeline_runtime_kwargs() -> None:
"""Test pipelines specifying the device map parameter."""
llm = HuggingFacePipeline.from_model_id(
model_id="gpt2",
task="text-generation",
)
prompt = "Say foo:"
output = llm.invoke(prompt, pipeline_kwargs={"max_new_tokens": 2})
assert len(output) < 10
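# Shared OpenVINO configuration for the openvino-backend tests below
# (CPU-only, latency-oriented hints).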
ov_config = {"PERFORMANCE_HINT": "LATENCY", "NUM_STREAMS": "1", "CACHE_DIR": ""}
def test_huggingface_pipeline_text_generation_ov() -> None:
"""Test valid call to HuggingFace text generation model with openvino."""
llm = HuggingFacePipeline.from_model_id(
model_id="gpt2",
task="text-generation",
backend="openvino",
model_kwargs={"device": "CPU", "ov_config": ov_config},
pipeline_kwargs={"max_new_tokens": 64},
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_huggingface_pipeline_text2text_generation_ov() -> None:
"""Test valid call to HuggingFace text2text generation model with openvino."""
llm = HuggingFacePipeline.from_model_id(
model_id="google/flan-t5-small",
task="text2text-generation",
backend="openvino",
model_kwargs={"device": "CPU", "ov_config": ov_config},
pipeline_kwargs={"max_new_tokens": 64},
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_huggingface_pipeline_summarization_ov() -> None:
"""Test valid call to HuggingFace summarization model with openvino."""
llm = HuggingFacePipeline.from_model_id(
model_id="facebook/bart-large-cnn",
task="summarization",
backend="openvino",
model_kwargs={"device": "CPU", "ov_config": ov_config},
pipeline_kwargs={"max_new_tokens": 64},
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_huggingface_hub.py | """Test HuggingFace API wrapper."""
from pathlib import Path
import pytest
from langchain_community.llms.huggingface_hub import HuggingFaceHub
from langchain_community.llms.loading import load_llm
from tests.integration_tests.llms.utils import assert_llm_equality
def test_huggingface_text_generation() -> None:
"""Test valid call to HuggingFace text generation model."""
llm = HuggingFaceHub(repo_id="gpt2", model_kwargs={"max_new_tokens": 10}) # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_huggingface_text2text_generation() -> None:
"""Test valid call to HuggingFace text2text model."""
llm = HuggingFaceHub(repo_id="google/flan-t5-xl") # type: ignore[call-arg]
output = llm.invoke("The capital of New York is")
assert output == "Albany"
def test_huggingface_summarization() -> None:
"""Test valid call to HuggingFace summarization model."""
llm = HuggingFaceHub(repo_id="facebook/bart-large-cnn") # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_huggingface_call_error() -> None:
"""Test valid call to HuggingFace that errors."""
llm = HuggingFaceHub(model_kwargs={"max_new_tokens": -1}) # type: ignore[call-arg]
with pytest.raises(ValueError):
llm.invoke("Say foo:")
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading an HuggingFaceHub LLM."""
llm = HuggingFaceHub(repo_id="gpt2", model_kwargs={"max_new_tokens": 10}) # type: ignore[call-arg]
llm.save(file_path=tmp_path / "hf.yaml")
loaded_llm = load_llm(tmp_path / "hf.yaml")
assert_llm_equality(llm, loaded_llm)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_llamacpp.py | # flake8: noqa
"""Test Llama.cpp wrapper."""
import os
from typing import Generator
from urllib.request import urlretrieve
import pytest
from langchain_community.llms import LlamaCpp
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def get_model() -> str:
"""Download model. f
From https://huggingface.co/Sosaka/Alpaca-native-4bit-ggml/,
convert to new ggml format and return model path."""
model_url = "https://huggingface.co/Sosaka/Alpaca-native-4bit-ggml/resolve/main/ggml-alpaca-7b-q4.bin"
tokenizer_url = "https://huggingface.co/decapoda-research/llama-7b-hf/resolve/main/tokenizer.model"
conversion_script = "https://github.com/ggerganov/llama.cpp/raw/master/convert-unversioned-ggml-to-ggml.py"
local_filename = model_url.split("/")[-1]
if not os.path.exists("convert-unversioned-ggml-to-ggml.py"):
urlretrieve(conversion_script, "convert-unversioned-ggml-to-ggml.py")
if not os.path.exists("tokenizer.model"):
urlretrieve(tokenizer_url, "tokenizer.model")
if not os.path.exists(local_filename):
urlretrieve(model_url, local_filename)
os.system(f"python convert-unversioned-ggml-to-ggml.py . tokenizer.model")
return local_filename
def test_llamacpp_inference() -> None:
"""Test valid llama.cpp inference."""
model_path = get_model()
llm = LlamaCpp(model_path=model_path)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
assert len(output) > 1
def test_llamacpp_streaming() -> None:
"""Test streaming tokens from LlamaCpp."""
model_path = get_model()
llm = LlamaCpp(model_path=model_path, max_tokens=10)
generator = llm.stream("Q: How do you say 'hello' in German? A:'", stop=["'"])
stream_results_string = ""
assert isinstance(generator, Generator)
for chunk in generator:
assert not isinstance(chunk, str)
# Note that this matches the OpenAI format:
assert isinstance(chunk["choices"][0]["text"], str)
stream_results_string += chunk["choices"][0]["text"]
assert len(stream_results_string.strip()) > 1
def test_llamacpp_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
MAX_TOKENS = 5
OFF_BY_ONE = 1 # There may be an off by one error in the upstream code!
callback_handler = FakeCallbackHandler()
llm = LlamaCpp(
model_path=get_model(),
callbacks=[callback_handler],
verbose=True,
max_tokens=MAX_TOKENS,
)
llm.invoke("Q: Can you count to 10? A:'1, ")
assert callback_handler.llm_streams <= MAX_TOKENS + OFF_BY_ONE
def test_llamacpp_model_kwargs() -> None:
llm = LlamaCpp(model_path=get_model(), model_kwargs={"n_gqa": None})
assert llm.model_kwargs == {"n_gqa": None}
def test_llamacpp_invalid_model_kwargs() -> None:
with pytest.raises(ValueError):
LlamaCpp(model_path=get_model(), model_kwargs={"n_ctx": 1024})
def test_llamacpp_incorrect_field() -> None:
with pytest.warns(match="not default parameter"):
llm = LlamaCpp(model_path=get_model(), n_gqa=None)
llm.model_kwargs == {"n_gqa": None}
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_watsonxllm.py | """Test WatsonxLLM API wrapper."""
from langchain_community.llms import WatsonxLLM
def test_watsonxllm_call() -> None:
watsonxllm = WatsonxLLM(
model_id="google/flan-ul2",
url="https://us-south.ml.cloud.ibm.com",
apikey="***",
project_id="***",
)
response = watsonxllm.invoke("What color sunflower is?")
assert isinstance(response, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_friendli.py | """Test Friendli API."""
import pytest
from langchain_core.outputs.generation import Generation
from langchain_core.outputs.llm_result import LLMResult
from langchain_community.llms.friendli import Friendli
@pytest.fixture
def friendli_llm() -> Friendli:
"""Friendli LLM."""
return Friendli(temperature=0, max_tokens=10)
def test_friendli_invoke(friendli_llm: Friendli) -> None:
"""Test invoke."""
output = friendli_llm.invoke("Say hello world.")
assert isinstance(output, str)
async def test_friendli_ainvoke(friendli_llm: Friendli) -> None:
"""Test async invoke."""
output = await friendli_llm.ainvoke("Say hello world.")
assert isinstance(output, str)
def test_friendli_batch(friendli_llm: Friendli) -> None:
"""Test batch."""
outputs = friendli_llm.batch(["Say hello world.", "Say bye world."])
for output in outputs:
assert isinstance(output, str)
async def test_friendli_abatch(friendli_llm: Friendli) -> None:
"""Test async batch."""
outputs = await friendli_llm.abatch(["Say hello world.", "Say bye world."])
for output in outputs:
assert isinstance(output, str)
def test_friendli_generate(friendli_llm: Friendli) -> None:
"""Test generate."""
result = friendli_llm.generate(["Say hello world.", "Say bye world."])
assert isinstance(result, LLMResult)
generations = result.generations
assert len(generations) == 2
for generation in generations:
gen_ = generation[0]
assert isinstance(gen_, Generation)
text = gen_.text
assert isinstance(text, str)
generation_info = gen_.generation_info
if generation_info is not None:
assert "token" in generation_info
async def test_friendli_agenerate(friendli_llm: Friendli) -> None:
"""Test async generate."""
result = await friendli_llm.agenerate(["Say hello world.", "Say bye world."])
assert isinstance(result, LLMResult)
generations = result.generations
assert len(generations) == 2
for generation in generations:
gen_ = generation[0]
assert isinstance(gen_, Generation)
text = gen_.text
assert isinstance(text, str)
generation_info = gen_.generation_info
if generation_info is not None:
assert "token" in generation_info
def test_friendli_stream(friendli_llm: Friendli) -> None:
"""Test stream."""
stream = friendli_llm.stream("Say hello world.")
for chunk in stream:
assert isinstance(chunk, str)
async def test_friendli_astream(friendli_llm: Friendli) -> None:
"""Test async stream."""
stream = friendli_llm.astream("Say hello world.")
async for chunk in stream:
assert isinstance(chunk, str)
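# Hedged addition: `stop` is part of the shared BaseLLM call signature, so
# passing it through `invoke` should be accepted; whether the backend
# honors it is provider-dependent, so only the return type is asserted.
def test_friendli_invoke_with_stop(friendli_llm: Friendli) -> None:
    """Sketch: invoke with a stop sequence."""
    output = friendli_llm.invoke("Say hello world.", stop=["."])
    assert isinstance(output, str)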
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_fireworks.py | """Test Fireworks AI API Wrapper."""
from typing import Generator
import pytest
from langchain_core.outputs import LLMResult
from langchain_community.llms.fireworks import Fireworks
@pytest.fixture
def llm() -> Fireworks:
return Fireworks(model_kwargs={"temperature": 0, "max_tokens": 512})
@pytest.mark.scheduled
def test_fireworks_call(llm: Fireworks) -> None:
"""Test valid call to fireworks."""
output = llm.invoke("How is the weather in New York today?")
assert isinstance(output, str)
@pytest.mark.scheduled
def test_fireworks_model_param() -> None:
"""Tests model parameters for Fireworks"""
llm = Fireworks(model="foo")
assert llm.model == "foo"
@pytest.mark.scheduled
def test_fireworks_invoke(llm: Fireworks) -> None:
"""Tests completion with invoke"""
output = llm.invoke("How is the weather in New York today?", stop=[","])
assert isinstance(output, str)
assert output[-1] == ","
@pytest.mark.scheduled
async def test_fireworks_ainvoke(llm: Fireworks) -> None:
"""Tests completion with invoke"""
output = await llm.ainvoke("How is the weather in New York today?", stop=[","])
assert isinstance(output, str)
assert output[-1] == ","
@pytest.mark.scheduled
def test_fireworks_batch(llm: Fireworks) -> None:
"""Tests completion with invoke"""
llm = Fireworks()
output = llm.batch(
[
"How is the weather in New York today?",
"How is the weather in New York today?",
],
stop=[","],
)
for token in output:
assert isinstance(token, str)
assert token[-1] == ","
@pytest.mark.scheduled
async def test_fireworks_abatch(llm: Fireworks) -> None:
"""Tests completion with invoke"""
output = await llm.abatch(
[
"How is the weather in New York today?",
"How is the weather in New York today?",
],
stop=[","],
)
for token in output:
assert isinstance(token, str)
assert token[-1] == ","
@pytest.mark.scheduled
def test_fireworks_multiple_prompts(
llm: Fireworks,
) -> None:
"""Test completion with multiple prompts."""
output = llm.generate(["How is the weather in New York today?", "I'm pickle rick"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
assert len(output.generations) == 2
@pytest.mark.scheduled
def test_fireworks_streaming(llm: Fireworks) -> None:
"""Test stream completion."""
generator = llm.stream("Who's the best quarterback in the NFL?")
assert isinstance(generator, Generator)
for token in generator:
assert isinstance(token, str)
@pytest.mark.scheduled
def test_fireworks_streaming_stop_words(llm: Fireworks) -> None:
"""Test stream completion with stop words."""
generator = llm.stream("Who's the best quarterback in the NFL?", stop=[","])
assert isinstance(generator, Generator)
last_token = ""
for token in generator:
last_token = token
assert isinstance(token, str)
assert last_token[-1] == ","
@pytest.mark.scheduled
async def test_fireworks_streaming_async(llm: Fireworks) -> None:
"""Test stream completion."""
last_token = ""
async for token in llm.astream(
"Who's the best quarterback in the NFL?", stop=[","]
):
last_token = token
assert isinstance(token, str)
assert last_token[-1] == ","
@pytest.mark.scheduled
async def test_fireworks_async_agenerate(llm: Fireworks) -> None:
"""Test async."""
output = await llm.agenerate(["What is the best city to live in California?"])
assert isinstance(output, LLMResult)
@pytest.mark.scheduled
async def test_fireworks_multiple_prompts_async_agenerate(llm: Fireworks) -> None:
output = await llm.agenerate(
["How is the weather in New York today?", "I'm pickle rick"]
)
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
assert len(output.generations) == 2
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_chatglm.py | """Test ChatGLM API wrapper."""
from langchain_core.outputs import LLMResult
from langchain_community.llms.chatglm import ChatGLM
def test_chatglm_call() -> None:
"""Test valid call to chatglm."""
llm = ChatGLM()
output = llm.invoke("北京和上海这两座城市有什么不同?")
assert isinstance(output, str)
def test_chatglm_generate() -> None:
"""Test valid call to chatglm."""
llm = ChatGLM()
output = llm.generate(["who are you"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
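# Hedged addition: ChatGLM is served over a local HTTP endpoint, and the
# wrapper is assumed to expose it via an `endpoint_url` field (the default
# points at 127.0.0.1:8000). The URL below is a placeholder for a locally
# hosted server.
def test_chatglm_custom_endpoint() -> None:
    """Sketch: point the wrapper at an explicit endpoint URL."""
    llm = ChatGLM(endpoint_url="http://127.0.0.1:8000")
    output = llm.invoke("Say foo:")
    assert isinstance(output, str)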
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_nlpcloud.py | """Test NLPCloud API wrapper."""
from pathlib import Path
from typing import cast
from pydantic import SecretStr
from pytest import CaptureFixture, MonkeyPatch
from langchain_community.llms.loading import load_llm
from langchain_community.llms.nlpcloud import NLPCloud
from tests.integration_tests.llms.utils import assert_llm_equality
def test_nlpcloud_call() -> None:
"""Test valid call to nlpcloud."""
llm = NLPCloud(max_length=10) # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading an NLPCloud LLM."""
llm = NLPCloud(max_length=10) # type: ignore[call-arg]
llm.save(file_path=tmp_path / "nlpcloud.yaml")
loaded_llm = load_llm(tmp_path / "nlpcloud.yaml")
assert_llm_equality(llm, loaded_llm)
def test_nlpcloud_api_key(monkeypatch: MonkeyPatch, capsys: CaptureFixture) -> None:
"""Test that nlpcloud api key is a secret key."""
# test initialization from init
assert isinstance(NLPCloud(nlpcloud_api_key="1").nlpcloud_api_key, SecretStr) # type: ignore[arg-type, call-arg]
monkeypatch.setenv("NLPCLOUD_API_KEY", "secret-api-key")
llm = NLPCloud() # type: ignore[call-arg]
assert isinstance(llm.nlpcloud_api_key, SecretStr)
assert cast(SecretStr, llm.nlpcloud_api_key).get_secret_value() == "secret-api-key"
print(llm.nlpcloud_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_google_palm.py | """Test Google GenerativeAI API wrapper.
Note: This test must be run with the GOOGLE_API_KEY environment variable set to a
valid API key.
"""
from pathlib import Path
import pytest
from langchain_core.outputs import LLMResult
from langchain_community.llms.google_palm import GooglePalm
from langchain_community.llms.loading import load_llm
model_names = [None, "models/text-bison-001", "gemini-pro"]
@pytest.mark.parametrize(
"model_name",
model_names,
)
def test_google_generativeai_call(model_name: str) -> None:
"""Test valid call to Google GenerativeAI text API."""
if model_name:
llm = GooglePalm(max_output_tokens=10, model_name=model_name) # type: ignore[call-arg]
else:
llm = GooglePalm(max_output_tokens=10) # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
assert llm._llm_type == "google_palm"
if model_name and "gemini" in model_name:
assert llm.client.model_name == "models/gemini-pro"
else:
assert llm.model_name == "models/text-bison-001"
@pytest.mark.parametrize(
"model_name",
model_names,
)
def test_google_generativeai_generate(model_name: str) -> None:
n = 1 if model_name == "gemini-pro" else 2
if model_name:
llm = GooglePalm(temperature=0.3, n=n, model_name=model_name) # type: ignore[call-arg]
else:
llm = GooglePalm(temperature=0.3, n=n) # type: ignore[call-arg]
output = llm.generate(["Say foo:"])
assert isinstance(output, LLMResult)
assert len(output.generations) == 1
assert len(output.generations[0]) == n
def test_google_generativeai_get_num_tokens() -> None:
llm = GooglePalm() # type: ignore[call-arg]
output = llm.get_num_tokens("How are you?")
assert output == 4
async def test_google_generativeai_agenerate() -> None:
llm = GooglePalm(temperature=0, model_name="gemini-pro") # type: ignore[call-arg]
output = await llm.agenerate(["Please say foo:"])
assert isinstance(output, LLMResult)
def test_generativeai_stream() -> None:
llm = GooglePalm(temperature=0, model_name="gemini-pro") # type: ignore[call-arg]
outputs = list(llm.stream("Please say foo:"))
assert isinstance(outputs[0], str)
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading a Google PaLM LLM."""
llm = GooglePalm(max_output_tokens=10) # type: ignore[call-arg]
llm.save(file_path=tmp_path / "google_palm.yaml")
loaded_llm = load_llm(tmp_path / "google_palm.yaml")
assert loaded_llm == llm
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_azureml_endpoint.py | """Test AzureML Endpoint wrapper."""
import json
import os
from pathlib import Path
from typing import Dict
from urllib.error import HTTPError
import pytest
from pydantic import ValidationError
from langchain_community.llms.azureml_endpoint import (
AzureMLOnlineEndpoint,
ContentFormatterBase,
DollyContentFormatter,
HFContentFormatter,
OSSContentFormatter,
)
from langchain_community.llms.loading import load_llm
def test_gpt2_call() -> None:
"""Test valid call to GPT2."""
llm = AzureMLOnlineEndpoint(
endpoint_api_key=os.getenv("OSS_ENDPOINT_API_KEY"), # type: ignore[arg-type]
endpoint_url=os.getenv("OSS_ENDPOINT_URL"), # type: ignore[arg-type]
deployment_name=os.getenv("OSS_DEPLOYMENT_NAME"), # type: ignore[arg-type]
content_formatter=OSSContentFormatter(),
)
output = llm.invoke("Foo")
assert isinstance(output, str)
def test_hf_call() -> None:
"""Test valid call to HuggingFace Foundation Model."""
llm = AzureMLOnlineEndpoint(
endpoint_api_key=os.getenv("HF_ENDPOINT_API_KEY"), # type: ignore[arg-type]
endpoint_url=os.getenv("HF_ENDPOINT_URL"), # type: ignore[arg-type]
deployment_name=os.getenv("HF_DEPLOYMENT_NAME"), # type: ignore[arg-type]
content_formatter=HFContentFormatter(),
)
output = llm.invoke("Foo")
assert isinstance(output, str)
def test_dolly_call() -> None:
"""Test valid call to dolly-v2."""
llm = AzureMLOnlineEndpoint(
endpoint_api_key=os.getenv("DOLLY_ENDPOINT_API_KEY"), # type: ignore[arg-type]
endpoint_url=os.getenv("DOLLY_ENDPOINT_URL"), # type: ignore[arg-type]
deployment_name=os.getenv("DOLLY_DEPLOYMENT_NAME"), # type: ignore[arg-type]
content_formatter=DollyContentFormatter(),
)
output = llm.invoke("Foo")
assert isinstance(output, str)
def test_custom_formatter() -> None:
"""Test ability to create a custom content formatter."""
class CustomFormatter(ContentFormatterBase):
content_type: str = "application/json"
accepts: str = "application/json"
def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes: # type: ignore[override]
input_str = json.dumps(
{
"inputs": [prompt],
"parameters": model_kwargs,
"options": {"use_cache": False, "wait_for_model": True},
}
)
return input_str.encode("utf-8")
def format_response_payload(self, output: bytes) -> str: # type: ignore[override]
response_json = json.loads(output)
return response_json[0]["summary_text"]
llm = AzureMLOnlineEndpoint(
endpoint_api_key=os.getenv("BART_ENDPOINT_API_KEY"), # type: ignore[arg-type]
endpoint_url=os.getenv("BART_ENDPOINT_URL"), # type: ignore[arg-type]
deployment_name=os.getenv("BART_DEPLOYMENT_NAME"), # type: ignore[arg-type]
content_formatter=CustomFormatter(),
)
output = llm.invoke("Foo")
assert isinstance(output, str)
def test_missing_content_formatter() -> None:
"""Test AzureML LLM without a content_formatter attribute"""
with pytest.raises(AttributeError):
llm = AzureMLOnlineEndpoint(
endpoint_api_key=os.getenv("OSS_ENDPOINT_API_KEY"), # type: ignore[arg-type]
endpoint_url=os.getenv("OSS_ENDPOINT_URL"), # type: ignore[arg-type]
deployment_name=os.getenv("OSS_DEPLOYMENT_NAME"), # type: ignore[arg-type]
)
llm.invoke("Foo")
def test_invalid_request_format() -> None:
"""Test invalid request format."""
class CustomContentFormatter(ContentFormatterBase):
content_type: str = "application/json"
accepts: str = "application/json"
def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes: # type: ignore[override]
input_str = json.dumps(
{
"incorrect_input": {"input_string": [prompt]},
"parameters": model_kwargs,
}
)
return str.encode(input_str)
def format_response_payload(self, output: bytes) -> str: # type: ignore[override]
response_json = json.loads(output)
return response_json[0]["0"]
with pytest.raises(HTTPError):
llm = AzureMLOnlineEndpoint(
endpoint_api_key=os.getenv("OSS_ENDPOINT_API_KEY"), # type: ignore[arg-type]
endpoint_url=os.getenv("OSS_ENDPOINT_URL"), # type: ignore[arg-type]
deployment_name=os.getenv("OSS_DEPLOYMENT_NAME"), # type: ignore[arg-type]
content_formatter=CustomContentFormatter(),
)
llm.invoke("Foo")
def test_incorrect_url() -> None:
"""Testing AzureML Endpoint for an incorrect URL"""
with pytest.raises(ValidationError):
llm = AzureMLOnlineEndpoint(
endpoint_api_key=os.getenv("OSS_ENDPOINT_API_KEY"), # type: ignore[arg-type]
endpoint_url="https://endpoint.inference.com",
deployment_name=os.getenv("OSS_DEPLOYMENT_NAME"), # type: ignore[arg-type]
content_formatter=OSSContentFormatter(),
)
llm.invoke("Foo")
def test_incorrect_api_type() -> None:
with pytest.raises(ValidationError):
llm = AzureMLOnlineEndpoint(
endpoint_api_key=os.getenv("OSS_ENDPOINT_API_KEY"), # type: ignore[arg-type]
endpoint_url=os.getenv("OSS_ENDPOINT_URL"), # type: ignore[arg-type]
deployment_name=os.getenv("OSS_DEPLOYMENT_NAME"), # type: ignore[arg-type]
endpoint_api_type="serverless", # type: ignore[arg-type]
content_formatter=OSSContentFormatter(),
)
llm.invoke("Foo")
def test_incorrect_key() -> None:
"""Testing AzureML Endpoint for incorrect key"""
with pytest.raises(HTTPError):
llm = AzureMLOnlineEndpoint(
endpoint_api_key="incorrect-key", # type: ignore[arg-type]
endpoint_url=os.getenv("OSS_ENDPOINT_URL"), # type: ignore[arg-type]
deployment_name=os.getenv("OSS_DEPLOYMENT_NAME"), # type: ignore[arg-type]
content_formatter=OSSContentFormatter(),
)
llm.invoke("Foo")
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading an AzureML Foundation Model LLM."""
save_llm = AzureMLOnlineEndpoint(
deployment_name="databricks-dolly-v2-12b-4",
model_kwargs={"temperature": 0.03, "top_p": 0.4, "max_tokens": 200},
)
save_llm.save(file_path=tmp_path / "azureml.yaml")
loaded_llm = load_llm(tmp_path / "azureml.yaml")
assert loaded_llm == save_llm
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_bittensor.py | """Test Bittensor Validator Endpoint wrapper."""
from langchain_community.llms import NIBittensorLLM
def test_bittensor_call() -> None:
"""Test valid call to validator endpoint."""
llm = NIBittensorLLM(system_prompt="Your task is to answer user prompt.")
output = llm.invoke("Say foo:")
assert isinstance(output, str)
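# Hedged addition: NIBittensorLLM inherits `batch` from the Runnable
# interface, so multiple prompts should each come back as strings; no
# provider-specific parameters are assumed here.
def test_bittensor_batch() -> None:
    """Sketch: batch calls over the standard Runnable interface."""
    llm = NIBittensorLLM(system_prompt="Your task is to answer user prompt.")
    outputs = llm.batch(["Say foo:", "Say bar:"])
    assert len(outputs) == 2
    for output in outputs:
        assert isinstance(output, str)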
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_xinference.py | """Test Xinference wrapper."""
import time
from typing import AsyncGenerator, Tuple
import pytest_asyncio
from langchain_community.llms import Xinference
@pytest_asyncio.fixture
async def setup() -> AsyncGenerator[Tuple[str, str], None]:
import xoscar as xo
from xinference.deploy.supervisor import start_supervisor_components
from xinference.deploy.utils import create_worker_actor_pool
from xinference.deploy.worker import start_worker_components
pool = await create_worker_actor_pool(
f"test://127.0.0.1:{xo.utils.get_next_port()}"
)
print(f"Pool running on localhost:{pool.external_address}") # noqa: T201
endpoint = await start_supervisor_components(
pool.external_address, "127.0.0.1", xo.utils.get_next_port()
)
await start_worker_components(
address=pool.external_address, supervisor_address=pool.external_address
)
# wait for the api.
time.sleep(3)
async with pool:
yield endpoint, pool.external_address
def test_xinference_llm_(setup: Tuple[str, str]) -> None:
from xinference.client import RESTfulClient
endpoint, _ = setup
client = RESTfulClient(endpoint)
model_uid = client.launch_model(
model_name="vicuna-v1.3", model_size_in_billions=7, quantization="q4_0"
)
llm = Xinference(server_url=endpoint, model_uid=model_uid)
answer = llm.invoke("Q: What food can we try in the capital of France? A:")
assert isinstance(answer, str)
answer = llm.invoke(
"Q: where can we visit in the capital of France? A:",
generate_config={"max_tokens": 1024, "stream": True},
)
assert isinstance(answer, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_anyscale.py | """Test Anyscale API wrapper."""
from langchain_community.llms.anyscale import Anyscale
def test_anyscale_call() -> None:
"""Test valid call to Anyscale."""
llm = Anyscale()
output = llm.invoke("Say foo:")
assert isinstance(output, str)
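# Hedged addition: `generate` is part of the shared BaseLLM interface, so
# it should return an LLMResult with one generation list per prompt. The
# import is kept local so the file's top-level imports stay unchanged.
def test_anyscale_generate() -> None:
    """Sketch: generate over multiple prompts."""
    from langchain_core.outputs import LLMResult

    llm = Anyscale()
    output = llm.generate(["Say foo:", "Say bar:"])
    assert isinstance(output, LLMResult)
    assert len(output.generations) == 2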
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_yuan2.py | """Test Yuan2.0 API wrapper."""
from langchain_core.outputs import LLMResult
from langchain_community.llms import Yuan2
def test_yuan2_call_method() -> None:
"""Test valid call to Yuan2.0."""
llm = Yuan2(
infer_api="http://127.0.0.1:8000/yuan",
max_tokens=1024,
temp=1.0,
top_p=0.9,
use_history=False,
)
output = llm.invoke("写一段快速排序算法。")
assert isinstance(output, str)
def test_yuan2_generate_method() -> None:
"""Test valid call to Yuan2.0 inference api."""
llm = Yuan2(
infer_api="http://127.0.0.1:8000/yuan",
max_tokens=1024,
temp=1.0,
top_p=0.9,
use_history=False,
)
output = llm.generate(["who are you?"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
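# Hedged addition: Yuan2 exposes the standard Runnable interface, so
# `stream` should yield string chunks even where the wrapper falls back to
# a single whole-completion chunk.
def test_yuan2_stream_method() -> None:
    """Sketch: streaming chunks from the Yuan2.0 inference api."""
    llm = Yuan2(
        infer_api="http://127.0.0.1:8000/yuan",
        max_tokens=1024,
        temp=1.0,
        top_p=0.9,
        use_history=False,
    )
    for chunk in llm.stream("写一段快速排序算法。"):  # "Write a quick sort algorithm."
        assert isinstance(chunk, str)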
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_cohere.py | """Test Cohere API wrapper."""
from pathlib import Path
from pydantic import SecretStr
from pytest import MonkeyPatch
from langchain_community.llms.cohere import Cohere
from langchain_community.llms.loading import load_llm
from tests.integration_tests.llms.utils import assert_llm_equality
def test_cohere_call() -> None:
"""Test valid call to cohere."""
llm = Cohere(max_tokens=10) # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_cohere_api_key(monkeypatch: MonkeyPatch) -> None:
"""Test that cohere api key is a secret key."""
# test initialization from init
assert isinstance(Cohere(cohere_api_key="1").cohere_api_key, SecretStr) # type: ignore[arg-type, call-arg]
# test initialization from env variable
monkeypatch.setenv("COHERE_API_KEY", "secret-api-key")
assert isinstance(Cohere().cohere_api_key, SecretStr) # type: ignore[call-arg]
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading an Cohere LLM."""
llm = Cohere(max_tokens=10) # type: ignore[call-arg]
llm.save(file_path=tmp_path / "cohere.yaml")
loaded_llm = load_llm(tmp_path / "cohere.yaml")
assert_llm_equality(llm, loaded_llm)
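# Hedged addition: Cohere inherits `stream` from BaseLLM, so iterating
# should yield string chunks (possibly a single chunk when token-level
# streaming is not implemented natively).
def test_cohere_stream() -> None:
    """Sketch: streaming chunks from cohere."""
    llm = Cohere(max_tokens=10)  # type: ignore[call-arg]
    for chunk in llm.stream("Say foo:"):
        assert isinstance(chunk, str)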
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_bigdl_llm.py | """Test BigdlLLM"""
import os
import pytest
from langchain_core.outputs import LLMResult
from langchain_community.llms.bigdl_llm import BigdlLLM
model_ids_to_test = os.getenv("TEST_BIGDLLLM_MODEL_IDS") or ""
skip_if_no_model_ids = pytest.mark.skipif(
not model_ids_to_test,
reason="TEST_BIGDLLLM_MODEL_IDS environment variable not set.",
)
model_ids_to_test = [model_id.strip() for model_id in model_ids_to_test.split(",")] # type: ignore
@skip_if_no_model_ids
@pytest.mark.parametrize(
"model_id",
model_ids_to_test,
)
def test_call(model_id: str) -> None:
"""Test valid call to bigdl-llm."""
llm = BigdlLLM.from_model_id(
model_id=model_id,
model_kwargs={"temperature": 0, "max_length": 16, "trust_remote_code": True},
)
output = llm.invoke("Hello!")
assert isinstance(output, str)
@skip_if_no_model_ids
@pytest.mark.parametrize(
"model_id",
model_ids_to_test,
)
def test_generate(model_id: str) -> None:
"""Test valid call to bigdl-llm."""
llm = BigdlLLM.from_model_id(
model_id=model_id,
model_kwargs={"temperature": 0, "max_length": 16, "trust_remote_code": True},
)
output = llm.generate(["Hello!"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_mlx_pipeline.py | """Test MLX Pipeline wrapper."""
from langchain_community.llms.mlx_pipeline import MLXPipeline
def test_mlx_pipeline_text_generation() -> None:
"""Test valid call to MLX text generation model."""
llm = MLXPipeline.from_model_id(
model_id="mlx-community/quantized-gemma-2b",
pipeline_kwargs={"max_tokens": 10},
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_init_with_model_and_tokenizer() -> None:
"""Test initialization with a HF pipeline."""
from mlx_lm import load
model, tokenizer = load("mlx-community/quantized-gemma-2b")
llm = MLXPipeline(model=model, tokenizer=tokenizer)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_mlx_pipeline_runtime_kwargs() -> None:
    """Test MLX pipelines with pipeline kwargs passed at invoke time."""
llm = MLXPipeline.from_model_id(
model_id="mlx-community/quantized-gemma-2b",
)
prompt = "Say foo:"
output = llm.invoke(prompt, pipeline_kwargs={"max_tokens": 2})
assert len(output) < 10
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_gooseai.py | """Test GooseAI API wrapper."""
from langchain_community.llms.gooseai import GooseAI
def test_gooseai_call() -> None:
"""Test valid call to gooseai."""
llm = GooseAI(max_tokens=10) # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_gooseai_call_fairseq() -> None:
"""Test valid call to gooseai with fairseq model."""
llm = GooseAI(model_name="fairseq-1-3b", max_tokens=10) # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_gooseai_stop_valid() -> None:
"""Test gooseai stop logic on valid configuration."""
query = "write an ordered list of five items"
first_llm = GooseAI(stop="3", temperature=0) # type: ignore[call-arg]
first_output = first_llm.invoke(query)
second_llm = GooseAI(temperature=0) # type: ignore[call-arg]
second_output = second_llm.invoke(query, stop=["3"])
    # A stop sequence set at init and one passed at invoke time should
    # yield the same output.
assert first_output == second_output
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_banana.py | """Test BananaDev API wrapper."""
from langchain_community.llms.bananadev import Banana
def test_banana_call() -> None:
"""Test valid call to BananaDev."""
llm = Banana()
output = llm.invoke("Say foo:")
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_forefrontai.py | """Test ForefrontAI API wrapper."""
from langchain_community.llms.forefrontai import ForefrontAI
def test_forefrontai_call() -> None:
"""Test valid call to forefrontai."""
llm = ForefrontAI(length=10) # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_opaqueprompts.py | from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableParallel
import langchain_community.utilities.opaqueprompts as op
from langchain_community.llms import OpenAI
from langchain_community.llms.opaqueprompts import OpaquePrompts
prompt_template = """
As an AI assistant, you will answer questions according to given context.
Sensitive personal information in the question is masked for privacy.
For instance, if the original text says "Giana is good," it will be changed
to "PERSON_998 is good."
Here's how to handle these changes:
* Consider these masked phrases just as placeholders, but still refer to
them in a relevant way when answering.
* It's possible that different masked terms might mean the same thing.
Stick with the given term and don't modify it.
* All masked terms follow the "TYPE_ID" pattern.
* Please don't invent new masked terms. For instance, if you see "PERSON_998,"
don't come up with "PERSON_997" or "PERSON_999" unless they're already in the question.
Conversation History: ```{history}```
Context : ```During our recent meeting on February 23, 2023, at 10:30 AM,
John Doe provided me with his personal details. His email is johndoe@example.com
and his contact number is 650-456-7890. He lives in New York City, USA, and
belongs to the American nationality with Christian beliefs and a leaning towards
the Democratic party. He mentioned that he recently made a transaction using his
credit card 4111 1111 1111 1111 and transferred bitcoins to the wallet address
1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa. While discussing his European travels, he
noted down his IBAN as GB29 NWBK 6016 1331 9268 19. Additionally, he provided
his website as https://johndoeportfolio.com. John also discussed
some of his US-specific details. He said his bank account number is
1234567890123456 and his drivers license is Y12345678. His ITIN is 987-65-4321,
and he recently renewed his passport,
the number for which is 123456789. He emphasized not to share his SSN, which is
669-45-6789. Furthermore, he mentioned that he accesses his work files remotely
through the IP 192.168.1.1 and has a medical license number MED-123456. ```
Question: ```{question}```
"""
def test_opaqueprompts() -> None:
chain = PromptTemplate.from_template(prompt_template) | OpaquePrompts(llm=OpenAI()) # type: ignore[call-arg]
output = chain.invoke(
{
"question": "Write a text message to remind John to do password reset \
for his website through his email to stay secure."
}
)
assert isinstance(output, str)
def test_opaqueprompts_functions() -> None:
    prompt = PromptTemplate.from_template(prompt_template)
llm = OpenAI()
pg_chain = (
op.sanitize
| RunnableParallel(
secure_context=lambda x: x["secure_context"], # type: ignore
response=(lambda x: x["sanitized_input"]) # type: ignore
| prompt
| llm
| StrOutputParser(),
)
| (lambda x: op.desanitize(x["response"], x["secure_context"]))
)
    response = pg_chain.invoke(
        {
            "question": "Write a text message to remind John to do password "
            "reset for his website through his email to stay secure.",
            "history": "",
        }
    )
    assert isinstance(response, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_volcengine_maas.py | """Test volc engine maas LLM model."""
from typing import Generator
from langchain_core.outputs import LLMResult
from pydantic import SecretStr
from pytest import CaptureFixture
from langchain_community.llms.volcengine_maas import (
VolcEngineMaasBase,
VolcEngineMaasLLM,
)
def test_api_key_is_string() -> None:
llm = VolcEngineMaasBase( # type: ignore[call-arg]
volc_engine_maas_ak="secret-volc-ak", # type: ignore[arg-type]
volc_engine_maas_sk="secret-volc-sk", # type: ignore[arg-type]
)
assert isinstance(llm.volc_engine_maas_ak, SecretStr)
assert isinstance(llm.volc_engine_maas_sk, SecretStr)
def test_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
llm = VolcEngineMaasBase( # type: ignore[call-arg]
volc_engine_maas_ak="secret-volc-ak", # type: ignore[arg-type]
volc_engine_maas_sk="secret-volc-sk", # type: ignore[arg-type]
)
print(llm.volc_engine_maas_ak, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
def test_default_call() -> None:
"""Test valid call to volc engine."""
llm = VolcEngineMaasLLM() # type: ignore[call-arg]
output = llm.invoke("tell me a joke")
assert isinstance(output, str)
def test_generate() -> None:
"""Test valid call to volc engine."""
llm = VolcEngineMaasLLM() # type: ignore[call-arg]
output = llm.generate(["tell me a joke"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
def test_generate_stream() -> None:
"""Test valid call to volc engine."""
llm = VolcEngineMaasLLM(streaming=True) # type: ignore[call-arg]
output = llm.stream("tell me a joke")
assert isinstance(output, Generator)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/utils.py | """Utils for LLM Tests."""
from langchain_core.language_models.llms import BaseLLM
from langchain_core.utils.pydantic import get_fields
def assert_llm_equality(llm: BaseLLM, loaded_llm: BaseLLM) -> None:
"""Assert LLM Equality for tests."""
# Check that they are the same type.
assert type(llm) is type(loaded_llm)
# Client field can be session based, so hash is different despite
# all other values being the same, so just assess all other fields
for field in get_fields(llm).keys():
if field != "client" and field != "pipeline":
val = getattr(llm, field)
new_val = getattr(loaded_llm, field)
assert new_val == val
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_beam.py | """Test Beam API wrapper."""
from langchain_community.llms.beam import Beam
def test_beam_call() -> None:
"""Test valid call to Beam."""
llm = Beam(
model_name="gpt2",
name="langchain-gpt2",
cpu=8, # type: ignore[arg-type]
memory="32Gi",
gpu="A10G",
python_version="python3.8",
python_packages=[
"diffusers[torch]>=0.10",
"transformers",
"torch",
"pillow",
"accelerate",
"safetensors",
"xformers",
],
max_length="5",
)
llm._deploy()
output = llm._call("Your prompt goes here")
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_huggingface_endpoint.py | """Test HuggingFace Endpoints."""
from pathlib import Path
import pytest
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
from langchain_community.llms.loading import load_llm
from tests.integration_tests.llms.utils import assert_llm_equality
def test_huggingface_endpoint_call_error() -> None:
"""Test valid call to HuggingFace that errors."""
llm = HuggingFaceEndpoint(endpoint_url="", model_kwargs={"max_new_tokens": -1}) # type: ignore[call-arg]
with pytest.raises(ValueError):
llm.invoke("Say foo:")
def test_saving_loading_endpoint_llm(tmp_path: Path) -> None:
"""Test saving/loading an HuggingFaceHub LLM."""
llm = HuggingFaceEndpoint( # type: ignore[call-arg]
endpoint_url="", task="text-generation", model_kwargs={"max_new_tokens": 10}
)
llm.save(file_path=tmp_path / "hf.yaml")
loaded_llm = load_llm(tmp_path / "hf.yaml")
assert_llm_equality(llm, loaded_llm)
def test_huggingface_text_generation() -> None:
"""Test valid call to HuggingFace text generation model."""
llm = HuggingFaceEndpoint(repo_id="gpt2", model_kwargs={"max_new_tokens": 10}) # type: ignore[call-arg]
output = llm.invoke("Say foo:")
print(output) # noqa: T201
assert isinstance(output, str)
def test_huggingface_text2text_generation() -> None:
"""Test valid call to HuggingFace text2text model."""
llm = HuggingFaceEndpoint(repo_id="google/flan-t5-xl") # type: ignore[call-arg]
output = llm.invoke("The capital of New York is")
assert output == "Albany"
def test_huggingface_summarization() -> None:
"""Test valid call to HuggingFace summarization model."""
llm = HuggingFaceEndpoint(repo_id="facebook/bart-large-cnn") # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_huggingface_call_error() -> None:
"""Test valid call to HuggingFace that errors."""
llm = HuggingFaceEndpoint(repo_id="gpt2", model_kwargs={"max_new_tokens": -1}) # type: ignore[call-arg]
with pytest.raises(ValueError):
llm.invoke("Say foo:")
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading an HuggingFaceEndpoint LLM."""
llm = HuggingFaceEndpoint(repo_id="gpt2", model_kwargs={"max_new_tokens": 10}) # type: ignore[call-arg]
llm.save(file_path=tmp_path / "hf.yaml")
loaded_llm = load_llm(tmp_path / "hf.yaml")
assert_llm_equality(llm, loaded_llm)
def test_invocation_params_stop_sequences() -> None:
llm = HuggingFaceEndpoint() # type: ignore[call-arg]
assert llm._default_params["stop_sequences"] == []
runtime_stop = None
assert llm._invocation_params(runtime_stop)["stop_sequences"] == []
assert llm._default_params["stop_sequences"] == []
runtime_stop = ["stop"]
assert llm._invocation_params(runtime_stop)["stop_sequences"] == ["stop"]
assert llm._default_params["stop_sequences"] == []
llm = HuggingFaceEndpoint(stop_sequences=["."]) # type: ignore[call-arg]
runtime_stop = ["stop"]
assert llm._invocation_params(runtime_stop)["stop_sequences"] == [".", "stop"]
assert llm._default_params["stop_sequences"] == ["."]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/__init__.py | """All integration tests for LLM objects."""
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_deepinfra.py | """Test DeepInfra API wrapper."""
from langchain_community.llms.deepinfra import DeepInfra
def test_deepinfra_call() -> None:
"""Test valid call to DeepInfra."""
llm = DeepInfra(model_id="meta-llama/Llama-2-7b-chat-hf")
output = llm.invoke("What is 2 + 2?")
assert isinstance(output, str)
async def test_deepinfra_acall() -> None:
llm = DeepInfra(model_id="meta-llama/Llama-2-7b-chat-hf")
output = await llm.ainvoke("What is 2 + 2?")
assert llm._llm_type == "deepinfra"
assert isinstance(output, str)
def test_deepinfra_stream() -> None:
llm = DeepInfra(model_id="meta-llama/Llama-2-7b-chat-hf")
num_chunks = 0
for chunk in llm.stream("[INST] Hello [/INST] "):
num_chunks += 1
assert num_chunks > 0
async def test_deepinfra_astream() -> None:
llm = DeepInfra(model_id="meta-llama/Llama-2-7b-chat-hf")
num_chunks = 0
async for chunk in llm.astream("[INST] Hello [/INST] "):
num_chunks += 1
assert num_chunks > 0
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_baseten.py | """Test Baseten API wrapper."""
import os
from langchain_community.llms.baseten import Baseten
# This test requires valid BASETEN_MODEL_ID and BASETEN_API_KEY environment variables
def test_baseten_call() -> None:
"""Test valid call to Baseten."""
llm = Baseten(model=os.environ["BASETEN_MODEL_ID"]) # type: ignore[call-arg]
output = llm.invoke("Test prompt, please respond.")
assert isinstance(output, str)
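# Hedged addition: Baseten inherits `stream` from BaseLLM, so the loop
# below should yield string chunks; as above, it assumes valid
# BASETEN_MODEL_ID and BASETEN_API_KEY environment variables.
def test_baseten_stream() -> None:
    """Sketch: streaming chunks from Baseten."""
    llm = Baseten(model=os.environ["BASETEN_MODEL_ID"])  # type: ignore[call-arg]
    for chunk in llm.stream("Test prompt, please respond."):
        assert isinstance(chunk, str)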
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_openlm.py | from langchain_community.llms.openlm import OpenLM
def test_openlm_call() -> None:
"""Test valid call to openlm."""
llm = OpenLM(model_name="dolly-v2-7b", max_tokens=10) # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_anthropic.py | """Test Anthropic API wrapper."""
from typing import Generator
import pytest
from langchain_core.callbacks import CallbackManager
from langchain_core.outputs import LLMResult
from langchain_community.llms.anthropic import Anthropic
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
@pytest.mark.requires("anthropic")
def test_anthropic_model_name_param() -> None:
llm = Anthropic(model_name="foo")
assert llm.model == "foo"
@pytest.mark.requires("anthropic")
def test_anthropic_model_param() -> None:
llm = Anthropic(model="foo") # type: ignore[call-arg]
assert llm.model == "foo"
def test_anthropic_call() -> None:
"""Test valid call to anthropic."""
llm = Anthropic(model="claude-instant-1") # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_anthropic_streaming() -> None:
"""Test streaming tokens from anthropic."""
llm = Anthropic(model="claude-instant-1") # type: ignore[call-arg]
generator = llm.stream("I'm Pickle Rick")
assert isinstance(generator, Generator)
for token in generator:
assert isinstance(token, str)
def test_anthropic_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = Anthropic(
streaming=True,
callback_manager=callback_manager,
verbose=True,
)
llm.invoke("Write me a sentence with 100 words.")
assert callback_handler.llm_streams > 1
async def test_anthropic_async_generate() -> None:
"""Test async generate."""
llm = Anthropic()
output = await llm.agenerate(["How many toes do dogs have?"])
assert isinstance(output, LLMResult)
async def test_anthropic_async_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = Anthropic(
streaming=True,
callback_manager=callback_manager,
verbose=True,
)
result = await llm.agenerate(["How many toes do dogs have?"])
assert callback_handler.llm_streams > 1
assert isinstance(result, LLMResult)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_clarifai.py | """Test Clarifai API wrapper.
In order to run this test, you need to have an account on Clarifai.
You can sign up for free at https://clarifai.com/signup.
pip install clarifai
You'll need to set env variable CLARIFAI_PAT_KEY to your personal access token key.
"""
from langchain_community.llms.clarifai import Clarifai
def test_clarifai_call() -> None:
"""Test valid call to clarifai."""
llm = Clarifai(
user_id="google-research",
app_id="summarization",
model_id="text-summarization-english-pegasus",
)
output = llm.invoke(
"A chain is a serial assembly of connected pieces, called links, \
typically made of metal, with an overall character similar to that\
of a rope in that it is flexible and curved in compression but \
linear, rigid, and load-bearing in tension. A chain may consist\
of two or more links."
)
assert isinstance(output, str)
assert llm._llm_type == "clarifai"
assert llm.model_id == "text-summarization-english-pegasus"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_azure_openai.py | """Test AzureOpenAI wrapper."""
import os
from typing import Any, Generator
import pytest
from langchain_core.callbacks import CallbackManager
from langchain_core.outputs import LLMResult
from langchain_community.llms import AzureOpenAI
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
OPENAI_API_KEY = os.environ.get("AZURE_OPENAI_API_KEY", "")
DEPLOYMENT_NAME = os.environ.get(
"AZURE_OPENAI_DEPLOYMENT_NAME",
os.environ.get("AZURE_OPENAI_LLM_DEPLOYMENT_NAME", ""),
)
def _get_llm(**kwargs: Any) -> AzureOpenAI:
return AzureOpenAI(
deployment_name=DEPLOYMENT_NAME,
openai_api_version=OPENAI_API_VERSION,
openai_api_base=OPENAI_API_BASE,
openai_api_key=OPENAI_API_KEY,
**kwargs,
)
@pytest.fixture
def llm() -> AzureOpenAI:
return _get_llm(
max_tokens=10,
)
@pytest.mark.scheduled
def test_openai_call(llm: AzureOpenAI) -> None:
"""Test valid call to openai."""
output = llm.invoke("Say something nice:")
assert isinstance(output, str)
@pytest.mark.scheduled
def test_openai_streaming(llm: AzureOpenAI) -> None:
"""Test streaming tokens from AzureOpenAI."""
generator = llm.stream("I'm Pickle Rick")
assert isinstance(generator, Generator)
full_response = ""
for token in generator:
assert isinstance(token, str)
full_response += token
assert full_response
@pytest.mark.scheduled
async def test_openai_astream(llm: AzureOpenAI) -> None:
"""Test streaming tokens from AzureOpenAI."""
async for token in llm.astream("I'm Pickle Rick"):
assert isinstance(token, str)
@pytest.mark.scheduled
async def test_openai_abatch(llm: AzureOpenAI) -> None:
"""Test streaming tokens from AzureOpenAI."""
result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token, str)
async def test_openai_abatch_tags(llm: AzureOpenAI) -> None:
"""Test streaming tokens from AzureOpenAI."""
result = await llm.abatch(
["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
)
for token in result:
assert isinstance(token, str)
@pytest.mark.scheduled
def test_openai_batch(llm: AzureOpenAI) -> None:
"""Test streaming tokens from AzureOpenAI."""
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token, str)
@pytest.mark.scheduled
async def test_openai_ainvoke(llm: AzureOpenAI) -> None:
"""Test streaming tokens from AzureOpenAI."""
result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
assert isinstance(result, str)
@pytest.mark.scheduled
def test_openai_invoke(llm: AzureOpenAI) -> None:
"""Test streaming tokens from AzureOpenAI."""
result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
assert isinstance(result, str)
@pytest.mark.scheduled
def test_openai_multiple_prompts(llm: AzureOpenAI) -> None:
"""Test completion with multiple prompts."""
output = llm.generate(["I'm Pickle Rick", "I'm Pickle Rick"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
assert len(output.generations) == 2
def test_openai_streaming_best_of_error() -> None:
"""Test validation for streaming fails if best_of is not 1."""
with pytest.raises(ValueError):
_get_llm(best_of=2, streaming=True)
def test_openai_streaming_n_error() -> None:
"""Test validation for streaming fails if n is not 1."""
with pytest.raises(ValueError):
_get_llm(n=2, streaming=True)
def test_openai_streaming_multiple_prompts_error() -> None:
"""Test validation for streaming fails if multiple prompts are given."""
with pytest.raises(ValueError):
_get_llm(streaming=True).generate(["I'm Pickle Rick", "I'm Pickle Rick"])
@pytest.mark.scheduled
def test_openai_streaming_call() -> None:
"""Test valid call to openai."""
llm = _get_llm(max_tokens=10, streaming=True)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_openai_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = _get_llm(
max_tokens=10,
streaming=True,
temperature=0,
callback_manager=callback_manager,
verbose=True,
)
llm.invoke("Write me a sentence with 100 words.")
assert callback_handler.llm_streams == 11
@pytest.mark.scheduled
async def test_openai_async_generate() -> None:
"""Test async generation."""
llm = _get_llm(max_tokens=10)
output = await llm.agenerate(["Hello, how are you?"])
assert isinstance(output, LLMResult)
async def test_openai_async_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = _get_llm(
max_tokens=10,
streaming=True,
temperature=0,
callback_manager=callback_manager,
verbose=True,
)
result = await llm.agenerate(["Write me a sentence with 100 words."])
assert callback_handler.llm_streams == 11
assert isinstance(result, LLMResult)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_titan_takeoff.py | """Test Titan Takeoff wrapper."""
import json
from typing import Any, Union
import pytest
from langchain_community.llms import TitanTakeoff, TitanTakeoffPro
@pytest.mark.requires("takeoff_client")
@pytest.mark.requires("pytest_httpx")
@pytest.mark.parametrize("streaming", [True, False])
@pytest.mark.parametrize("takeoff_object", [TitanTakeoff, TitanTakeoffPro])
def test_titan_takeoff_call(
httpx_mock: Any,
streaming: bool,
takeoff_object: Union[TitanTakeoff, TitanTakeoffPro],
) -> None:
"""Test valid call to Titan Takeoff."""
from pytest_httpx import IteratorStream
port = 2345
url = (
f"http://localhost:{port}/generate_stream"
if streaming
else f"http://localhost:{port}/generate"
)
if streaming:
httpx_mock.add_response(
method="POST",
url=url,
stream=IteratorStream([b"data: ask someone else\n\n"]),
)
else:
httpx_mock.add_response(
method="POST",
url=url,
json={"text": "ask someone else"},
)
llm = takeoff_object(port=port, streaming=streaming)
number_of_calls = 0
for function_call in [llm, llm.invoke]:
number_of_calls += 1
output = function_call("What is 2 + 2?")
assert isinstance(output, str)
assert len(httpx_mock.get_requests()) == number_of_calls
assert httpx_mock.get_requests()[0].url == url
assert (
json.loads(httpx_mock.get_requests()[0].content)["text"] == "What is 2 + 2?"
)
if streaming:
output = llm._stream("What is 2 + 2?")
for chunk in output:
assert isinstance(chunk.text, str)
assert len(httpx_mock.get_requests()) == number_of_calls + 1
assert httpx_mock.get_requests()[0].url == url
assert (
json.loads(httpx_mock.get_requests()[0].content)["text"] == "What is 2 + 2?"
)
@pytest.mark.requires("pytest_httpx")
@pytest.mark.requires("takeoff_client")
@pytest.mark.parametrize("streaming", [True, False])
@pytest.mark.parametrize("takeoff_object", [TitanTakeoff, TitanTakeoffPro])
def test_titan_takeoff_bad_call(
httpx_mock: Any,
streaming: bool,
takeoff_object: Union[TitanTakeoff, TitanTakeoffPro],
) -> None:
"""Test valid call to Titan Takeoff."""
from takeoff_client import TakeoffException
url = (
"http://localhost:3000/generate"
if not streaming
else "http://localhost:3000/generate_stream"
)
httpx_mock.add_response(
method="POST", url=url, json={"text": "bad things"}, status_code=400
)
llm = takeoff_object(streaming=streaming)
with pytest.raises(TakeoffException):
llm.invoke("What is 2 + 2?")
assert len(httpx_mock.get_requests()) == 1
assert httpx_mock.get_requests()[0].url == url
assert json.loads(httpx_mock.get_requests()[0].content)["text"] == "What is 2 + 2?"
@pytest.mark.requires("pytest_httpx")
@pytest.mark.requires("takeoff_client")
@pytest.mark.parametrize("takeoff_object", [TitanTakeoff, TitanTakeoffPro])
def test_titan_takeoff_model_initialisation(
httpx_mock: Any,
takeoff_object: Union[TitanTakeoff, TitanTakeoffPro],
) -> None:
"""Test valid call to Titan Takeoff."""
mgnt_port = 36452
inf_port = 46253
mgnt_url = f"http://localhost:{mgnt_port}/reader"
gen_url = f"http://localhost:{inf_port}/generate"
reader_1 = {
"model_name": "test",
"device": "cpu",
"consumer_group": "primary",
"max_sequence_length": 512,
"max_batch_size": 4,
"tensor_parallel": 3,
}
reader_2 = reader_1.copy()
reader_2["model_name"] = "test2"
httpx_mock.add_response(
method="POST", url=mgnt_url, json={"key": "value"}, status_code=201
)
httpx_mock.add_response(
method="POST", url=gen_url, json={"text": "value"}, status_code=200
)
llm = takeoff_object(
port=inf_port, mgmt_port=mgnt_port, models=[reader_1, reader_2]
)
output = llm.invoke("What is 2 + 2?")
assert isinstance(output, str)
# Ensure the management api was called to create the reader
assert len(httpx_mock.get_requests()) == 3
for key, value in reader_1.items():
assert json.loads(httpx_mock.get_requests()[0].content)[key] == value
assert httpx_mock.get_requests()[0].url == mgnt_url
    # A second call should be made to spin up reader 2
for key, value in reader_2.items():
assert json.loads(httpx_mock.get_requests()[1].content)[key] == value
assert httpx_mock.get_requests()[1].url == mgnt_url
# Ensure the third call is to generate endpoint to inference
assert httpx_mock.get_requests()[2].url == gen_url
assert json.loads(httpx_mock.get_requests()[2].content)["text"] == "What is 2 + 2?"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_self_hosted_llm.py | """Test Self-hosted LLMs."""
import pickle
from typing import Any, List, Optional
from langchain_community.llms import SelfHostedHuggingFaceLLM, SelfHostedPipeline
model_reqs = ["pip:./", "transformers", "torch"]
def get_remote_instance() -> Any:
"""Get remote instance for testing."""
import runhouse as rh
return rh.cluster(name="rh-a10x", instance_type="A100:1", use_spot=False)
def test_self_hosted_huggingface_pipeline_text_generation() -> None:
"""Test valid call to self-hosted HuggingFace text generation model."""
gpu = get_remote_instance()
llm = SelfHostedHuggingFaceLLM(
model_id="gpt2",
task="text-generation",
model_kwargs={"n_positions": 1024},
hardware=gpu,
model_reqs=model_reqs,
)
output = llm.invoke("Say foo:") # type: ignore
assert isinstance(output, str)
def test_self_hosted_huggingface_pipeline_text2text_generation() -> None:
"""Test valid call to self-hosted HuggingFace text2text generation model."""
gpu = get_remote_instance()
llm = SelfHostedHuggingFaceLLM(
model_id="google/flan-t5-small",
task="text2text-generation",
hardware=gpu,
model_reqs=model_reqs,
)
output = llm.invoke("Say foo:") # type: ignore
assert isinstance(output, str)
def test_self_hosted_huggingface_pipeline_summarization() -> None:
"""Test valid call to self-hosted HuggingFace summarization model."""
gpu = get_remote_instance()
llm = SelfHostedHuggingFaceLLM(
model_id="facebook/bart-large-cnn",
task="summarization",
hardware=gpu,
model_reqs=model_reqs,
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def load_pipeline() -> Any:
"""Load pipeline for testing."""
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline(
"text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10
)
return pipe
def inference_fn(pipeline: Any, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Inference function for testing."""
return pipeline(prompt)[0]["generated_text"]
def test_init_with_local_pipeline() -> None:
"""Test initialization with a self-hosted HF pipeline."""
gpu = get_remote_instance()
pipeline = load_pipeline()
llm = SelfHostedPipeline.from_pipeline(
pipeline=pipeline,
hardware=gpu,
model_reqs=model_reqs,
inference_fn=inference_fn,
)
output = llm.invoke("Say foo:") # type: ignore
assert isinstance(output, str)
def test_init_with_pipeline_path() -> None:
"""Test initialization with a self-hosted HF pipeline."""
gpu = get_remote_instance()
pipeline = load_pipeline()
import runhouse as rh
rh.blob(pickle.dumps(pipeline), path="models/pipeline.pkl").save().to(
gpu, path="models"
)
llm = SelfHostedPipeline.from_pipeline(
pipeline="models/pipeline.pkl",
hardware=gpu,
model_reqs=model_reqs,
inference_fn=inference_fn,
)
output = llm.invoke("Say foo:") # type: ignore
assert isinstance(output, str)
def test_init_with_pipeline_fn() -> None:
"""Test initialization with a self-hosted HF pipeline."""
gpu = get_remote_instance()
llm = SelfHostedPipeline(
model_load_fn=load_pipeline,
hardware=gpu,
model_reqs=model_reqs,
inference_fn=inference_fn,
)
output = llm.invoke("Say foo:") # type: ignore
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_replicate.py | """Test Replicate API wrapper."""
from langchain_community.llms.replicate import Replicate
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
TEST_MODEL_HELLO = (
"replicate/hello-world:"
+ "5c7d5dc6dd8bf75c1acaa8565735e7986bc5b66206b55cca93cb72c9bf15ccaa"
)
TEST_MODEL_LANG = "meta/meta-llama-3-8b-instruct"
def test_replicate_call() -> None:
"""Test simple non-streaming call to Replicate."""
llm = Replicate(model=TEST_MODEL_HELLO)
output = llm.invoke("What is LangChain")
assert output
assert isinstance(output, str)
def test_replicate_streaming_call() -> None:
"""Test streaming call to Replicate."""
callback_handler = FakeCallbackHandler()
llm = Replicate(
streaming=True, callbacks=[callback_handler], model=TEST_MODEL_HELLO
)
output = llm.invoke("What is LangChain")
assert output
assert isinstance(output, str)
def test_replicate_model_kwargs() -> None:
"""Test simple non-streaming call to Replicate."""
llm = Replicate( # type: ignore[call-arg]
model=TEST_MODEL_LANG, model_kwargs={"max_new_tokens": 10, "temperature": 0.01}
)
long_output = llm.invoke("What is LangChain")
llm = Replicate( # type: ignore[call-arg]
model=TEST_MODEL_LANG, model_kwargs={"max_new_tokens": 5, "temperature": 0.01}
)
short_output = llm.invoke("What is LangChain")
assert len(short_output) < len(long_output)
assert llm.model_kwargs == {"max_new_tokens": 5, "temperature": 0.01}
def test_replicate_input() -> None:
llm = Replicate(model=TEST_MODEL_LANG, input={"max_new_tokens": 10})
assert llm.model_kwargs == {"max_new_tokens": 10}
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_rwkv.py | # flake8: noqa
"""Test rwkv wrapper."""
import os
from urllib.request import urlretrieve
from langchain_community.llms import RWKV
import warnings
import pytest
def _download_model() -> str:
"""Download model.
    From https://huggingface.co/BlinkDL/rwkv-4-pile-169m/resolve/main/RWKV-4-Pile-169M-20220807-8023.pth.
"""
model_url = "https://huggingface.co/BlinkDL/rwkv-4-pile-169m/resolve/main/RWKV-4-Pile-169M-20220807-8023.pth"
tokenizer_url = (
"https://github.com/BlinkDL/ChatRWKV/blob/main/v2/20B_tokenizer.json?raw=true"
)
local_filename = model_url.split("/")[-1]
if not os.path.exists("20B_tokenizer.json"):
urlretrieve(tokenizer_url, "20B_tokenizer.json")
if not os.path.exists(local_filename):
urlretrieve(model_url, local_filename)
return local_filename
@pytest.mark.filterwarnings("ignore::UserWarning:")
def test_rwkv_inference() -> None:
"""Test valid gpt4all inference."""
model_path = _download_model()
llm = RWKV(model=model_path, tokens_path="20B_tokenizer.json", strategy="cpu fp32")
output = llm.invoke("Say foo:")
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_outlines.py | # flake8: noqa
"""Test Outlines wrapper."""
from typing import Generator
import re
import platform
import pytest
from langchain_community.llms.outlines import Outlines
from pydantic import BaseModel
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
MODEL = "microsoft/Phi-3-mini-4k-instruct"
LLAMACPP_MODEL = "microsoft/Phi-3-mini-4k-instruct-gguf/Phi-3-mini-4k-instruct-q4.gguf"
BACKENDS = ["transformers", "llamacpp"]
if platform.system() != "Darwin":
BACKENDS.append("vllm")
if platform.system() == "Darwin":
BACKENDS.append("mlxlm")
@pytest.fixture(params=BACKENDS)
def llm(request: pytest.FixtureRequest) -> Outlines:
if request.param == "llamacpp":
return Outlines(model=LLAMACPP_MODEL, backend=request.param, max_tokens=100)
else:
return Outlines(model=MODEL, backend=request.param, max_tokens=100)
def test_outlines_inference(llm: Outlines) -> None:
"""Test valid outlines inference."""
output = llm.invoke("Say foo:")
assert isinstance(output, str)
assert len(output) > 1
def test_outlines_streaming(llm: Outlines) -> None:
"""Test streaming tokens from Outlines."""
generator = llm.stream("Q: How do you say 'hello' in Spanish?\n\nA:")
stream_results_string = ""
assert isinstance(generator, Generator)
for chunk in generator:
print(chunk)
assert isinstance(chunk, str)
stream_results_string += chunk
print(stream_results_string)
assert len(stream_results_string.strip()) > 1
def test_outlines_streaming_callback(llm: Outlines) -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
MIN_CHUNKS = 5
callback_handler = FakeCallbackHandler()
llm.callbacks = [callback_handler]
llm.verbose = True
llm.invoke("Q: Can you count to 10? A:'1, ")
assert callback_handler.llm_streams >= MIN_CHUNKS
def test_outlines_regex(llm: Outlines) -> None:
"""Test regex for generating a valid IP address"""
ip_regex = r"((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)"
llm.regex = ip_regex
assert llm.regex == ip_regex
output = llm.invoke("Q: What is the IP address of googles dns server?\n\nA: ")
assert isinstance(output, str)
assert re.match(
ip_regex, output
), f"Generated output '{output}' is not a valid IP address"
def test_outlines_type_constraints(llm: Outlines) -> None:
"""Test type constraints for generating an integer"""
llm.type_constraints = int
output = llm.invoke(
"Q: What is the answer to life, the universe, and everything?\n\nA: "
)
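    # int() raises ValueError if the output is not a valid integer literal;
    # note that a literal "0" would also fail this truthiness assert.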
assert int(output)
def test_outlines_json(llm: Outlines) -> None:
"""Test json for generating a valid JSON object"""
class Person(BaseModel):
name: str
llm.json_schema = Person
output = llm.invoke("Q: Who is the author of LangChain?\n\nA: ")
person = Person.model_validate_json(output)
assert isinstance(person, Person)
def test_outlines_grammar(llm: Outlines) -> None:
"""Test grammar for generating a valid arithmetic expression"""
llm.grammar = """
?start: expression
?expression: term (("+" | "-") term)*
?term: factor (("*" | "/") factor)*
?factor: NUMBER | "-" factor | "(" expression ")"
%import common.NUMBER
%import common.WS
%ignore WS
"""
output = llm.invoke("Here is a complex arithmetic expression: ")
# Validate the output is a non-empty string
assert (
isinstance(output, str) and output.strip()
), "Output should be a non-empty string"
# Use a simple regex to check if the output contains basic arithmetic operations and numbers
assert re.search(
r"[\d\+\-\*/\(\)]+", output
), f"Generated output '{output}' does not appear to be a valid arithmetic expression"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_tongyi.py | """Test Tongyi API wrapper."""
from langchain_core.outputs import LLMResult
from langchain_community.llms.tongyi import Tongyi
def test_tongyi_call() -> None:
"""Test valid call to tongyi."""
llm = Tongyi() # type: ignore[call-arg]
output = llm.invoke("who are you")
assert isinstance(output, str)
def test_tongyi_generate() -> None:
"""Test valid call to tongyi."""
llm = Tongyi() # type: ignore[call-arg]
output = llm.generate(["who are you"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
def test_tongyi_generate_stream() -> None:
"""Test valid call to tongyi."""
llm = Tongyi(streaming=True) # type: ignore[call-arg]
output = llm.generate(["who are you"])
print(output) # noqa: T201
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
def test_tongyi_with_param_alias() -> None:
"""Test tongyi parameters alias"""
llm = Tongyi(model="qwen-max", api_key="your-api_key") # type: ignore[call-arg]
assert llm.model_name == "qwen-max"
assert llm.dashscope_api_key == "your-api_key"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_konko.py | """Test Konko API wrapper.
In order to run this test, you need to have a Konko API key.
You'll then need to set the KONKO_API_KEY environment variable to your API key.
"""
import pytest
from langchain_community.llms import Konko
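# A common guard (a sketch, not in the original file): skip the whole module
# up front when the key is absent, instead of failing inside each test.
import os
if os.environ.get("KONKO_API_KEY") is None:
    pytest.skip("KONKO_API_KEY is not set", allow_module_level=True)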
def test_konko_call() -> None:
"""Test simple call to konko."""
llm = Konko(
model="mistralai/mistral-7b-v0.1",
temperature=0.2,
max_tokens=250,
)
output = llm.invoke("Say foo:")
assert llm._llm_type == "konko"
assert isinstance(output, str)
async def test_konko_acall() -> None:
"""Test simple call to konko."""
llm = Konko(
model="mistralai/mistral-7b-v0.1",
temperature=0.2,
max_tokens=250,
)
output = await llm.agenerate(["Say foo:"], stop=["bar"])
assert llm._llm_type == "konko"
output_text = output.generations[0][0].text
assert isinstance(output_text, str)
assert output_text.count("bar") <= 1
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_deepsparse.py | """Test DeepSparse wrapper."""
from langchain_community.llms import DeepSparse
def test_deepsparse_call() -> None:
"""Test valid call to DeepSparse."""
config = {"max_generated_tokens": 5, "use_deepsparse_cache": False}
llm = DeepSparse(
model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base-none",
config=config,
)
output = llm.invoke("def ")
assert isinstance(output, str)
assert len(output) > 1
assert output == "ids_to_names"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_sambanova.py | """Test sambanova API llm wrappers.
In order to run this test, you need a SambaStudio URL and API key,
and a SambaNova Cloud API key.
You'll then need to set the SAMBASTUDIO_URL, SAMBASTUDIO_API_KEY,
and SAMBANOVA_API_KEY environment variables.
"""
from langchain_community.llms.sambanova import SambaNovaCloud, SambaStudio
def test_sambanova_cloud_call() -> None:
"""Test simple non-streaming call to sambastudio."""
llm = SambaNovaCloud()
output = llm.invoke("What is LangChain")
assert output
assert isinstance(output, str)
def test_sambastudio_call() -> None:
"""Test simple non-streaming call to sambastudio."""
llm = SambaStudio()
output = llm.invoke("What is LangChain")
assert output
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_minimax.py | """Test Minimax API wrapper."""
from langchain_community.llms.minimax import Minimax
def test_minimax_call() -> None:
"""Test valid call to minimax."""
llm = Minimax(max_tokens=10) # type: ignore[call-arg]
output = llm.invoke("Hello world!")
assert isinstance(output, str)
def test_minimax_call_successful() -> None:
"""Test valid call to minimax."""
llm = Minimax() # type: ignore[call-arg]
output = llm.invoke(
"A chain is a serial assembly of connected pieces, called links, \
typically made of metal, with an overall character similar to that\
of a rope in that it is flexible and curved in compression but \
linear, rigid, and load-bearing in tension. A chain may consist\
of two or more links."
)
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_qianfan_endpoint.py | """Test Baidu Qianfan LLM Endpoint."""
from typing import Generator, cast
from langchain_core.outputs import LLMResult
from pydantic import SecretStr
from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint
def test_call() -> None:
"""Test valid call to qianfan."""
llm = QianfanLLMEndpoint() # type: ignore[call-arg]
output = llm.invoke("write a joke")
assert isinstance(output, str)
def test_generate() -> None:
"""Test valid call to qianfan."""
llm = QianfanLLMEndpoint() # type: ignore[call-arg]
output = llm.generate(["write a joke"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
def test_generate_stream() -> None:
"""Test valid call to qianfan."""
llm = QianfanLLMEndpoint() # type: ignore[call-arg]
output = llm.stream("write a joke")
assert isinstance(output, Generator)
async def test_qianfan_aio() -> None:
llm = QianfanLLMEndpoint(streaming=True) # type: ignore[call-arg]
async for token in llm.astream("hi qianfan."):
assert isinstance(token, str)
def test_rate_limit() -> None:
llm = QianfanLLMEndpoint(model="ERNIE-Bot", init_kwargs={"query_per_second": 2}) # type: ignore[call-arg]
assert llm.client._client._rate_limiter._sync_limiter._query_per_second == 2
output = llm.generate(["write a joke"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
def test_qianfan_with_param_alias() -> None:
"""Test with qianfan llm parameter alias."""
llm = QianfanLLMEndpoint( # type: ignore[call-arg]
api_key="your-api-key", # type: ignore[arg-type]
secret_key="your-secret-key", # type: ignore[arg-type]
timeout=50,
    )
assert cast(SecretStr, llm.qianfan_ak).get_secret_value() == "your-api-key"
assert cast(SecretStr, llm.qianfan_sk).get_secret_value() == "your-secret-key"
assert llm.request_timeout == 50
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_aviary.py | """Test Aviary API wrapper."""
from langchain_community.llms.aviary import Aviary
def test_aviary_call() -> None:
"""Test valid call to Anyscale."""
llm = Aviary()
output = llm.invoke("Say bar:")
print(f"llm answer:\n{output}") # noqa: T201
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/llms/test_propmptlayer_openai_chat.py | """Test PromptLayer OpenAIChat API wrapper."""
from pathlib import Path
import pytest
from langchain_community.llms.loading import load_llm
from langchain_community.llms.promptlayer_openai import PromptLayerOpenAIChat
def test_promptlayer_openai_chat_call() -> None:
"""Test valid call to promptlayer openai."""
llm = PromptLayerOpenAIChat(max_tokens=10) # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_promptlayer_openai_chat_stop_valid() -> None:
"""Test promptlayer openai stop logic on valid configuration."""
query = "write an ordered list of five items"
first_llm = PromptLayerOpenAIChat(stop="3", temperature=0) # type: ignore[call-arg]
first_output = first_llm.invoke(query)
second_llm = PromptLayerOpenAIChat(temperature=0) # type: ignore[call-arg]
second_output = second_llm.invoke(query, stop=["3"])
    # Stopping at "3" via the constructor or at call time should give the same output.
assert first_output == second_output
def test_promptlayer_openai_chat_stop_error() -> None:
"""Test promptlayer openai stop logic on bad configuration."""
llm = PromptLayerOpenAIChat(stop="3", temperature=0) # type: ignore[call-arg]
with pytest.raises(ValueError):
llm.invoke("write an ordered list of five items", stop=["\n"])
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading an promptlayer OpenAPI LLM."""
llm = PromptLayerOpenAIChat(max_tokens=10) # type: ignore[call-arg]
llm.save(file_path=tmp_path / "openai.yaml")
loaded_llm = load_llm(tmp_path / "openai.yaml")
assert loaded_llm == llm
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/tools/test_yahoo_finance_news.py | import pytest
from langchain_community.tools.yahoo_finance_news import YahooFinanceNewsTool
# skip all tests if yfinance is not installed
yfinance = pytest.importorskip("yfinance")
def test_success() -> None:
"""Test that the tool runs successfully."""
tool = YahooFinanceNewsTool()
query = "Microsoft"
result = tool.run(query)
assert result is not None
assert f"Company ticker {query} not found." not in result
def test_failure_no_ticker() -> None:
"""Test that the tool fails."""
tool = YahooFinanceNewsTool()
query = ""
result = tool.run(query)
assert f"Company ticker {query} not found." in result
def test_failure_wrong_ticker() -> None:
"""Test that the tool fails."""
tool = YahooFinanceNewsTool()
query = "NOT_A_COMPANY"
result = tool.run(query)
assert f"Company ticker {query} not found." in result
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/tools | lc_public_repos/langchain/libs/community/tests/integration_tests/tools/nuclia/test_nuclia.py | import base64
import json
import os
from pathlib import Path
from typing import Any
from unittest import mock
import pytest
from langchain_community.tools.nuclia.tool import NucliaUnderstandingAPI
README_PATH = Path(__file__).parents[4] / "README.md"
class FakeUploadResponse:
status_code: int = 200
text: str = "fake_uuid"
class FakePushResponse:
status_code: int = 200
def json(self) -> Any:
return {"uuid": "fake_uuid"}
class FakePullResponse:
status_code: int = 200
def json(self) -> Any:
return {
"status": "ok",
"payload": base64.b64encode(bytes('{"some": "data"}}', "utf-8")),
}
def FakeParseFromString(**args: Any) -> Any:
def ParseFromString(self: Any, data: str) -> None:
self.uuid = "fake_uuid"
return ParseFromString
def fakepost(**kwargs: Any) -> Any:
def fn(url: str, **kwargs: Any) -> Any:
if url.endswith("/processing/upload"):
return FakeUploadResponse()
elif url.endswith("/processing/push"):
return FakePushResponse()
else:
raise Exception("Invalid POST URL")
return fn
def fakeget(**kwargs: Any) -> Any:
def fn(url: str, **kwargs: Any) -> Any:
if url.endswith("/processing/pull"):
return FakePullResponse()
else:
raise Exception("Invalid GET URL")
return fn
@mock.patch.dict(os.environ, {"NUCLIA_NUA_KEY": "_a_key_"})
@pytest.mark.requires("nucliadb_protos")
def test_nuclia_tool() -> None:
with mock.patch(
"nucliadb_protos.writer_pb2.BrokerMessage.ParseFromString",
new_callable=FakeParseFromString,
):
with mock.patch("requests.post", new_callable=fakepost):
with mock.patch("requests.get", new_callable=fakeget):
nua = NucliaUnderstandingAPI(enable_ml=False)
uuid = nua.run(
{
"action": "push",
"id": "1",
"path": str(README_PATH),
"text": None,
}
)
assert uuid == "fake_uuid"
data = nua.run(
{"action": "pull", "id": "1", "path": None, "text": None}
)
assert json.loads(data)["uuid"] == "fake_uuid"
@pytest.mark.requires("nucliadb_protos")
async def test_async_call() -> None:
with mock.patch(
"nucliadb_protos.writer_pb2.BrokerMessage.ParseFromString",
new_callable=FakeParseFromString,
):
with mock.patch("requests.post", new_callable=fakepost):
with mock.patch("requests.get", new_callable=fakeget):
with mock.patch("os.environ.get", return_value="_a_key_"):
nua = NucliaUnderstandingAPI(enable_ml=False)
data = await nua.arun(
{
"action": "push",
"id": "1",
"path": str(README_PATH),
"text": None,
}
)
assert json.loads(data)["uuid"] == "fake_uuid"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/tools | lc_public_repos/langchain/libs/community/tests/integration_tests/tools/zenguard/test_zenguard.py | import os
from typing import Any, Dict, List
import pytest
from langchain_community.tools.zenguard.tool import Detector, ZenGuardTool
@pytest.fixture()
def zenguard_tool() -> ZenGuardTool:
if os.getenv("ZENGUARD_API_KEY") is None:
raise ValueError("ZENGUARD_API_KEY is not set in environment variable")
return ZenGuardTool()
def assert_successful_response_not_detected(response: Dict[str, Any]) -> None:
assert response is not None
assert "error" not in response, f"API returned an error: {response.get('error')}"
assert response.get("is_detected") is False, f"Prompt was detected: {response}"
def assert_detectors_response(
response: Dict[str, Any],
detectors: List[Detector],
) -> None:
assert response is not None
for detector in detectors:
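        # next() has no default here, so a missing detector raises
        # StopIteration instead of producing a clean assertion failure.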
common_response = next(
(
resp["common_response"]
for resp in response["responses"]
if resp["detector"] == detector.value
)
)
assert (
"err" not in common_response
), f"API returned an error: {common_response.get('err')}" # noqa: E501
assert (
common_response.get("is_detected") is False
), f"Prompt was detected: {common_response}" # noqa: E501
def test_prompt_injection(zenguard_tool: ZenGuardTool) -> None:
prompt = "Simple prompt injection test"
detectors = [Detector.PROMPT_INJECTION]
response = zenguard_tool.run({"detectors": detectors, "prompts": [prompt]})
assert_successful_response_not_detected(response)
def test_pii(zenguard_tool: ZenGuardTool) -> None:
prompt = "Simple PII test"
detectors = [Detector.PII]
response = zenguard_tool.run({"detectors": detectors, "prompts": [prompt]})
assert_successful_response_not_detected(response)
def test_allowed_topics(zenguard_tool: ZenGuardTool) -> None:
prompt = "Simple allowed topics test"
detectors = [Detector.ALLOWED_TOPICS]
response = zenguard_tool.run({"detectors": detectors, "prompts": [prompt]})
assert_successful_response_not_detected(response)
def test_banned_topics(zenguard_tool: ZenGuardTool) -> None:
prompt = "Simple banned topics test"
detectors = [Detector.BANNED_TOPICS]
response = zenguard_tool.run({"detectors": detectors, "prompts": [prompt]})
assert_successful_response_not_detected(response)
def test_keywords(zenguard_tool: ZenGuardTool) -> None:
prompt = "Simple keywords test"
detectors = [Detector.KEYWORDS]
response = zenguard_tool.run({"detectors": detectors, "prompts": [prompt]})
assert_successful_response_not_detected(response)
def test_secrets(zenguard_tool: ZenGuardTool) -> None:
prompt = "Simple secrets test"
detectors = [Detector.SECRETS]
response = zenguard_tool.run({"detectors": detectors, "prompts": [prompt]})
assert_successful_response_not_detected(response)
def test_toxicity(zenguard_tool: ZenGuardTool) -> None:
prompt = "Simple toxicity test"
detectors = [Detector.TOXICITY]
response = zenguard_tool.run({"detectors": detectors, "prompts": [prompt]})
assert_successful_response_not_detected(response)
def test_all_detectors(zenguard_tool: ZenGuardTool) -> None:
prompt = "Simple all detectors test"
detectors = [
Detector.ALLOWED_TOPICS,
Detector.BANNED_TOPICS,
Detector.KEYWORDS,
Detector.PII,
Detector.PROMPT_INJECTION,
Detector.SECRETS,
Detector.TOXICITY,
]
response = zenguard_tool.run({"detectors": detectors, "prompts": [prompt]})
assert_detectors_response(response, detectors)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/tools | lc_public_repos/langchain/libs/community/tests/integration_tests/tools/connery/test_service.py | """Integration test for Connery API Wrapper."""
from langchain_community.tools.connery import ConneryService
def test_list_actions() -> None:
"""Test for listing Connery Actions."""
connery = ConneryService()
output = connery._list_actions()
assert output is not None
assert len(output) > 0
def test_get_action() -> None:
"""Test for getting Connery Action."""
connery = ConneryService()
# This is the ID of the preinstalled action "Refresh plugin cache"
output = connery._get_action("CAF979E6D2FF4C8B946EEBAFCB3BA475")
assert output is not None
assert output.id == "CAF979E6D2FF4C8B946EEBAFCB3BA475"
def test_run_action_with_no_input() -> None:
"""Test for running Connery Action without input."""
connery = ConneryService()
# refreshPluginCache action from connery-io/connery-runner-administration plugin
output = connery._run_action("CAF979E6D2FF4C8B946EEBAFCB3BA475")
assert output is not None
assert output == {}
def test_run_action_with_input() -> None:
"""Test for running Connery Action with input."""
connery = ConneryService()
# summarizePublicWebpage action from connery-io/summarization-plugin plugin
output = connery._run_action(
"CA72DFB0AB4DF6C830B43E14B0782F70",
{"publicWebpageUrl": "http://www.paulgraham.com/vb.html"},
)
assert output is not None
assert output["summary"] is not None
assert len(output["summary"]) > 0
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/tools | lc_public_repos/langchain/libs/community/tests/integration_tests/tools/edenai/test_audio_text_to_speech.py | """Test EdenAi's text-to-speech tool.
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and will return dummy results)
You'll then need to set the EDENAI_API_KEY environment variable to your API key.
"""
from urllib.parse import urlparse
from langchain_community.tools.edenai import EdenAiTextToSpeechTool
def test_edenai_call() -> None:
"""Test simple call to edenai's text to speech endpoint."""
text2speech = EdenAiTextToSpeechTool( # type: ignore[call-arg]
providers=["amazon"], language="en", voice="MALE"
)
output = text2speech.invoke("hello")
parsed_url = urlparse(output)
assert text2speech.name == "edenai_text_to_speech"
assert text2speech.feature == "audio"
assert text2speech.subfeature == "text_to_speech"
assert isinstance(output, str)
assert parsed_url.scheme in ["http", "https"]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/tools | lc_public_repos/langchain/libs/community/tests/integration_tests/tools/edenai/test_ocr_identityparser.py | """Test EdenAi's identity parser tool.
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and will return dummy results)
You'll then need to set the EDENAI_API_KEY environment variable to your API key.
"""
from langchain_community.tools.edenai import EdenAiParsingIDTool
def test_edenai_call() -> None:
"""Test simple call to edenai's identity parser endpoint."""
id_parser = EdenAiParsingIDTool(providers=["amazon"], language="en") # type: ignore[call-arg]
output = id_parser.invoke(
"https://www.citizencard.com/images/citizencard-uk-id-card-2023.jpg"
)
assert id_parser.name == "edenai_identity_parsing"
assert id_parser.feature == "ocr"
assert id_parser.subfeature == "identity_parser"
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/tools | lc_public_repos/langchain/libs/community/tests/integration_tests/tools/edenai/test_audio_speech_to_text.py | """Test EdenAi's speech-to-text tool.
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and will return dummy results)
You'll then need to set the EDENAI_API_KEY environment variable to your API key.
"""
from langchain_community.tools.edenai import EdenAiSpeechToTextTool
def test_edenai_call() -> None:
"""Test simple call to edenai's speech to text endpoint."""
speech2text = EdenAiSpeechToTextTool(providers=["amazon"]) # type: ignore[call-arg]
output = speech2text.invoke(
"https://audio-samples.github.io/samples/mp3/blizzard_unconditional/sample-0.mp3"
)
assert speech2text.name == "edenai_speech_to_text"
assert speech2text.feature == "audio"
assert speech2text.subfeature == "speech_to_text_async"
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/tools | lc_public_repos/langchain/libs/community/tests/integration_tests/tools/edenai/test_image_explicitcontent.py | """Test EdenAi's image moderation tool.
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and will return dummy results)
You'll then need to set the EDENAI_API_KEY environment variable to your API key.
"""
from langchain_community.tools.edenai import EdenAiExplicitImageTool
def test_edenai_call() -> None:
"""Test simple call to edenai's image moderation endpoint."""
image_moderation = EdenAiExplicitImageTool(providers=["amazon"]) # type: ignore[call-arg]
output = image_moderation.invoke("https://static.javatpoint.com/images/objects.jpg")
assert image_moderation.name == "edenai_image_explicit_content_detection"
assert image_moderation.feature == "image"
assert image_moderation.subfeature == "explicit_content"
assert isinstance(output, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/tools | lc_public_repos/langchain/libs/community/tests/integration_tests/tools/edenai/test_ocr_invoiceparser.py | """Test EdenAi's invoice parser tool.
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and will return dummy results)
You'll then need to set the EDENAI_API_KEY environment variable to your API key.
"""
from langchain_community.tools.edenai import EdenAiParsingInvoiceTool
def test_edenai_call() -> None:
"""Test simple call to edenai's invoice parser endpoint."""
invoice_parser = EdenAiParsingInvoiceTool(providers=["amazon"], language="en") # type: ignore[call-arg]
output = invoice_parser.invoke(
"https://app.edenai.run/assets/img/data_1.72e3bdcc.png"
)
assert invoice_parser.name == "edenai_invoice_parsing"
assert invoice_parser.feature == "ocr"
assert invoice_parser.subfeature == "invoice_parser"
assert isinstance(output, str)
|