index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/test_embedchain.py | """Integration test for Embedchain."""
import os
from typing import Any
from unittest.mock import patch
import pytest
from langchain_core.documents import Document
from langchain_community.retrievers.embedchain import EmbedchainRetriever
try:
from embedchain import Pipeline
except ImportError:
pytest.skip("Requires embedchain", allow_module_level=True)
os.environ["OPENAI_API_KEY"] = "sk-xxxx"
context_value = [
{
"context": "this document is about John",
"metadata": {
"source": "source#1",
"doc_id": 123,
},
},
]
@pytest.mark.requires("embedchain")
# Patch decorators apply bottom-up: the first mock parameter corresponds to
# the innermost patch (Pipeline.add), the second to Pipeline.search.
@patch.object(Pipeline, "search", return_value=context_value)
@patch.object(Pipeline, "add", return_value=123)
def test_embedchain_retriever(mock_add: Any, mock_search: Any) -> None:
    """Round-trip through EmbedchainRetriever with Pipeline fully mocked."""
    retriever = EmbedchainRetriever.create()
    texts = [
        "This document is about John",
    ]
    for text in texts:
        # NOTE(review): add_texts is called with a single string, not a list;
        # this passes because Pipeline.add is mocked — confirm against the
        # real retriever API.
        retriever.add_texts(text)
    docs = retriever.invoke("doc about john")
    # The patched Pipeline.search returns exactly one context entry.
    assert len(docs) == 1
    for doc in docs:
        assert isinstance(doc, Document)
        assert doc.page_content
        assert doc.metadata
        assert len(list(doc.metadata.items())) > 0
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/test_qdrant_sparse_vector_retriever.py | import random
import uuid
from typing import List, Tuple
import pytest
from langchain_core.documents import Document
from langchain_community.retrievers import QdrantSparseVectorRetriever
from langchain_community.vectorstores.qdrant import QdrantException
def consistent_fake_sparse_encoder(
    query: str, size: int = 100, density: float = 0.7
) -> Tuple[List[int], List[float]]:
    """
    Generates a consistent fake sparse vector.

    Parameters:
    - query (str): The query string to make the function deterministic.
    - size (int): The size of the vector to generate.
    - density (float): The density of the vector to generate.

    Returns:
    - indices (list): List of indices where the non-zero elements are located.
    - values (list): List of corresponding float values at the non-zero indices.
    """
    import hashlib

    # Ensure density is within the valid range [0, 1]
    density = max(0.0, min(1.0, density))
    # BUG FIX: the builtin hash() is salted per process for str
    # (PYTHONHASHSEED), so the "consistent" vectors actually differed between
    # test runs. Derive the seed from a stable digest instead.
    seed = int.from_bytes(hashlib.sha256(query.encode("utf-8")).digest()[:8], "big")
    # Use a private Random instance so the global random state is untouched.
    rng = random.Random(seed)
    # Calculate the number of non-zero elements based on density
    num_non_zero_elements = int(size * density)
    # Generate random indices without replacement
    indices = sorted(rng.sample(range(size), num_non_zero_elements))
    # Generate random float values for the non-zero elements
    values = [rng.uniform(0.0, 1.0) for _ in range(num_non_zero_elements)]
    return indices, values
@pytest.fixture
def retriever() -> QdrantSparseVectorRetriever:
    """Build a retriever over a fresh in-memory Qdrant collection.

    Collection and sparse-vector names are randomized per test so no
    state leaks between tests.
    """
    from qdrant_client import QdrantClient, models

    client = QdrantClient(location=":memory:")
    collection_name = uuid.uuid4().hex
    vector_name = uuid.uuid4().hex
    # Sparse-only collection: no dense vector config is supplied.
    client.recreate_collection(
        collection_name,
        vectors_config={},
        sparse_vectors_config={
            vector_name: models.SparseVectorParams(
                index=models.SparseIndexParams(
                    on_disk=False,
                )
            )
        },
    )
    return QdrantSparseVectorRetriever(
        client=client,
        collection_name=collection_name,
        sparse_vector_name=vector_name,
        sparse_encoder=consistent_fake_sparse_encoder,
    )
def test_invalid_collection_name(retriever: QdrantSparseVectorRetriever) -> None:
    """A nonexistent collection name must raise QdrantException."""
    bad_kwargs = dict(
        client=retriever.client,
        collection_name="invalid collection",
        sparse_vector_name=retriever.sparse_vector_name,
        sparse_encoder=consistent_fake_sparse_encoder,
    )
    with pytest.raises(QdrantException) as exc_info:
        QdrantSparseVectorRetriever(**bad_kwargs)
    assert "does not exist" in str(exc_info.value)
def test_invalid_sparse_vector_name(retriever: QdrantSparseVectorRetriever) -> None:
    """A missing sparse vector name must raise QdrantException."""
    with pytest.raises(QdrantException) as exc_info:
        QdrantSparseVectorRetriever(
            sparse_encoder=consistent_fake_sparse_encoder,
            sparse_vector_name="invalid sparse vector",
            collection_name=retriever.collection_name,
            client=retriever.client,
        )
    assert "does not contain sparse vector" in str(exc_info.value)
def test_add_documents(retriever: QdrantSparseVectorRetriever) -> None:
    """add_documents indexes every document and returns one id per document."""
    documents = [
        Document(page_content="hello world", metadata={"a": 1}),
        Document(page_content="foo bar", metadata={"b": 2}),
        Document(page_content="baz qux", metadata={"c": 3}),
    ]
    ids = retriever.add_documents(documents)
    # FIX: the first batch's ids were previously assigned but never checked;
    # assert on them for consistency with the second batch below.
    assert len(ids) == 3
    assert retriever.client.count(retriever.collection_name, exact=True).count == 3
    # Documents without metadata are accepted as well.
    documents = [
        Document(page_content="hello world"),
        Document(page_content="foo bar"),
        Document(page_content="baz qux"),
    ]
    ids = retriever.add_documents(documents)
    assert len(ids) == 3
    assert retriever.client.count(retriever.collection_name, exact=True).count == 6
def test_add_texts(retriever: QdrantSparseVectorRetriever) -> None:
    """Texts are indexed both with and without metadata."""

    def point_count() -> int:
        # Exact count of points currently stored in the collection.
        return retriever.client.count(retriever.collection_name, exact=True).count

    retriever.add_texts(
        ["hello world", "foo bar", "baz qux"], [{"a": 1}, {"b": 2}, {"c": 3}]
    )
    assert point_count() == 3
    retriever.add_texts(["hello world", "foo bar", "baz qux"])
    assert point_count() == 6
def test_invoke(retriever: QdrantSparseVectorRetriever) -> None:
    """With k=1 the retriever returns exactly the best-matching document."""
    retriever.add_texts(["Hai there!", "Hello world!", "Foo bar baz!"])
    retriever.k = 1
    expected = [Document(page_content="Hai there!")]
    results = retriever.invoke("Hai there!")
    assert len(results) == retriever.k
    assert results == expected
    # A repeated call with the same query is deterministic.
    assert retriever.invoke("Hai there!") == expected
def test_invoke_with_filter(
    retriever: QdrantSparseVectorRetriever,
) -> None:
    """A Qdrant payload filter restricts results to matching metadata."""
    from qdrant_client import models

    corpus = ["Hai there!", "Hello world!", "Foo bar baz!"]
    payloads = [{"value": n} for n in (1, 2, 3)]
    retriever.add_texts(corpus, payloads)
    # Only the document whose metadata.value equals 2 may come back.
    condition = models.FieldCondition(
        key="metadata.value", match=models.MatchValue(value=2)
    )
    retriever.filter = models.Filter(must=[condition])
    results = retriever.invoke("Some query")
    assert results[0] == Document(page_content="Hello world!", metadata={"value": 2})
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/test_you.py | import os
from langchain_community.retrievers.you import YouRetriever
class TestYouRetriever:
    """Smoke tests for the You.com retriever (requires YDC_API_KEY)."""

    @classmethod
    def setup_class(cls) -> None:
        """Fail fast when the required API key is absent."""
        api_key = os.getenv("YDC_API_KEY")
        if not api_key:
            raise ValueError("YDC_API_KEY environment variable is not set")

    def test_invoke(self) -> None:
        """A basic query should return at least one document."""
        documents = YouRetriever().invoke("test")
        assert len(documents) > 0
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/test_kay.py | """Integration test for Kay.ai API Wrapper."""
import pytest
from langchain_core.documents import Document
from langchain_community.retrievers import KayAiRetriever
@pytest.mark.requires("kay")
def test_kay_retriever() -> None:
    """End-to-end smoke test against the Kay.ai company dataset."""
    retriever = KayAiRetriever.create(
        dataset_id="company",
        data_types=["10-K", "10-Q", "8-K", "PressRelease"],
        num_contexts=3,
    )
    results = retriever.invoke(
        "What were the biggest strategy changes and partnerships made by Roku "
        "in 2023?",
    )
    # num_contexts bounds the result count.
    assert len(results) == 3
    for document in results:
        assert isinstance(document, Document)
        assert document.page_content
        assert document.metadata
        assert len(document.metadata) > 0
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/test_pubmed.py | """Integration test for PubMed API Wrapper."""
from typing import List
import pytest
from langchain_core.documents import Document
from langchain_community.retrievers import PubMedRetriever
@pytest.fixture
def retriever() -> PubMedRetriever:
    """Fresh PubMedRetriever with default settings for each test."""
    return PubMedRetriever()  # type: ignore[call-arg]
def assert_docs(docs: List[Document]) -> None:
    """Check that every document carries exactly the expected PubMed metadata keys."""
    expected_keys = {"Copyright Information", "uid", "Title", "Published"}
    for document in docs:
        assert document.metadata
        assert set(document.metadata) == expected_keys
def test_load_success(retriever: PubMedRetriever) -> None:
    """Default settings return three well-formed results."""
    results = retriever.invoke("chatgpt")
    assert len(results) == 3
    assert_docs(results)
def test_load_success_top_k_results(retriever: PubMedRetriever) -> None:
    """Lowering top_k_results caps the number of hits."""
    retriever.top_k_results = 2
    results = retriever.invoke("chatgpt")
    assert len(results) == 2
    assert_docs(results)
def test_load_no_result(retriever: PubMedRetriever) -> None:
    """A nonsense query yields an empty result list."""
    assert not retriever.invoke("1605.08386WWW")
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/test_breebs.py | from typing import List
from langchain_core.documents import Document
from langchain_community.retrievers.breebs import BreebsRetriever
class TestBreebsRetriever:
    """Live query against a public Breeb knowledge base."""

    def test_breeb_query(self) -> None:
        """Every returned document must have content, a source and score 1."""
        retriever = BreebsRetriever("Parivoyage")
        documents: List[Document] = retriever.invoke(
            "What are the best churches to visit in Paris?"
        )
        assert isinstance(documents, list), "Documents should be a list"
        for document in documents:
            assert document.page_content, "Document page_content should not be None"
            assert document.metadata["source"], "Document metadata should contain 'source'"
            assert document.metadata["score"] == 1, "Document score should be equal to 1"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/test_google_vertex_ai_search.py | """Test Google Vertex AI Search retriever.
You need to create a Vertex AI Search app and populate it
with data to run the integration tests.
Follow the instructions in the example notebook:
google_vertex_ai_search.ipynb
to set up the app and configure authentication.
Set the following environment variables before the tests:
export PROJECT_ID=... - set to your Google Cloud project ID
export DATA_STORE_ID=... - the ID of the search engine to use for the test
"""
import os
import pytest
from langchain_core.documents import Document
from langchain_community.retrievers.google_vertex_ai_search import (
GoogleCloudEnterpriseSearchRetriever,
GoogleVertexAIMultiTurnSearchRetriever,
GoogleVertexAISearchRetriever,
)
@pytest.mark.requires("google.api_core")
def test_google_vertex_ai_search_invoke() -> None:
    """invoke() returns non-empty documents with id and source metadata."""
    retriever = GoogleVertexAISearchRetriever()
    results = retriever.invoke("What are Alphabet's Other Bets?")
    assert results
    for document in results:
        assert isinstance(document, Document)
        assert document.page_content
        assert document.metadata["id"]
        assert document.metadata["source"]
@pytest.mark.requires("google.api_core")
def test_google_vertex_ai_multiturnsearch_invoke() -> None:
    """Multi-turn retriever: invoke() returns documents with id and source."""
    retriever = GoogleVertexAIMultiTurnSearchRetriever()
    results = retriever.invoke("What are Alphabet's Other Bets?")
    assert results
    for document in results:
        assert isinstance(document, Document)
        assert document.page_content
        assert document.metadata["id"]
        assert document.metadata["source"]
@pytest.mark.requires("google.api_core")
def test_google_vertex_ai_search_enterprise_search_deprecation() -> None:
    """Test the deprecation of GoogleCloudEnterpriseSearchRetriever."""
    # Instantiating the legacy class at all must emit a deprecation warning.
    with pytest.warns(
        DeprecationWarning,
        match="GoogleCloudEnterpriseSearchRetriever is deprecated, use GoogleVertexAISearchRetriever",  # noqa: E501
    ):
        retriever = GoogleCloudEnterpriseSearchRetriever()
    # Setting the legacy SEARCH_ENGINE_ID env var must warn about the renamed
    # parameter. NOTE(review): this mutates process-global environment state
    # and is never cleaned up — consider monkeypatch.setenv.
    os.environ["SEARCH_ENGINE_ID"] = os.getenv("DATA_STORE_ID", "data_store_id")
    with pytest.warns(
        DeprecationWarning,
        match="The `search_engine_id` parameter is deprecated. Use `data_store_id` instead.",  # noqa: E501
    ):
        retriever = GoogleCloudEnterpriseSearchRetriever()
    # Check that mapped methods still work.
    documents = retriever.invoke("What are Alphabet's Other Bets?")
    assert len(documents) > 0
    for doc in documents:
        assert isinstance(doc, Document)
        assert doc.page_content
        assert doc.metadata["id"]
        assert doc.metadata["source"]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/test_wikipedia.py | """Integration test for Wikipedia Retriever."""
from typing import List
import pytest
from langchain_core.documents import Document
from langchain_community.retrievers import WikipediaRetriever
@pytest.fixture
def retriever() -> WikipediaRetriever:
    """Fresh WikipediaRetriever with default settings for each test."""
    return WikipediaRetriever()  # type: ignore[call-arg]
def assert_docs(docs: List[Document], all_meta: bool = False) -> None:
    """Validate content and metadata keys on every document.

    With ``all_meta`` the docs must carry extra keys beyond the core set;
    without it, exactly the core set.
    """
    required = {"title", "summary", "source"}
    for document in docs:
        assert document.page_content
        assert document.metadata
        keys = set(document.metadata)
        assert keys.issuperset(required)
        if all_meta:
            assert len(keys) > len(required)
        else:
            assert len(keys) == len(required)
def test_load_success(retriever: WikipediaRetriever) -> None:
    """Default retriever returns between 2 and 3 docs with core metadata."""
    results = retriever.invoke("HUNTER X HUNTER")
    assert 1 < len(results) <= 3
    assert_docs(results, all_meta=False)
def test_load_success_all_meta(retriever: WikipediaRetriever) -> None:
    """Enabling load_all_available_meta adds extra metadata fields."""
    retriever.load_all_available_meta = True
    results = retriever.invoke("HUNTER X HUNTER")
    assert 1 < len(results) <= 3
    assert_docs(results, all_meta=True)
def test_load_success_init_args() -> None:
    """Constructor kwargs are honoured: one result with full metadata."""
    retriever = WikipediaRetriever(  # type: ignore[call-arg]
        lang="en", top_k_results=1, load_all_available_meta=True
    )
    hits = retriever.invoke("HUNTER X HUNTER")
    assert len(hits) == 1
    assert_docs(hits, all_meta=True)
def test_load_success_init_args_more() -> None:
    """A large top_k_results yields exactly that many core-metadata docs."""
    retriever = WikipediaRetriever(  # type: ignore[call-arg]
        lang="en", top_k_results=20, load_all_available_meta=False
    )
    hits = retriever.invoke("HUNTER X HUNTER")
    assert len(hits) == 20
    assert_docs(hits, all_meta=False)
def test_load_no_result(retriever: WikipediaRetriever) -> None:
    """A garbage query produces no documents."""
    query = (
        "NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL"
    )
    assert not retriever.invoke(query)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/test_azure_ai_search.py | """Test Azure AI Search wrapper."""
from langchain_core.documents import Document
from langchain_community.retrievers.azure_ai_search import (
AzureAISearchRetriever,
AzureCognitiveSearchRetriever,
)
def test_azure_ai_search_invoke() -> None:
    """Test valid call to Azure AI Search.

    Requires `service_name`, azure search `api_key` and an `index_name`
    to be supplied to AzureAISearchRetriever; api_version, aiosession and
    top_k are optional.
    """
    results = AzureAISearchRetriever().invoke("what is langchain?")
    for document in results:
        assert isinstance(document, Document)
        assert document.page_content
    # top_k=1 caps the number of returned documents.
    capped = AzureAISearchRetriever(top_k=1).invoke("what is langchain?")
    assert len(capped) <= 1
async def test_azure_ai_search_ainvoke() -> None:
    """Test valid async call to Azure AI Search.

    Requires `service_name`, azure search `api_key` and an `index_name`
    to be supplied to AzureAISearchRetriever.
    """
    results = await AzureAISearchRetriever().ainvoke("what is langchain?")
    for document in results:
        assert isinstance(document, Document)
        assert document.page_content
def test_azure_cognitive_search_invoke() -> None:
    """Test valid call to Azure Cognitive Search.

    Exercises the legacy class name for backwards compatibility.
    """
    results = AzureCognitiveSearchRetriever().invoke("what is langchain?")
    for document in results:
        assert isinstance(document, Document)
        assert document.page_content
    # top_k=1 caps the number of returned documents.
    capped = AzureCognitiveSearchRetriever(top_k=1).invoke("what is langchain?")
    assert len(capped) <= 1
async def test_azure_cognitive_search_ainvoke() -> None:
    """Test valid async call to Azure Cognitive Search.

    Exercises the legacy class name for backwards compatibility.
    """
    results = await AzureCognitiveSearchRetriever().ainvoke("what is langchain?")
    for document in results:
        assert isinstance(document, Document)
        assert document.page_content
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/test_dria_index.py | import pytest
from langchain_core.documents import Document
from langchain_community.retrievers import DriaRetriever
# Set a fixture for DriaRetriever
@pytest.fixture
def dria_retriever() -> DriaRetriever:
    """DriaRetriever bound to a public knowledge-base contract.

    NOTE(review): the API key is a placeholder; a real key is required for
    this fixture to work against the live service.
    """
    api_key = "<YOUR_API_KEY>"
    contract_id = "B16z9i3rRi0KEeibrzzMU33YTB4WDtos1vdiMBTmKgs"
    retriever = DriaRetriever(api_key=api_key, contract_id=contract_id)
    return retriever
def test_dria_retriever(dria_retriever: DriaRetriever) -> None:
    """Index one text in Dria and retrieve it back."""
    sample = {
        "text": "Langchain",
        "metadata": {
            "source": "source#1",
            "document_id": "doc123",
            "content": "Langchain",
        },
    }
    dria_retriever.add_texts([sample])
    # Assuming invoke returns a list of Document instances
    docs = dria_retriever.invoke("Langchain")
    assert len(docs) > 0, "Expected at least one document"
    first = docs[0]
    assert isinstance(first, Document), "Expected a Document instance"
    assert isinstance(first.page_content, str), (
        "Expected document content type to be string"
    )
    assert isinstance(first.metadata, dict), (
        "Expected document metadata content to be a dictionary"
    )
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/test_merger_retriever.py | from langchain.retrievers.merger_retriever import MergerRetriever
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_community.embeddings import OpenAIEmbeddings
def test_merger_retriever_get_relevant_docs() -> None:
    """Merge one hit from each of two single-result retrievers."""
    texts_group_a = [
        "This is a document about the Boston Celtics",
        # FIX: a missing trailing comma previously fused the next two strings
        # into a single element via implicit string concatenation.
        "Fly me to the moon is one of my favourite songs.",
        "I simply love going to the movies",
    ]
    texts_group_b = [
        "This is a document about the Poenix Suns",
        "The Boston Celtics won the game by 20 points",
        "Real stupidity beats artificial intelligence every time. TP",
    ]
    embeddings = OpenAIEmbeddings()
    retriever_a = InMemoryVectorStore.from_texts(
        texts_group_a, embedding=embeddings
    ).as_retriever(search_kwargs={"k": 1})
    retriever_b = InMemoryVectorStore.from_texts(
        texts_group_b, embedding=embeddings
    ).as_retriever(search_kwargs={"k": 1})
    # The Lord of the Retrievers.
    lotr = MergerRetriever(retrievers=[retriever_a, retriever_b])
    actual = lotr.invoke("Tell me about the Celtics")
    # One document per sub-retriever, each the best Celtics match in its group.
    assert len(actual) == 2
    assert texts_group_a[0] in [d.page_content for d in actual]
    assert texts_group_b[1] in [d.page_content for d in actual]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/test_contextual_compression.py | from langchain.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
def test_contextual_compression_retriever_get_relevant_docs() -> None:
    """The embeddings filter drops the off-topic document."""
    texts = [
        "This is a document about the Boston Celtics",
        "The Boston Celtics won the game by 20 points",
        "I simply love going to the movies",
    ]
    embeddings = OpenAIEmbeddings()
    retriever = ContextualCompressionRetriever(
        base_compressor=EmbeddingsFilter(
            embeddings=embeddings, similarity_threshold=0.75
        ),
        base_retriever=FAISS.from_texts(texts, embedding=embeddings).as_retriever(
            search_kwargs={"k": len(texts)}
        ),
    )
    results = retriever.invoke("Tell me about the Celtics")
    assert len(results) == 2
    # The movie sentence must have been filtered out.
    assert texts[-1] not in [d.page_content for d in results]
async def test_acontextual_compression_retriever_get_relevant_docs() -> None:
    """Async variant of the test above.

    FIX: the coroutine previously called the sync ``invoke`` and never
    awaited anything, so the async code path was untested; use ``ainvoke``.
    """
    texts = [
        "This is a document about the Boston Celtics",
        "The Boston Celtics won the game by 20 points",
        "I simply love going to the movies",
    ]
    embeddings = OpenAIEmbeddings()
    base_compressor = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.75)
    base_retriever = FAISS.from_texts(texts, embedding=embeddings).as_retriever(
        search_kwargs={"k": len(texts)}
    )
    retriever = ContextualCompressionRetriever(
        base_compressor=base_compressor, base_retriever=base_retriever
    )
    actual = await retriever.ainvoke("Tell me about the Celtics")
    assert len(actual) == 2
    # The off-topic movie sentence must have been filtered out.
    assert texts[-1] not in [d.page_content for d in actual]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/test_weaviate_hybrid_search.py | """Test Weaviate functionality."""
import logging
import os
import uuid
from typing import Generator, Union
from uuid import uuid4
import pytest
from langchain_core.documents import Document
from langchain_community.retrievers.weaviate_hybrid_search import (
WeaviateHybridSearchRetriever,
)
logging.basicConfig(level=logging.DEBUG)
"""
cd tests/integration_tests/vectorstores/docker-compose
docker compose -f weaviate.yml up
"""
class TestWeaviateHybridSearchRetriever:
    """Integration tests against a local Weaviate (see module-level docstring)."""

    @classmethod
    def setup_class(cls) -> None:
        # Hybrid search in this setup relies on OpenAI vectorization.
        if not os.getenv("OPENAI_API_KEY"):
            raise ValueError("OPENAI_API_KEY environment variable is not set")

    @pytest.fixture(scope="class", autouse=True)
    def weaviate_url(self) -> Union[str, Generator[str, None, None]]:  # type: ignore[return]
        """Return the weaviate url."""
        from weaviate import Client

        url = "http://localhost:8080"
        yield url
        # Clear the test index
        client = Client(url)
        client.schema.delete_all()

    @pytest.mark.vcr(ignore_localhost=True)
    def test_invoke(self, weaviate_url: str) -> None:
        """Test end to end construction and MRR search."""
        from weaviate import Client

        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        client = Client(weaviate_url)
        # Unique index name per test so runs do not collide.
        retriever = WeaviateHybridSearchRetriever(
            client=client,
            index_name=f"LangChain_{uuid4().hex}",
            text_key="text",
            attributes=["page"],
        )
        for i, text in enumerate(texts):
            retriever.add_documents(
                [Document(page_content=text, metadata=metadatas[i])]
            )
        output = retriever.invoke("foo")
        # Exact match ranks first; the remaining order is asserted as observed.
        assert output == [
            Document(page_content="foo", metadata={"page": 0}),
            Document(page_content="baz", metadata={"page": 2}),
            Document(page_content="bar", metadata={"page": 1}),
        ]

    @pytest.mark.vcr(ignore_localhost=True)
    def test_invoke_with_score(self, weaviate_url: str) -> None:
        """Test end to end construction and MRR search."""
        from weaviate import Client

        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        client = Client(weaviate_url)
        retriever = WeaviateHybridSearchRetriever(
            client=client,
            index_name=f"LangChain_{uuid4().hex}",
            text_key="text",
            attributes=["page"],
        )
        for i, text in enumerate(texts):
            retriever.add_documents(
                [Document(page_content=text, metadata=metadatas[i])]
            )
        # score=True surfaces Weaviate's ranking details under "_additional".
        output = retriever.invoke("foo", score=True)
        for doc in output:
            assert "_additional" in doc.metadata

    @pytest.mark.vcr(ignore_localhost=True)
    def test_invoke_with_filter(self, weaviate_url: str) -> None:
        """Test end to end construction and MRR search."""
        from weaviate import Client

        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        client = Client(weaviate_url)
        retriever = WeaviateHybridSearchRetriever(
            client=client,
            index_name=f"LangChain_{uuid4().hex}",
            text_key="text",
            attributes=["page"],
        )
        for i, text in enumerate(texts):
            retriever.add_documents(
                [Document(page_content=text, metadata=metadatas[i])]
            )
        # Restrict results to documents whose "page" attribute equals 0.
        where_filter = {"path": ["page"], "operator": "Equal", "valueNumber": 0}
        output = retriever.invoke("foo", where_filter=where_filter)
        assert output == [
            Document(page_content="foo", metadata={"page": 0}),
        ]

    @pytest.mark.vcr(ignore_localhost=True)
    def test_invoke_with_uuids(self, weaviate_url: str) -> None:
        """Test end to end construction and MRR search."""
        from weaviate import Client

        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        # Weaviate replaces the object if the UUID already exists
        uuids = [uuid.uuid5(uuid.NAMESPACE_DNS, "same-name") for text in texts]
        client = Client(weaviate_url)
        retriever = WeaviateHybridSearchRetriever(
            client=client,
            index_name=f"LangChain_{uuid4().hex}",
            text_key="text",
            attributes=["page"],
        )
        for i, text in enumerate(texts):
            retriever.add_documents(
                [Document(page_content=text, metadata=metadatas[i])], uuids=[uuids[i]]
            )
        output = retriever.invoke("foo")
        # All three adds reused the same UUID, so only the last object survives.
        assert len(output) == 1
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/document_compressors/test_embeddings_filter.py | """Integration test for embedding-based relevant doc filtering."""
import numpy as np
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain_core.documents import Document
from langchain_community.document_transformers.embeddings_redundant_filter import (
_DocumentWithState,
)
from langchain_community.embeddings import OpenAIEmbeddings
def test_embeddings_filter() -> None:
    """Only the two food-related documents pass the similarity filter."""
    texts = [
        "What happened to all of my cookies?",
        "I wish there were better Italian restaurants in my neighborhood.",
        "My favorite color is green",
    ]
    documents = [Document(page_content=t) for t in texts]
    relevant_filter = EmbeddingsFilter(
        embeddings=OpenAIEmbeddings(), similarity_threshold=0.75
    )
    kept = relevant_filter.compress_documents(documents, "What did I say about food?")
    assert len(kept) == 2
    assert {d.page_content for d in kept} == set(texts[:2])
async def test_aembeddings_filter() -> None:
    """Async counterpart of test_embeddings_filter.

    FIX: renamed from ``atest_embeddings_filter`` — pytest only collects
    functions whose names start with ``test_``, so the original never ran.
    Also exercises the async API; the original called the sync method
    without awaiting anything.
    """
    texts = [
        "What happened to all of my cookies?",
        "I wish there were better Italian restaurants in my neighborhood.",
        "My favorite color is green",
    ]
    docs = [Document(page_content=t) for t in texts]
    embeddings = OpenAIEmbeddings()
    relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.75)
    actual = await relevant_filter.acompress_documents(
        docs, "What did I say about food?"
    )
    assert len(actual) == 2
    assert len(set(texts[:2]).intersection([d.page_content for d in actual])) == 2
def test_embeddings_filter_with_state() -> None:
    """Precomputed embeddings on _DocumentWithState bypass re-embedding."""
    texts = [
        "What happened to all of my cookies?",
        "I wish there were better Italian restaurants in my neighborhood.",
        "My favorite color is green",
    ]
    query = "What did I say about food?"
    embeddings = OpenAIEmbeddings()
    embedded_query = embeddings.embed_query(query)
    # Every doc pretends to embed to the zero vector except the last one,
    # which carries the query's own embedding and therefore matches exactly.
    zero_state = {"embedded_doc": np.zeros(len(embedded_query))}
    docs = [_DocumentWithState(page_content=t, state=zero_state) for t in texts]
    docs[-1].state = {"embedded_doc": embedded_query}
    relevant_filter = EmbeddingsFilter(  # type: ignore[call-arg]
        embeddings=embeddings, similarity_threshold=0.75, return_similarity_scores=True
    )
    results = relevant_filter.compress_documents(docs, query)
    assert len(results) == 1
    assert results[0].page_content == texts[-1]
async def test_aembeddings_filter_with_state() -> None:
    """Async variant of test_embeddings_filter_with_state.

    FIX: the coroutine previously called the sync ``compress_documents``
    and never awaited anything, leaving the async code path untested; use
    ``acompress_documents``.
    """
    texts = [
        "What happened to all of my cookies?",
        "I wish there were better Italian restaurants in my neighborhood.",
        "My favorite color is green",
    ]
    query = "What did I say about food?"
    embeddings = OpenAIEmbeddings()
    embedded_query = embeddings.embed_query(query)
    # All docs fake a zero-vector embedding except the last, which is given
    # the query's own embedding and therefore matches exactly.
    state = {"embedded_doc": np.zeros(len(embedded_query))}
    docs = [_DocumentWithState(page_content=t, state=state) for t in texts]
    docs[-1].state = {"embedded_doc": embedded_query}
    relevant_filter = EmbeddingsFilter(  # type: ignore[call-arg]
        embeddings=embeddings, similarity_threshold=0.75, return_similarity_scores=True
    )
    actual = await relevant_filter.acompress_documents(docs, query)
    assert len(actual) == 1
    assert texts[-1] == actual[0].page_content
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/document_compressors/test_base.py | """Integration test for compression pipelines."""
from langchain.retrievers.document_compressors import (
DocumentCompressorPipeline,
EmbeddingsFilter,
)
from langchain_core.documents import Document
from langchain_text_splitters.character import CharacterTextSplitter
from langchain_community.document_transformers import EmbeddingsRedundantFilter
from langchain_community.embeddings import OpenAIEmbeddings
def test_document_compressor_pipeline() -> None:
    """Split, de-duplicate, then relevance-filter down to one sentence."""
    embeddings = OpenAIEmbeddings()
    pipeline_filter = DocumentCompressorPipeline(
        transformers=[
            CharacterTextSplitter(chunk_size=20, chunk_overlap=0, separator=". "),
            EmbeddingsRedundantFilter(embeddings=embeddings),
            EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.8),
        ]
    )
    texts = [
        "This sentence is about cows",
        "This sentence was about cows",
        "foo bar baz",
    ]
    docs = [Document(page_content=". ".join(texts))]
    compressed = pipeline_filter.compress_documents(docs, "Tell me about farm animals")
    assert len(compressed) == 1
    # Either cow sentence may survive the redundancy filter.
    assert compressed[0].page_content in texts[:2]
async def test_adocument_compressor_pipeline() -> None:
    """Async variant: split, de-duplicate, then relevance-filter."""
    embeddings = OpenAIEmbeddings()
    pipeline_filter = DocumentCompressorPipeline(
        transformers=[
            CharacterTextSplitter(chunk_size=20, chunk_overlap=0, separator=". "),
            EmbeddingsRedundantFilter(embeddings=embeddings),
            EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.8),
        ]
    )
    texts = [
        "This sentence is about cows",
        "This sentence was about cows",
        "foo bar baz",
    ]
    docs = [Document(page_content=". ".join(texts))]
    compressed = await pipeline_filter.acompress_documents(
        docs, "Tell me about farm animals"
    )
    assert len(compressed) == 1
    # Either cow sentence may survive the redundancy filter.
    assert compressed[0].page_content in texts[:2]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/document_compressors/test_chain_extract.py | """Integration test for LLMChainExtractor."""
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain_core.documents import Document
from langchain_community.chat_models import ChatOpenAI
def test_llm_chain_extractor() -> None:
    """The extractor keeps Roman-Empire sentences and drops the rest."""
    texts = [
        "The Roman Empire followed the Roman Republic.",
        "I love chocolate chip cookies—my mother makes great cookies.",
        "The first Roman emperor was Caesar Augustus.",
        "Don't you just love Caesar salad?",
        "The Roman Empire collapsed in 476 AD after the fall of Rome.",
        "Let's go to Olive Garden!",
    ]
    doc = Document(page_content=" ".join(texts))
    compressor = LLMChainExtractor.from_llm(ChatOpenAI())
    actual = compressor.compress_documents([doc], "Tell me about the Roman Empire")[
        0
    ].page_content
    # Even indices are on-topic; odd indices are distractors.
    expected_returned = [0, 2, 4]
    expected_not_returned = [1, 3, 5]
    # FIX (idiom): use generator expressions — all([...]) built throwaway lists.
    assert all(texts[i] in actual for i in expected_returned)
    assert all(texts[i] not in actual for i in expected_not_returned)
def test_llm_chain_extractor_empty() -> None:
    """Entirely irrelevant input should compress to an empty result."""
    texts = [
        "I love chocolate chip cookies—my mother makes great cookies.",
        "Don't you just love Caesar salad?",
        "Let's go to Olive Garden!",
    ]
    document = Document(page_content=" ".join(texts))
    compressor = LLMChainExtractor.from_llm(ChatOpenAI())
    compressed = compressor.compress_documents(
        [document], "Tell me about the Roman Empire"
    )
    assert not compressed
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/document_compressors/test_chain_filter.py | """Integration test for llm-based relevant doc filtering."""
from langchain.retrievers.document_compressors import LLMChainFilter
from langchain_core.documents import Document
from langchain_community.chat_models import ChatOpenAI
def test_llm_chain_filter() -> None:
    """The LLM filter keeps only the two food-related documents."""
    texts = [
        "What happened to all of my cookies?",
        "I wish there were better Italian restaurants in my neighborhood.",
        "My favorite color is green",
    ]
    documents = [Document(page_content=t) for t in texts]
    doc_filter = LLMChainFilter.from_llm(llm=ChatOpenAI())
    kept = doc_filter.compress_documents(documents, "Things I said related to food")
    assert len(kept) == 2
    assert {d.page_content for d in kept} == set(texts[:2])
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/docarray/fixtures.py | from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, Generator, Tuple
import numpy as np
import pytest
from pydantic import Field
if TYPE_CHECKING:
from docarray.index import (
ElasticDocIndex,
HnswDocumentIndex,
InMemoryExactNNIndex,
QdrantDocumentIndex,
WeaviateDocumentIndex,
)
from docarray.typing import NdArray
from qdrant_client.http import models as rest
from langchain_community.embeddings import FakeEmbeddings
@pytest.fixture
def init_weaviate() -> (
    Generator[
        Tuple[WeaviateDocumentIndex, Dict[str, Any], FakeEmbeddings],
        None,
        None,
    ]
):
    """Yield a Weaviate docarray index with 100 fake docs, a year filter, and
    the embedding function used to build it.

    Requires a local Weaviate instance:
    cd tests/integration_tests/vectorstores/docker-compose
    docker compose -f weaviate.yml up
    """
    from docarray import BaseDoc
    from docarray.index import (
        WeaviateDocumentIndex,
    )

    class WeaviateDoc(BaseDoc):
        # When initializing the Weaviate index, denote the field
        # you want to search on with `is_embedding=True`
        title: str
        title_embedding: NdArray[32] = Field(is_embedding=True)  # type: ignore
        other_emb: NdArray[32]  # type: ignore
        year: int

    embeddings = FakeEmbeddings(size=32)
    # initialize WeaviateDocumentIndex against the local docker instance
    dbconfig = WeaviateDocumentIndex.DBConfig(host="http://localhost:8080")
    weaviate_db = WeaviateDocumentIndex[WeaviateDoc](
        db_config=dbconfig, index_name="docarray_retriever"
    )
    # index data: 100 docs with deterministic fake embeddings, year == i
    weaviate_db.index(
        [
            WeaviateDoc(
                title=f"My document {i}",
                title_embedding=np.array(embeddings.embed_query(f"fake emb {i}")),
                other_emb=np.array(embeddings.embed_query(f"other fake emb {i}")),
                year=i,
            )
            for i in range(100)
        ]
    )
    # build a filter query (Weaviate GraphQL `where` syntax; note valueInt
    # is passed as a string here — Weaviate coerces it)
    filter_query = {"path": ["year"], "operator": "LessThanEqual", "valueInt": "90"}
    yield weaviate_db, filter_query, embeddings
    # teardown: wipe the whole schema so subsequent test runs start clean
    weaviate_db._client.schema.delete_all()
@pytest.fixture
def init_elastic() -> (
    Generator[Tuple[ElasticDocIndex, Dict[str, Any], FakeEmbeddings], None, None]
):
    """Yield an Elasticsearch docarray index with 100 fake docs, a year filter,
    and the embedding function used to build it.

    Requires a local Elasticsearch instance:
    cd tests/integration_tests/vectorstores/docker-compose
    docker-compose -f elasticsearch.yml up
    """
    from docarray import BaseDoc
    from docarray.index import (
        ElasticDocIndex,
    )

    class MyDoc(BaseDoc):
        title: str
        title_embedding: NdArray[32]  # type: ignore
        other_emb: NdArray[32]  # type: ignore
        year: int

    embeddings = FakeEmbeddings(size=32)
    # initialize ElasticDocIndex against the local docker instance
    elastic_db = ElasticDocIndex[MyDoc](
        hosts="http://localhost:9200", index_name="docarray_retriever"
    )
    # index data: 100 docs with deterministic fake embeddings, year == i
    elastic_db.index(
        [
            MyDoc(
                title=f"My document {i}",
                title_embedding=np.array(embeddings.embed_query(f"fake emb {i}")),
                other_emb=np.array(embeddings.embed_query(f"other fake emb {i}")),
                year=i,
            )
            for i in range(100)
        ]
    )
    # build a filter query (Elasticsearch range query DSL)
    filter_query = {"range": {"year": {"lte": 90}}}
    yield elastic_db, filter_query, embeddings
    # teardown: drop the test index
    elastic_db._client.indices.delete(index="docarray_retriever")
@pytest.fixture
def init_qdrant() -> Tuple[QdrantDocumentIndex, rest.Filter, FakeEmbeddings]:
    """Build an in-memory Qdrant docarray index with 100 fake docs and a
    year-range filter; return (index, filter, embeddings)."""
    from docarray import BaseDoc
    from docarray.index import QdrantDocumentIndex

    class MyDoc(BaseDoc):
        title: str
        title_embedding: NdArray[32]  # type: ignore
        other_emb: NdArray[32]  # type: ignore
        year: int

    embedder = FakeEmbeddings(size=32)
    # Qdrant runs fully in-memory for the test run — no external service.
    qdrant_db = QdrantDocumentIndex[MyDoc](
        QdrantDocumentIndex.DBConfig(path=":memory:")
    )
    # index 100 docs with deterministic fake embeddings, year == idx
    batch = []
    for idx in range(100):
        batch.append(
            MyDoc(
                title=f"My document {idx}",
                title_embedding=np.array(embedder.embed_query(f"fake emb {idx}")),
                other_emb=np.array(embedder.embed_query(f"other fake emb {idx}")),
                year=idx,
            )
        )
    qdrant_db.index(batch)
    # filter matching docs whose year lies in [10, 90)
    year_filter = rest.Filter(
        must=[
            rest.FieldCondition(
                key="year",
                range=rest.Range(
                    gte=10,
                    lt=90,
                ),
            )
        ]
    )
    return qdrant_db, year_filter, embedder
@pytest.fixture
def init_in_memory() -> Tuple[InMemoryExactNNIndex, Dict[str, Any], FakeEmbeddings]:
    """Build an exact-NN in-memory docarray index with 100 fake docs and a
    year filter; return (index, filter, embeddings)."""
    from docarray import BaseDoc
    from docarray.index import InMemoryExactNNIndex

    class MyDoc(BaseDoc):
        title: str
        title_embedding: NdArray[32]  # type: ignore
        other_emb: NdArray[32]  # type: ignore
        year: int

    embedder = FakeEmbeddings(size=32)
    # purely in-process index — nothing external to start or clean up
    memory_db = InMemoryExactNNIndex[MyDoc]()
    # index 100 docs with deterministic fake embeddings, year == idx
    memory_db.index(
        [
            MyDoc(
                title=f"My document {idx}",
                title_embedding=np.array(embedder.embed_query(f"fake emb {idx}")),
                other_emb=np.array(embedder.embed_query(f"other fake emb {idx}")),
                year=idx,
            )
            for idx in range(100)
        ]
    )
    # filter matching docs whose year is at most 90
    year_filter = {"year": {"$lte": 90}}
    return memory_db, year_filter, embedder
@pytest.fixture
def init_hnsw(
    tmp_path: Path,
) -> Tuple[HnswDocumentIndex, Dict[str, Any], FakeEmbeddings]:
    """Build an HNSW docarray index (on-disk, under pytest's tmp_path) with
    100 fake docs and a year filter; return (index, filter, embeddings)."""
    from docarray import BaseDoc
    from docarray.index import (
        HnswDocumentIndex,
    )

    class MyDoc(BaseDoc):
        title: str
        title_embedding: NdArray[32]  # type: ignore
        other_emb: NdArray[32]  # type: ignore
        year: int

    embedder = FakeEmbeddings(size=32)
    # initialize HnswDocumentIndex (was mislabelled InMemoryExactNNIndex before)
    hnsw_db = HnswDocumentIndex[MyDoc](work_dir=tmp_path)
    # index 100 docs with deterministic fake embeddings, year == idx
    hnsw_db.index(
        [
            MyDoc(
                title=f"My document {idx}",
                title_embedding=np.array(embedder.embed_query(f"fake emb {idx}")),
                other_emb=np.array(embedder.embed_query(f"other fake emb {idx}")),
                year=idx,
            )
            for idx in range(100)
        ]
    )
    # filter matching docs whose year is at most 90
    year_filter = {"year": {"$lte": 90}}
    return hnsw_db, year_filter, embedder
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/docarray/test_backends.py | from typing import Any
import pytest
from langchain_community.retrievers import DocArrayRetriever
from tests.integration_tests.retrievers.docarray.fixtures import ( # noqa: F401
init_elastic,
init_hnsw,
init_in_memory,
init_qdrant,
init_weaviate,
)
@pytest.mark.parametrize(
    "backend",
    ["init_hnsw", "init_in_memory", "init_qdrant", "init_elastic", "init_weaviate"],
)
def test_backends(request: Any, backend: Any) -> None:
    """Exercise DocArrayRetriever against every supported docarray backend."""
    index, filter_query, embeddings = request.getfixturevalue(backend)

    def check_single_hit(docs: Any, filtered: bool) -> None:
        # Exactly one hit, built from `title`, exposing only id/year metadata
        # (the non-search embedding field must not leak into metadata).
        assert len(docs) == 1
        assert "My document" in docs[0].page_content
        assert "id" in docs[0].metadata and "year" in docs[0].metadata
        assert "other_emb" not in docs[0].metadata
        if filtered:
            assert docs[0].metadata["year"] <= 90

    # plain similarity search
    retriever = DocArrayRetriever(
        index=index,
        embeddings=embeddings,
        search_field="title_embedding",
        content_field="title",
    )
    check_single_hit(retriever.invoke("my docs"), filtered=False)

    # similarity search restricted by the backend-specific filter
    retriever = DocArrayRetriever(
        index=index,
        embeddings=embeddings,
        search_field="title_embedding",
        content_field="title",
        filters=filter_query,
    )
    check_single_hit(retriever.invoke("my docs"), filtered=True)

    # MMR search with the same filter
    retriever = DocArrayRetriever(
        index=index,
        embeddings=embeddings,
        search_field="title_embedding",
        search_type="mmr",  # type: ignore[arg-type]
        content_field="title",
        filters=filter_query,
    )
    check_single_hit(retriever.invoke("my docs"), filtered=True)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/graph_vectorstores/test_upgrade_to_cassandra.py | """Test of Upgrading to Apache Cassandra graph vector store class:
`CassandraGraphVectorStore` from an existing table used
by the Cassandra vector store class: `Cassandra`
"""
from __future__ import annotations
import json
import os
from contextlib import contextmanager
from typing import Any, Generator, Iterable, Optional, Tuple, Union
import pytest
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_community.graph_vectorstores import CassandraGraphVectorStore
from langchain_community.utilities.cassandra import SetupMode
from langchain_community.vectorstores import Cassandra
TEST_KEYSPACE = "graph_test_keyspace"
TABLE_NAME_ALLOW_INDEXING = "allow_graph_table"
TABLE_NAME_DEFAULT = "default_graph_table"
TABLE_NAME_DENY_INDEXING = "deny_graph_table"
class ParserEmbeddings(Embeddings):
    """Deterministic embedding stub for tests.

    A text that parses as a JSON list of `dimension` floats embeds as that
    very vector; any other text embeds as the all-zeros vector.
    """

    def __init__(self, dimension: int) -> None:
        self.dimension = dimension

    def embed_query(self, text: str) -> list[float]:
        try:
            parsed = json.loads(text)
        except json.JSONDecodeError:
            # Not JSON: fall back to a zero vector of the right size.
            return [0.0] * self.dimension
        assert len(parsed) == self.dimension
        return parsed

    async def aembed_query(self, text: str) -> list[float]:
        return self.embed_query(text)

    def embed_documents(self, texts: list[str]) -> list[list[float]]:
        return [self.embed_query(text) for text in texts]

    async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
        return self.embed_documents(texts)
@pytest.fixture
def embedding_d2() -> Embeddings:
    """Two-dimensional parser embeddings shared by the tests in this module."""
    return ParserEmbeddings(2)
class CassandraSession:
    """Plain holder pairing a live Cassandra driver session with the name of
    the test table it operates on."""

    table_name: str
    session: Any

    def __init__(self, table_name: str, session: Any):
        self.table_name = table_name
        self.session = session
@contextmanager
def get_cassandra_session(
    table_name: str, drop: bool = True
) -> Generator[CassandraSession, None, None]:
    """Connect to Cassandra, ensure the test keyspace exists, and yield a
    `CassandraSession` for `table_name`.

    With `drop=True` the table is removed first so the test starts clean.
    The driver session and cluster are always shut down on exit.
    """
    from cassandra.cluster import Cluster

    # Contact points come from the environment when provided; otherwise the
    # driver default (localhost) is used.
    if "CASSANDRA_CONTACT_POINTS" in os.environ:
        raw = os.environ["CASSANDRA_CONTACT_POINTS"]
        contact_points = [point.strip() for point in raw.split(",") if point.strip()]
    else:
        contact_points = None
    cluster = Cluster(contact_points)
    session = cluster.connect()
    try:
        session.execute(
            (
                f"CREATE KEYSPACE IF NOT EXISTS {TEST_KEYSPACE}"
                " WITH replication = "
                "{'class': 'SimpleStrategy', 'replication_factor': 1}"
            )
        )
        if drop:
            session.execute(f"DROP TABLE IF EXISTS {TEST_KEYSPACE}.{table_name}")
        yield CassandraSession(table_name=table_name, session=session)
    finally:
        # Always release driver resources, even if the test body raised.
        session.shutdown()
        cluster.shutdown()
@contextmanager
def vector_store(
    embedding: Embeddings,
    table_name: str,
    setup_mode: SetupMode,
    metadata_indexing: Union[Tuple[str, Iterable[str]], str] = "all",
    drop: bool = True,
) -> Generator[Cassandra, None, None]:
    """Yield a plain `Cassandra` vector store bound to a fresh test session."""
    with get_cassandra_session(table_name=table_name, drop=drop) as cassandra:
        store = Cassandra(
            table_name=cassandra.table_name,
            keyspace=TEST_KEYSPACE,
            session=cassandra.session,
            embedding=embedding,
            setup_mode=setup_mode,
            metadata_indexing=metadata_indexing,
        )
        yield store
@contextmanager
def graph_vector_store(
    embedding: Embeddings,
    table_name: str,
    setup_mode: SetupMode,
    metadata_deny_list: Optional[list[str]] = None,
    drop: bool = True,
) -> Generator[CassandraGraphVectorStore, None, None]:
    """Yield a `CassandraGraphVectorStore` bound to a fresh test session."""
    with get_cassandra_session(table_name=table_name, drop=drop) as cassandra:
        store = CassandraGraphVectorStore(
            table_name=cassandra.table_name,
            keyspace=TEST_KEYSPACE,
            session=cassandra.session,
            embedding=embedding,
            setup_mode=setup_mode,
            metadata_deny_list=metadata_deny_list,
        )
        yield store
def _vs_indexing_policy(table_name: str) -> Union[Tuple[str, Iterable[str]], str]:
    """Map a test table name to the metadata-indexing policy it was created with."""
    policies: dict = {
        TABLE_NAME_ALLOW_INDEXING: ("allowlist", ["test"]),
        TABLE_NAME_DEFAULT: "all",
        TABLE_NAME_DENY_INDEXING: ("denylist", ["test"]),
    }
    if table_name not in policies:
        msg = f"Unknown table_name: {table_name} in _vs_indexing_policy()"
        raise ValueError(msg)
    return policies[table_name]
class TestUpgradeToGraphVectorStore:
    """Verify that a table written by the plain `Cassandra` vector store can be
    reopened as a `CassandraGraphVectorStore` and read back unchanged."""

    @pytest.mark.parametrize(
        ("table_name", "gvs_setup_mode", "gvs_metadata_deny_list"),
        [
            (TABLE_NAME_DEFAULT, SetupMode.SYNC, None),
            (TABLE_NAME_DENY_INDEXING, SetupMode.SYNC, ["test"]),
            (TABLE_NAME_DEFAULT, SetupMode.OFF, None),
            (TABLE_NAME_DENY_INDEXING, SetupMode.OFF, ["test"]),
            # for this one, even though the passed policy doesn't
            # match the policy used to create the collection,
            # there is no error since the SetupMode is OFF and
            # no attempt is made to re-create the collection.
            (TABLE_NAME_DENY_INDEXING, SetupMode.OFF, None),
        ],
        ids=[
            "default_upgrade_no_policy_sync",
            "deny_list_upgrade_same_policy_sync",
            "default_upgrade_no_policy_off",
            "deny_list_upgrade_same_policy_off",
            "deny_list_upgrade_change_policy_off",
        ],
    )
    def test_upgrade_to_gvs_success_sync(
        self,
        *,
        embedding_d2: Embeddings,
        gvs_setup_mode: SetupMode,
        table_name: str,
        # None is a valid parametrized value above, hence Optional.
        gvs_metadata_deny_list: Optional[list[str]],
    ) -> None:
        """Write one doc via `Cassandra`, then read it back through a
        `CassandraGraphVectorStore` opened on the same (not-dropped) table."""
        doc_id = "AL"
        doc_al = Document(id=doc_id, page_content="[-1, 9]", metadata={"label": "AL"})
        # Create vector store using SetupMode.SYNC
        with vector_store(
            embedding=embedding_d2,
            table_name=table_name,
            setup_mode=SetupMode.SYNC,
            metadata_indexing=_vs_indexing_policy(table_name=table_name),
            drop=True,
        ) as v_store:
            # load a document to the vector store
            v_store.add_documents([doc_al])
            # get the document from the vector store
            v_doc = v_store.get_by_document_id(document_id=doc_id)
            assert v_doc is not None
            assert v_doc.page_content == doc_al.page_content
        # Create a GRAPH Vector Store using the existing collection from above
        # with setup_mode=gvs_setup_mode and indexing_policy=gvs_indexing_policy
        with graph_vector_store(
            embedding=embedding_d2,
            table_name=table_name,
            setup_mode=gvs_setup_mode,
            metadata_deny_list=gvs_metadata_deny_list,
            drop=False,
        ) as gv_store:
            # get the document from the GRAPH vector store
            gv_doc = gv_store.get_by_document_id(document_id=doc_id)
            assert gv_doc is not None
            assert gv_doc.page_content == doc_al.page_content

    @pytest.mark.parametrize(
        ("table_name", "gvs_setup_mode", "gvs_metadata_deny_list"),
        [
            (TABLE_NAME_DEFAULT, SetupMode.ASYNC, None),
            (TABLE_NAME_DENY_INDEXING, SetupMode.ASYNC, ["test"]),
        ],
        ids=[
            "default_upgrade_no_policy_async",
            "deny_list_upgrade_same_policy_async",
        ],
    )
    async def test_upgrade_to_gvs_success_async(
        self,
        *,
        embedding_d2: Embeddings,
        gvs_setup_mode: SetupMode,
        table_name: str,
        # None is a valid parametrized value above, hence Optional.
        gvs_metadata_deny_list: Optional[list[str]],
    ) -> None:
        """Async variant: write via `Cassandra` (ASYNC setup), then read back
        through a `CassandraGraphVectorStore` on the same table."""
        doc_id = "AL"
        doc_al = Document(id=doc_id, page_content="[-1, 9]", metadata={"label": "AL"})
        # Create vector store using SetupMode.ASYNC
        with vector_store(
            embedding=embedding_d2,
            table_name=table_name,
            setup_mode=SetupMode.ASYNC,
            metadata_indexing=_vs_indexing_policy(table_name=table_name),
            drop=True,
        ) as v_store:
            # load a document to the vector store
            await v_store.aadd_documents([doc_al])
            # get the document from the vector store
            v_doc = await v_store.aget_by_document_id(document_id=doc_id)
            assert v_doc is not None
            assert v_doc.page_content == doc_al.page_content
        # Create a GRAPH Vector Store using the existing collection from above
        # with setup_mode=gvs_setup_mode and indexing_policy=gvs_indexing_policy
        with graph_vector_store(
            embedding=embedding_d2,
            table_name=table_name,
            setup_mode=gvs_setup_mode,
            metadata_deny_list=gvs_metadata_deny_list,
            drop=False,
        ) as gv_store:
            # get the document from the GRAPH vector store
            gv_doc = await gv_store.aget_by_document_id(document_id=doc_id)
            assert gv_doc is not None
            assert gv_doc.page_content == doc_al.page_content
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/graph_vectorstores/test_cassandra.py | """Test of Apache Cassandra graph vector store class `CassandraGraphVectorStore`"""
import json
import os
import random
from contextlib import contextmanager
from typing import Any, Generator, Iterable, List, Optional
import pytest
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_community.graph_vectorstores import CassandraGraphVectorStore
from langchain_community.graph_vectorstores.base import Node
from langchain_community.graph_vectorstores.links import (
METADATA_LINKS_KEY,
Link,
add_links,
)
from tests.integration_tests.cache.fake_embeddings import (
AngularTwoDimensionalEmbeddings,
FakeEmbeddings,
)
TEST_KEYSPACE = "graph_test_keyspace"
class ParserEmbeddings(Embeddings):
    """Deterministic embedding stub: a text that parses as a JSON list of
    `dimension` floats embeds as that vector; anything else embeds as zeros."""

    def __init__(self, dimension: int) -> None:
        self.dimension = dimension

    def embed_documents(self, texts: list[str]) -> list[list[float]]:
        return [self.embed_query(text) for text in texts]

    def embed_query(self, text: str) -> list[float]:
        try:
            parsed = json.loads(text)
        except json.JSONDecodeError:
            # Not JSON: fall back to a zero vector of the right size.
            return [0.0] * self.dimension
        assert len(parsed) == self.dimension
        return parsed
@pytest.fixture
def embedding_d2() -> Embeddings:
    """Two-dimensional parser embeddings shared by the tests in this module."""
    return ParserEmbeddings(2)
class EarthEmbeddings(Embeddings):
    """Embeds 'earth'-themed texts near distinct anchor points on the unit
    circle (with tiny random jitter), and everything else far away."""

    def get_vector_near(self, value: float) -> List[float]:
        # Anchor on the unit circle at x == value, then add a small jitter so
        # repeated embeddings are close but not identical.
        anchor = [value, (1 - value**2) ** 0.5]
        jitter = random.random() / 100.0
        return [anchor[0] + jitter, anchor[1] - jitter]

    def embed_documents(self, texts: list[str]) -> list[list[float]]:
        return [self.embed_query(text) for text in texts]

    def embed_query(self, text: str) -> list[float]:
        tokens = set(text.lower().split())
        if "earth" in tokens:
            return self.get_vector_near(0.9)
        if {"planet", "world", "globe", "sphere"}.intersection(tokens):
            return self.get_vector_near(0.8)
        return self.get_vector_near(0.1)
def _result_ids(docs: Iterable[Document]) -> List[Optional[str]]:
    """Collect the ``id`` of every document, preserving iteration order."""
    ids: List[Optional[str]] = []
    for doc in docs:
        ids.append(doc.id)
    return ids
@pytest.fixture
def graph_vector_store_docs() -> list[Document]:
    """
    This is a set of Documents to pre-populate a graph vector store,
    with entries placed in a certain way.

    Space of the entries (under Euclidean similarity):

                      A0    (*)
        ....        AL   AR       <....
        :              |              :
        :              |  ^           :
        v              |  .           v
                       |   :
           TR          |   :          BL
    T0 --------------x--------------- B0
           TL         |   :           BR
                      |   :
                      |  .
                      | .
                      |
                   FL   FR
                      F0

    the query point is meant to be at (*).
    the A are bidirectionally with B
    the A are outgoing to T
    the A are incoming from F
    The links are like: L with L, 0 with 0 and R with R.
    """
    docs_a = [
        Document(id="AL", page_content="[-1, 9]", metadata={"label": "AL"}),
        Document(id="A0", page_content="[0, 10]", metadata={"label": "A0"}),
        Document(id="AR", page_content="[1, 9]", metadata={"label": "AR"}),
    ]
    docs_b = [
        Document(id="BL", page_content="[9, 1]", metadata={"label": "BL"}),
        Document(id="B0", page_content="[10, 0]", metadata={"label": "B0"}),
        # BUGFIX: this entry previously reused id "BL", colliding with the
        # first B document (same primary key in the store) so "BR" was
        # unreachable by id and "BL" was silently overwritten.
        Document(id="BR", page_content="[9, -1]", metadata={"label": "BR"}),
    ]
    docs_f = [
        Document(id="FL", page_content="[1, -9]", metadata={"label": "FL"}),
        Document(id="F0", page_content="[0, -10]", metadata={"label": "F0"}),
        Document(id="FR", page_content="[-1, -9]", metadata={"label": "FR"}),
    ]
    docs_t = [
        Document(id="TL", page_content="[-9, -1]", metadata={"label": "TL"}),
        Document(id="T0", page_content="[-10, 0]", metadata={"label": "T0"}),
        Document(id="TR", page_content="[-9, 1]", metadata={"label": "TR"}),
    ]
    # Tag links so that L docs link with L, 0 with 0, and R with R.
    for doc_a, suffix in zip(docs_a, ["l", "0", "r"]):
        add_links(doc_a, Link.bidir(kind="ab_example", tag=f"tag_{suffix}"))
        add_links(doc_a, Link.outgoing(kind="at_example", tag=f"tag_{suffix}"))
        add_links(doc_a, Link.incoming(kind="af_example", tag=f"tag_{suffix}"))
    for doc_b, suffix in zip(docs_b, ["l", "0", "r"]):
        add_links(doc_b, Link.bidir(kind="ab_example", tag=f"tag_{suffix}"))
    for doc_t, suffix in zip(docs_t, ["l", "0", "r"]):
        add_links(doc_t, Link.incoming(kind="at_example", tag=f"tag_{suffix}"))
    for doc_f, suffix in zip(docs_f, ["l", "0", "r"]):
        add_links(doc_f, Link.outgoing(kind="af_example", tag=f"tag_{suffix}"))
    return docs_a + docs_b + docs_f + docs_t
class CassandraSession:
    """Plain holder pairing a live Cassandra driver session with the name of
    the test table it operates on."""

    table_name: str
    session: Any

    def __init__(self, table_name: str, session: Any):
        self.table_name = table_name
        self.session = session
@contextmanager
def get_cassandra_session(
    table_name: str, drop: bool = True
) -> Generator[CassandraSession, None, None]:
    """Connect to Cassandra, ensure the test keyspace exists, and yield a
    `CassandraSession` for `table_name`.

    With `drop=True` the table is removed first so the test starts clean.
    The driver session and cluster are always shut down on exit.
    """
    from cassandra.cluster import Cluster

    # Contact points come from the environment when provided; otherwise the
    # driver default (localhost) is used.
    if "CASSANDRA_CONTACT_POINTS" in os.environ:
        raw = os.environ["CASSANDRA_CONTACT_POINTS"]
        contact_points = [point.strip() for point in raw.split(",") if point.strip()]
    else:
        contact_points = None
    cluster = Cluster(contact_points)
    session = cluster.connect()
    try:
        session.execute(
            (
                f"CREATE KEYSPACE IF NOT EXISTS {TEST_KEYSPACE}"
                " WITH replication = "
                "{'class': 'SimpleStrategy', 'replication_factor': 1}"
            )
        )
        if drop:
            session.execute(f"DROP TABLE IF EXISTS {TEST_KEYSPACE}.{table_name}")
        yield CassandraSession(table_name=table_name, session=session)
    finally:
        # Always release driver resources, even if the test body raised.
        session.shutdown()
        cluster.shutdown()
@pytest.fixture(scope="function")
def graph_vector_store_angular(
    table_name: str = "graph_test_table",
) -> Generator[CassandraGraphVectorStore, None, None]:
    """Graph store whose embeddings place texts on a 2-D circle by angle."""
    with get_cassandra_session(table_name=table_name) as cassandra:
        store = CassandraGraphVectorStore(
            embedding=AngularTwoDimensionalEmbeddings(),
            session=cassandra.session,
            keyspace=TEST_KEYSPACE,
            table_name=cassandra.table_name,
        )
        yield store
@pytest.fixture(scope="function")
def graph_vector_store_earth(
    table_name: str = "graph_test_table",
) -> Generator[CassandraGraphVectorStore, None, None]:
    """Graph store using the 'earth'-keyword embeddings."""
    with get_cassandra_session(table_name=table_name) as cassandra:
        store = CassandraGraphVectorStore(
            embedding=EarthEmbeddings(),
            session=cassandra.session,
            keyspace=TEST_KEYSPACE,
            table_name=cassandra.table_name,
        )
        yield store
@pytest.fixture(scope="function")
def graph_vector_store_fake(
    table_name: str = "graph_test_table",
) -> Generator[CassandraGraphVectorStore, None, None]:
    """Graph store using the generic fake embeddings."""
    with get_cassandra_session(table_name=table_name) as cassandra:
        store = CassandraGraphVectorStore(
            embedding=FakeEmbeddings(),
            session=cassandra.session,
            keyspace=TEST_KEYSPACE,
            table_name=cassandra.table_name,
        )
        yield store
@pytest.fixture(scope="function")
def graph_vector_store_d2(
    embedding_d2: Embeddings,
    table_name: str = "graph_test_table",
) -> Generator[CassandraGraphVectorStore, None, None]:
    """Graph store using the two-dimensional JSON-parsing embeddings."""
    with get_cassandra_session(table_name=table_name) as cassandra:
        store = CassandraGraphVectorStore(
            embedding=embedding_d2,
            session=cassandra.session,
            keyspace=TEST_KEYSPACE,
            table_name=cassandra.table_name,
        )
        yield store
@pytest.fixture(scope="function")
def populated_graph_vector_store_d2(
    graph_vector_store_d2: CassandraGraphVectorStore,
    graph_vector_store_docs: list[Document],
) -> Generator[CassandraGraphVectorStore, None, None]:
    """The d2 graph store pre-loaded with the linked A/B/F/T document set."""
    store = graph_vector_store_d2
    store.add_documents(graph_vector_store_docs)
    yield store
def test_mmr_traversal(graph_vector_store_angular: CassandraGraphVectorStore) -> None:
    """Test end to end construction and MMR search.

    The embedding function used here ensures `texts` become
    the following vectors on a circle (numbered v0 through v3):

           ______ v2
          /      \
         /        |  v1
    v3  |         .     | query
         |        /   v0
         |______/          (N.B. very crude drawing)

    With fetch_k==2 and k==2, when query is at (1, ),
    one expects that v2 and v0 are returned (in some order)
    because v1 is "too close" to v0 (and v0 is closer than v1)).

    Both v2 and v3 are reachable via edges from v0, so once it is
    selected, those are both considered.
    """
    v0 = Node(
        id="v0",
        text="-0.124",
        links=[
            Link.outgoing(kind="explicit", tag="link"),
        ],
    )
    v1 = Node(
        id="v1",
        text="+0.127",
    )
    v2 = Node(
        id="v2",
        text="+0.25",
        links=[
            Link.incoming(kind="explicit", tag="link"),
        ],
    )
    v3 = Node(
        id="v3",
        text="+1.0",
        links=[
            Link.incoming(kind="explicit", tag="link"),
        ],
    )
    g_store = graph_vector_store_angular
    g_store.add_nodes([v0, v1, v2, v3])
    results = g_store.mmr_traversal_search("0.0", k=2, fetch_k=2)
    assert _result_ids(results) == ["v0", "v2"]
    # With max depth 0, no edges are traversed, so this doesn't reach v2 or v3.
    # So it ends up picking "v1" even though it's similar to "v0".
    results = g_store.mmr_traversal_search("0.0", k=2, fetch_k=2, depth=0)
    assert _result_ids(results) == ["v0", "v1"]
    # With max depth 0 but higher `fetch_k`, we encounter v2
    results = g_store.mmr_traversal_search("0.0", k=2, fetch_k=3, depth=0)
    assert _result_ids(results) == ["v0", "v2"]
    # v0 score is .46, v2 score is 0.16 so it won't be chosen.
    results = g_store.mmr_traversal_search("0.0", k=2, score_threshold=0.2)
    assert _result_ids(results) == ["v0"]
    # with k=4 we should get all of the documents.
    results = g_store.mmr_traversal_search("0.0", k=4)
    assert _result_ids(results) == ["v0", "v2", "v1", "v3"]
def test_write_retrieve_keywords(
    graph_vector_store_earth: CassandraGraphVectorStore,
) -> None:
    """Similarity and traversal search over parent and keyword links, using
    the 'earth'-keyword embeddings."""
    greetings = Node(
        id="greetings",
        text="Typical Greetings",
        links=[
            Link.incoming(kind="parent", tag="parent"),
        ],
    )
    node1 = Node(
        id="doc1",
        text="Hello World",
        links=[
            Link.outgoing(kind="parent", tag="parent"),
            Link.bidir(kind="kw", tag="greeting"),
            Link.bidir(kind="kw", tag="world"),
        ],
    )
    node2 = Node(
        id="doc2",
        text="Hello Earth",
        links=[
            Link.outgoing(kind="parent", tag="parent"),
            Link.bidir(kind="kw", tag="greeting"),
            Link.bidir(kind="kw", tag="earth"),
        ],
    )
    g_store = graph_vector_store_earth
    g_store.add_nodes(nodes=[greetings, node1, node2])
    # Doc2 is more similar, but World and Earth are similar enough that doc1 also
    # shows up.
    results: Iterable[Document] = g_store.similarity_search("Earth", k=2)
    assert _result_ids(results) == ["doc2", "doc1"]
    results = g_store.similarity_search("Earth", k=1)
    assert _result_ids(results) == ["doc2"]
    # depth=0 means no traversal beyond the similarity hits
    results = g_store.traversal_search("Earth", k=2, depth=0)
    assert _result_ids(results) == ["doc2", "doc1"]
    results = g_store.traversal_search("Earth", k=2, depth=1)
    assert _result_ids(results) == ["doc2", "doc1", "greetings"]
    # K=1 only pulls in doc2 (Hello Earth)
    results = g_store.traversal_search("Earth", k=1, depth=0)
    assert _result_ids(results) == ["doc2"]
    # K=1 only pulls in doc2 (Hello Earth). Depth=1 traverses to parent and via
    # keyword edge.
    results = g_store.traversal_search("Earth", k=1, depth=1)
    assert set(_result_ids(results)) == {"doc2", "doc1", "greetings"}
def test_metadata(graph_vector_store_fake: CassandraGraphVectorStore) -> None:
    """Plain metadata fields and links both round-trip through the store."""
    doc_a = Node(
        id="a",
        text="A",
        metadata={"other": "some other field"},
        links=[
            Link.incoming(kind="hyperlink", tag="http://a"),
            Link.bidir(kind="other", tag="foo"),
        ],
    )
    g_store = graph_vector_store_fake
    g_store.add_nodes([doc_a])
    results = g_store.similarity_search("A")
    assert len(results) == 1
    assert results[0].id == "a"
    metadata = results[0].metadata
    assert metadata["other"] == "some other field"
    # links come back under the well-known metadata key, as the same Link set
    assert set(metadata[METADATA_LINKS_KEY]) == {
        Link.incoming(kind="hyperlink", tag="http://a"),
        Link.bidir(kind="other", tag="foo"),
    }
class TestCassandraGraphVectorStore:
def test_gvs_similarity_search_sync(
    self,
    populated_graph_vector_store_d2: CassandraGraphVectorStore,
) -> None:
    """Simple (non-graph) similarity search on a graph vector store."""
    g_store = populated_graph_vector_store_d2
    ss_response = g_store.similarity_search(query="[2, 10]", k=2)
    ss_labels = [doc.metadata["label"] for doc in ss_response]
    # nearest neighbours of (2, 10) are AR at (1, 9) then A0 at (0, 10)
    assert ss_labels == ["AR", "A0"]
    ss_by_v_response = g_store.similarity_search_by_vector(embedding=[2, 10], k=2)
    ss_by_v_labels = [doc.metadata["label"] for doc in ss_by_v_response]
    assert ss_by_v_labels == ["AR", "A0"]
async def test_gvs_similarity_search_async(
    self,
    populated_graph_vector_store_d2: CassandraGraphVectorStore,
) -> None:
    """Simple (non-graph) similarity search on a graph vector store (async)."""
    g_store = populated_graph_vector_store_d2
    ss_response = await g_store.asimilarity_search(query="[2, 10]", k=2)
    ss_labels = [doc.metadata["label"] for doc in ss_response]
    # nearest neighbours of (2, 10) are AR at (1, 9) then A0 at (0, 10)
    assert ss_labels == ["AR", "A0"]
    ss_by_v_response = await g_store.asimilarity_search_by_vector(
        embedding=[2, 10], k=2
    )
    ss_by_v_labels = [doc.metadata["label"] for doc in ss_by_v_response]
    assert ss_by_v_labels == ["AR", "A0"]
def test_gvs_traversal_search_sync(
    self,
    populated_graph_vector_store_d2: CassandraGraphVectorStore,
) -> None:
    """Graph traversal search on a graph vector store.

    Starting from the nearest A-side docs, a depth-2 traversal follows the
    bidirectional A<->B links and the outgoing A->T links, but not F.
    """
    g_store = populated_graph_vector_store_d2
    ts_response = g_store.traversal_search(query="[2, 10]", k=2, depth=2)
    # this is a set, as some of the internals of trav.search are set-driven
    # so ordering is not deterministic:
    ts_labels = {doc.metadata["label"] for doc in ts_response}
    assert ts_labels == {"AR", "A0", "BR", "B0", "TR", "T0"}
    # verify the same works as a retriever; use the Runnable `invoke` API
    # instead of the deprecated `get_relevant_documents`, consistent with the
    # other retriever tests in this suite.
    retriever = g_store.as_retriever(
        search_type="traversal", search_kwargs={"k": 2, "depth": 2}
    )
    ts_labels = {doc.metadata["label"] for doc in retriever.invoke("[2, 10]")}
    assert ts_labels == {"AR", "A0", "BR", "B0", "TR", "T0"}
async def test_gvs_traversal_search_async(
    self,
    populated_graph_vector_store_d2: CassandraGraphVectorStore,
) -> None:
    """Graph traversal search on a graph vector store (async)."""
    g_store = populated_graph_vector_store_d2
    ts_labels = set()
    async for doc in g_store.atraversal_search(query="[2, 10]", k=2, depth=2):
        ts_labels.add(doc.metadata["label"])
    # this is a set, as some of the internals of trav.search are set-driven
    # so ordering is not deterministic:
    assert ts_labels == {"AR", "A0", "BR", "B0", "TR", "T0"}
    # verify the same works as a retriever; use the Runnable `ainvoke` API
    # instead of the deprecated `aget_relevant_documents`, consistent with the
    # other retriever tests in this suite.
    retriever = g_store.as_retriever(
        search_type="traversal", search_kwargs={"k": 2, "depth": 2}
    )
    ts_labels = {
        doc.metadata["label"] for doc in await retriever.ainvoke("[2, 10]")
    }
    assert ts_labels == {"AR", "A0", "BR", "B0", "TR", "T0"}
def test_gvs_mmr_traversal_search_sync(
    self,
    populated_graph_vector_store_d2: CassandraGraphVectorStore,
) -> None:
    """MMR Graph traversal search on a graph vector store."""
    g_store = populated_graph_vector_store_d2
    mt_response = g_store.mmr_traversal_search(
        query="[2, 10]",
        k=2,
        depth=2,
        # fetch only the single nearest doc, then rely on link adjacency;
        # lambda_mult=0.1 weights diversity heavily over similarity
        fetch_k=1,
        adjacent_k=2,
        lambda_mult=0.1,
    )
    # TODO: can this rightfully be a list (or must it be a set)?
    mt_labels = {doc.metadata["label"] for doc in mt_response}
    assert mt_labels == {"AR", "BR"}
async def test_gvs_mmr_traversal_search_async(
    self,
    populated_graph_vector_store_d2: CassandraGraphVectorStore,
) -> None:
    """MMR Graph traversal search on a graph vector store (async)."""
    g_store = populated_graph_vector_store_d2
    mt_labels = set()
    async for doc in g_store.ammr_traversal_search(
        query="[2, 10]",
        k=2,
        depth=2,
        # fetch only the single nearest doc, then rely on link adjacency;
        # lambda_mult=0.1 weights diversity heavily over similarity
        fetch_k=1,
        adjacent_k=2,
        lambda_mult=0.1,
    ):
        mt_labels.add(doc.metadata["label"])
    # TODO: can this rightfully be a list (or must it be a set)?
    assert mt_labels == {"AR", "BR"}
def test_gvs_metadata_search_sync(
    self,
    populated_graph_vector_store_d2: CassandraGraphVectorStore,
) -> None:
    """Metadata search on a graph vector store."""
    g_store = populated_graph_vector_store_d2
    mt_response = g_store.metadata_search(
        filter={"label": "T0"},
        n=2,
    )
    doc: Document = next(iter(mt_response))
    # T0 is the fixture doc at (-10, 0) with a single incoming at_example link
    assert doc.page_content == "[-10, 0]"
    links = doc.metadata["links"]
    assert len(links) == 1
    link: Link = links.pop()
    assert isinstance(link, Link)
    assert link.direction == "in"
    assert link.kind == "at_example"
    assert link.tag == "tag_0"
async def test_gvs_metadata_search_async(
    self,
    populated_graph_vector_store_d2: CassandraGraphVectorStore,
) -> None:
    """Metadata search on a graph vector store (async)."""
    g_store = populated_graph_vector_store_d2
    mt_response = await g_store.ametadata_search(
        filter={"label": "T0"},
        n=2,
    )
    doc: Document = next(iter(mt_response))
    # T0 is the fixture doc at (-10, 0) with a single incoming at_example link
    assert doc.page_content == "[-10, 0]"
    links: set[Link] = doc.metadata["links"]
    assert len(links) == 1
    link: Link = links.pop()
    assert isinstance(link, Link)
    assert link.direction == "in"
    assert link.kind == "at_example"
    assert link.tag == "tag_0"
def test_gvs_get_by_document_id_sync(
    self,
    populated_graph_vector_store_d2: CassandraGraphVectorStore,
) -> None:
    """Get by document_id on a graph vector store."""
    g_store = populated_graph_vector_store_d2
    doc = g_store.get_by_document_id(document_id="FL")
    assert doc is not None
    # FL is the fixture doc at (1, -9) with a single outgoing af_example link
    assert doc.page_content == "[1, -9]"
    links = doc.metadata["links"]
    assert len(links) == 1
    link: Link = links.pop()
    assert isinstance(link, Link)
    assert link.direction == "out"
    assert link.kind == "af_example"
    assert link.tag == "tag_l"
    # an unknown id yields None rather than raising
    invalid_doc = g_store.get_by_document_id(document_id="invalid")
    assert invalid_doc is None
async def test_gvs_get_by_document_id_async(
    self,
    populated_graph_vector_store_d2: CassandraGraphVectorStore,
) -> None:
    """Get by document_id on a graph vector store (async)."""
    g_store = populated_graph_vector_store_d2
    doc = await g_store.aget_by_document_id(document_id="FL")
    assert doc is not None
    # FL is the fixture doc at (1, -9) with a single outgoing af_example link
    assert doc.page_content == "[1, -9]"
    links = doc.metadata["links"]
    assert len(links) == 1
    link: Link = links.pop()
    assert isinstance(link, Link)
    assert link.direction == "out"
    assert link.kind == "af_example"
    assert link.tag == "tag_l"
    # an unknown id yields None rather than raising
    invalid_doc = await g_store.aget_by_document_id(document_id="invalid")
    assert invalid_doc is None
def test_gvs_from_texts(
self,
graph_vector_store_d2: CassandraGraphVectorStore,
) -> None:
g_store = graph_vector_store_d2
g_store.add_texts(
texts=["[1, 2]"],
metadatas=[{"md": 1}],
ids=["x_id"],
)
hits = g_store.similarity_search("[2, 1]", k=2)
assert len(hits) == 1
assert hits[0].page_content == "[1, 2]"
assert hits[0].id == "x_id"
# there may be more re:graph structure.
assert hits[0].metadata["md"] == "1.0"
def test_gvs_from_documents_containing_ids(
self,
graph_vector_store_d2: CassandraGraphVectorStore,
) -> None:
the_document = Document(
page_content="[1, 2]",
metadata={"md": 1},
id="x_id",
)
g_store = graph_vector_store_d2
g_store.add_documents([the_document])
hits = g_store.similarity_search("[2, 1]", k=2)
assert len(hits) == 1
assert hits[0].page_content == "[1, 2]"
assert hits[0].id == "x_id"
# there may be more re:graph structure.
assert hits[0].metadata["md"] == "1.0"
def test_gvs_add_nodes_sync(
self,
*,
graph_vector_store_d2: CassandraGraphVectorStore,
) -> None:
links0 = [
Link(kind="kA", direction="out", tag="tA"),
Link(kind="kB", direction="bidir", tag="tB"),
]
links1 = [
Link(kind="kC", direction="in", tag="tC"),
]
nodes = [
Node(id="id0", text="[1, 0]", metadata={"m": 0}, links=links0),
Node(text="[-1, 0]", metadata={"m": 1}, links=links1),
]
graph_vector_store_d2.add_nodes(nodes)
hits = graph_vector_store_d2.similarity_search_by_vector([0.9, 0.1])
assert len(hits) == 2
assert hits[0].id == "id0"
assert hits[0].page_content == "[1, 0]"
md0 = hits[0].metadata
assert md0["m"] == "0.0"
assert any(isinstance(v, set) for k, v in md0.items() if k != "m")
assert hits[1].id != "id0"
assert hits[1].page_content == "[-1, 0]"
md1 = hits[1].metadata
assert md1["m"] == "1.0"
assert any(isinstance(v, set) for k, v in md1.items() if k != "m")
async def test_gvs_add_nodes_async(
self,
*,
graph_vector_store_d2: CassandraGraphVectorStore,
) -> None:
links0 = [
Link(kind="kA", direction="out", tag="tA"),
Link(kind="kB", direction="bidir", tag="tB"),
]
links1 = [
Link(kind="kC", direction="in", tag="tC"),
]
nodes = [
Node(id="id0", text="[1, 0]", metadata={"m": 0}, links=links0),
Node(text="[-1, 0]", metadata={"m": 1}, links=links1),
]
async for _ in graph_vector_store_d2.aadd_nodes(nodes):
pass
hits = await graph_vector_store_d2.asimilarity_search_by_vector([0.9, 0.1])
assert len(hits) == 2
assert hits[0].id == "id0"
assert hits[0].page_content == "[1, 0]"
md0 = hits[0].metadata
assert md0["m"] == "0.0"
assert any(isinstance(v, set) for k, v in md0.items() if k != "m")
assert hits[1].id != "id0"
assert hits[1].page_content == "[-1, 0]"
md1 = hits[1].metadata
assert md1["m"] == "1.0"
assert any(isinstance(v, set) for k, v in md1.items() if k != "m")
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/graph_vectorstores | lc_public_repos/langchain/libs/community/tests/integration_tests/graph_vectorstores/extractors/test_gliner_link_extractor.py | import pytest
from langchain_community.graph_vectorstores.extractors import GLiNERLinkExtractor
from langchain_community.graph_vectorstores.links import Link
PAGE_1 = """
Cristiano Ronaldo dos Santos Aveiro (Portuguese pronunciation: [kɾiʃ'tjɐnu
ʁɔ'naldu]; born 5 February 1985) is a Portuguese professional footballer who
plays as a forward for and captains both Saudi Pro League club Al Nassr and the
Portugal national team. Widely regarded as one of the greatest players of all
time, Ronaldo has won five Ballon d'Or awards,[note 3] a record three UEFA Men's
Player of the Year Awards, and four European Golden Shoes, the most by a
European player. He has won 33 trophies in his career, including seven league
titles, five UEFA Champions Leagues, the UEFA European Championship and the UEFA
Nations League. Ronaldo holds the records for most appearances (183), goals
(140) and assists (42) in the Champions League, goals in the European
Championship (14), international goals (128) and international appearances
(205). He is one of the few players to have made over 1,200 professional career
appearances, the most by an outfield player, and has scored over 850 official
senior career goals for club and country, making him the top goalscorer of all
time.
"""
@pytest.mark.requires("gliner")
def test_one_from_keywords() -> None:
    """GLiNER extraction yields bidirectional entity links per requested label."""
    extractor = GLiNERLinkExtractor(
        labels=["Person", "Award", "Date", "Competitions", "Teams"]
    )
    expected = {
        Link.bidir(kind="entity:Person", tag="Cristiano Ronaldo dos Santos Aveiro"),
        Link.bidir(kind="entity:Award", tag="European Golden Shoes"),
        Link.bidir(kind="entity:Award", tag="UEFA Men's\nPlayer of the Year Awards"),
        Link.bidir(kind="entity:Award", tag="Ballon d'Or"),
        Link.bidir(kind="entity:Date", tag="5 February 1985"),
        Link.bidir(kind="entity:Competitions", tag="European\nChampionship"),
        Link.bidir(kind="entity:Competitions", tag="UEFA Champions Leagues"),
        Link.bidir(kind="entity:Competitions", tag="UEFA European Championship"),
        Link.bidir(kind="entity:Competitions", tag="UEFA\nNations League"),
        Link.bidir(kind="entity:Teams", tag="Portugal national team"),
    }
    assert extractor.extract_one(PAGE_1) == expected
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/graph_vectorstores | lc_public_repos/langchain/libs/community/tests/integration_tests/graph_vectorstores/extractors/test_keybert_link_extractor.py | import pytest
from langchain_community.graph_vectorstores.extractors import KeybertLinkExtractor
from langchain_community.graph_vectorstores.links import Link
PAGE_1 = """
Supervised learning is the machine learning task of learning a function that
maps an input to an output based on example input-output pairs. It infers a
function from labeled training data consisting of a set of training examples. In
supervised learning, each example is a pair consisting of an input object
(typically a vector) and a desired output value (also called the supervisory
signal). A supervised learning algorithm analyzes the training data and produces
an inferred function, which can be used for mapping new examples. An optimal
scenario will allow for the algorithm to correctly determine the class labels
for unseen instances. This requires the learning algorithm to generalize from
the training data to unseen situations in a 'reasonable' way (see inductive
bias).
"""
PAGE_2 = """
KeyBERT is a minimal and easy-to-use keyword extraction technique that leverages
BERT embeddings to create keywords and keyphrases that are most similar to a
document.
"""
@pytest.mark.requires("keybert")
def test_one_from_keywords() -> None:
    """Default KeyBERT extraction yields single-word keyword links."""
    expected = {
        Link.bidir(kind="kw", tag="supervised"),
        Link.bidir(kind="kw", tag="labels"),
        Link.bidir(kind="kw", tag="labeled"),
        Link.bidir(kind="kw", tag="learning"),
        Link.bidir(kind="kw", tag="training"),
    }
    assert KeybertLinkExtractor().extract_one(PAGE_1) == expected
@pytest.mark.requires("keybert")
def test_many_from_keyphrases() -> None:
    """extract_many yields one keyphrase link set per input document."""
    extractor = KeybertLinkExtractor(
        extract_keywords_kwargs={
            "keyphrase_ngram_range": (1, 2),
        }
    )
    all_links = list(extractor.extract_many([PAGE_1, PAGE_2]))
    expected_page_1 = {
        Link.bidir(kind="kw", tag="supervised"),
        Link.bidir(kind="kw", tag="labeled training"),
        Link.bidir(kind="kw", tag="supervised learning"),
        Link.bidir(kind="kw", tag="examples supervised"),
        Link.bidir(kind="kw", tag="signal supervised"),
    }
    expected_page_2 = {
        Link.bidir(kind="kw", tag="keyphrases"),
        Link.bidir(kind="kw", tag="keyword extraction"),
        Link.bidir(kind="kw", tag="keybert"),
        Link.bidir(kind="kw", tag="keywords keyphrases"),
        Link.bidir(kind="kw", tag="keybert minimal"),
    }
    assert all_links[0] == expected_page_1
    assert all_links[1] == expected_page_2
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_transformers/test_embeddings_filter.py | """Integration test for embedding-based redundant doc filtering."""
from langchain_core.documents import Document
from langchain_community.document_transformers.embeddings_redundant_filter import (
EmbeddingsClusteringFilter,
EmbeddingsRedundantFilter,
_DocumentWithState,
)
from langchain_community.embeddings import OpenAIEmbeddings
def test_embeddings_redundant_filter() -> None:
    """Near-duplicate documents are collapsed to a single representative."""
    texts = [
        "What happened to all of my cookies?",
        "Where did all of my cookies go?",
        "I wish there were better Italian restaurants in my neighborhood.",
    ]
    documents = [Document(page_content=text) for text in texts]
    redundancy_filter = EmbeddingsRedundantFilter(embeddings=OpenAIEmbeddings())
    filtered = redundancy_filter.transform_documents(documents)
    # The two cookie questions are near-duplicates; only one should survive.
    assert len(filtered) == 2
    assert set(texts[:2]).intersection([doc.page_content for doc in filtered])
def test_embeddings_redundant_filter_with_state() -> None:
    """Documents sharing an identical precomputed embedding are deduplicated."""
    shared_state = {"embedded_doc": [0.5] * 10}
    documents = [
        _DocumentWithState(page_content=text, state=shared_state)
        for text in ["What happened to all of my cookies?", "foo bar baz"]
    ]
    redundancy_filter = EmbeddingsRedundantFilter(embeddings=OpenAIEmbeddings())
    # Identical stored embeddings make the documents indistinguishable.
    assert len(redundancy_filter.transform_documents(documents)) == 1
def test_embeddings_clustering_filter() -> None:
    """Cluster a three-topic corpus and keep one representative per cluster.

    The texts cover three topics (cookies, pizza, robots). With
    ``num_clusters=3`` and ``num_closest=1`` the filter should return exactly
    one document per cluster, preserving input order (``sorted=True``).
    """
    texts = [
        "What happened to all of my cookies?",
        "A cookie is a small, baked sweet treat and you can find it in the cookie",
        "monsters' jar.",
        "Cookies are good.",
        "I have nightmares about the cookie monster.",
        "The most popular pizza styles are: Neapolitan, New York-style and",
        "Chicago-style. You can find them on iconic restaurants in major cities.",
        "Neapolitan pizza: This is the original pizza style,hailing from Naples,",
        "Italy.",
        "I wish there were better Italian Pizza restaurants in my neighborhood.",
        "New York-style pizza: This is characterized by its large, thin crust, and",
        "generous toppings.",
        "The first movie to feature a robot was 'A Trip to the Moon' (1902).",
        "The first movie to feature a robot that could pass for a human was",
        "'Blade Runner' (1982)",
        "The first movie to feature a robot that could fall in love with a human",
        "was 'Her' (2013)",
        "A robot is a machine capable of carrying out complex actions automatically.",
        "There are certainly hundreds, if not thousands movies about robots like:",
        "'Blade Runner', 'Her' and 'A Trip to the Moon'",
    ]
    docs = [Document(page_content=t) for t in texts]
    embeddings = OpenAIEmbeddings()
    redundant_filter = EmbeddingsClusteringFilter(
        embeddings=embeddings,
        num_clusters=3,
        num_closest=1,
    # keep results in original document order
        sorted=True,
    )
    actual = redundant_filter.transform_documents(docs)
    # One representative per topic cluster.
    assert len(actual) == 3
    # NOTE(review): the specific indices (1, 4, 11) assume a particular
    # clustering outcome from OpenAI embeddings — verify if this test flakes.
    assert texts[1] in [d.page_content for d in actual]
    assert texts[4] in [d.page_content for d in actual]
    assert texts[11] in [d.page_content for d in actual]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_slack.py | """Tests for the Slack directory loader"""
from pathlib import Path
from langchain_community.document_loaders import SlackDirectoryLoader
def test_slack_directory_loader() -> None:
    """Test Slack directory loader."""
    zip_path = Path(__file__).parent.parent / "examples/slack_export.zip"
    documents = SlackDirectoryLoader(str(zip_path)).load()
    assert len(documents) == 5
def test_slack_directory_loader_urls() -> None:
    """Test workspace URLS are passed through in the SlackDirectoryloader."""
    zip_path = Path(__file__).parent.parent / "examples/slack_export.zip"
    workspace_url = "example_workspace.com"
    documents = SlackDirectoryLoader(str(zip_path), workspace_url).load()
    # Every document's source must be rooted at the workspace URL.
    assert all(
        doc.metadata["source"].startswith(workspace_url) for doc in documents
    )
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_modern_treasury.py | from langchain_community.document_loaders.modern_treasury import ModernTreasuryLoader
def test_modern_treasury_loader() -> None:
    """Test Modern Treasury file loader."""
    # Loads the "payment_orders" resource from the Modern Treasury API.
    docs = ModernTreasuryLoader("payment_orders").load()
    assert len(docs) == 1
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_quip.py | from typing import Dict
from unittest.mock import MagicMock, patch
import pytest
from langchain_core.documents import Document
from langchain_community.document_loaders.quip import QuipLoader
try:
from quip_api.quip import QuipClient # noqa: F401
quip_installed = True
except ImportError:
quip_installed = False
@pytest.fixture
def mock_quip():  # type: ignore
    """Yield a MagicMock standing in for the quip_api QuipClient class."""
    # mock quip_client
    with patch("quip_api.quip.QuipClient") as mock_quip:
        yield mock_quip
@pytest.mark.requires("quip_api")
class TestQuipLoader:
    """Unit tests for ``QuipLoader`` against a fully mocked Quip client."""

    API_URL: str = "https://example-api.quip.com"
    # Fixed: a stray trailing comma previously made this a one-element tuple
    # ("https://example.quip.com",) instead of the intended string.
    DOC_URL_PREFIX: str = "https://example.quip.com"
    ACCESS_TOKEN: str = "api_token"

    MOCK_FOLDER_IDS = ["ABC"]
    MOCK_THREAD_IDS = ["ABC", "DEF"]

    def test_quip_loader_initialization(self, mock_quip: MagicMock) -> None:
        """The constructor must forward credentials and timeout to the client."""
        QuipLoader(self.API_URL, access_token=self.ACCESS_TOKEN, request_timeout=60)
        mock_quip.assert_called_once_with(
            access_token=self.ACCESS_TOKEN, base_url=self.API_URL, request_timeout=60
        )

    def test_quip_loader_load_date_invalid_args(self) -> None:
        """load() with no folders, threads, or include_all raises ValueError."""
        quip_loader = QuipLoader(
            self.API_URL, access_token=self.ACCESS_TOKEN, request_timeout=60
        )

        with pytest.raises(
            ValueError,
            match="Must specify at least one among `folder_ids`, `thread_ids` or "
            "set `include_all`_folders as True",
        ):
            quip_loader.load()

    def test_quip_loader_load_data_by_folder_id(self, mock_quip: MagicMock) -> None:
        """Loading by folder id expands the folder into its thread documents."""
        mock_quip.get_folder.side_effect = [
            self._get_mock_folder(self.MOCK_FOLDER_IDS[0])
        ]
        mock_quip.get_thread.side_effect = [
            self._get_mock_thread(self.MOCK_THREAD_IDS[0]),
            self._get_mock_thread(self.MOCK_THREAD_IDS[1]),
        ]

        quip_loader = self._get_mock_quip_loader(mock_quip)
        documents = quip_loader.load(folder_ids=[self.MOCK_FOLDER_IDS[0]])

        assert mock_quip.get_folder.call_count == 1
        assert mock_quip.get_thread.call_count == 2
        assert len(documents) == 2
        assert all(isinstance(doc, Document) for doc in documents)
        assert (
            documents[0].metadata.get("source")
            == f"https://example.quip.com/{self.MOCK_THREAD_IDS[0]}"
        )
        assert (
            documents[1].metadata.get("source")
            == f"https://example.quip.com/{self.MOCK_THREAD_IDS[1]}"
        )

    def test_quip_loader_load_data_all_folder(self, mock_quip: MagicMock) -> None:
        """include_all_folders discovers folders via the authenticated user."""
        mock_quip.get_authenticated_user.side_effect = [
            self._get_mock_authenticated_user()
        ]
        mock_quip.get_folder.side_effect = [
            self._get_mock_folder(self.MOCK_FOLDER_IDS[0]),
        ]
        mock_quip.get_thread.side_effect = [
            self._get_mock_thread(self.MOCK_THREAD_IDS[0]),
            self._get_mock_thread(self.MOCK_THREAD_IDS[1]),
        ]

        quip_loader = self._get_mock_quip_loader(mock_quip)
        documents = quip_loader.load(include_all_folders=True)

        assert mock_quip.get_folder.call_count == 1
        assert mock_quip.get_thread.call_count == 2
        assert len(documents) == 2
        assert all(isinstance(doc, Document) for doc in documents)
        assert (
            documents[0].metadata.get("source")
            == f"https://example.quip.com/{self.MOCK_THREAD_IDS[0]}"
        )
        assert (
            documents[1].metadata.get("source")
            == f"https://example.quip.com/{self.MOCK_THREAD_IDS[1]}"
        )

    def test_quip_loader_load_data_by_thread_id(self, mock_quip: MagicMock) -> None:
        """Loading by explicit thread ids must not touch the folder API."""
        mock_quip.get_thread.side_effect = [
            self._get_mock_thread(self.MOCK_THREAD_IDS[0]),
            self._get_mock_thread(self.MOCK_THREAD_IDS[1]),
        ]

        quip_loader = self._get_mock_quip_loader(mock_quip)
        documents = quip_loader.load(thread_ids=self.MOCK_THREAD_IDS)

        assert mock_quip.get_folder.call_count == 0
        assert mock_quip.get_thread.call_count == 2
        assert len(documents) == 2
        assert all(isinstance(doc, Document) for doc in documents)
        assert (
            documents[0].metadata.get("source")
            == f"https://example.quip.com/{self.MOCK_THREAD_IDS[0]}"
        )
        assert (
            documents[1].metadata.get("source")
            == f"https://example.quip.com/{self.MOCK_THREAD_IDS[1]}"
        )

    def _get_mock_quip_loader(self, mock_quip: MagicMock) -> QuipLoader:
        """Build a QuipLoader whose client has been replaced by the mock."""
        quip_loader = QuipLoader(
            self.API_URL, access_token=self.ACCESS_TOKEN, request_timeout=60
        )
        quip_loader.quip_client = mock_quip
        return quip_loader

    def _get_mock_folder(self, folder_id: str) -> Dict:
        """Return a canned get_folder payload containing two child threads."""
        return {
            "folder": {
                "title": "runbook",
                "creator_id": "testing",
                "folder_type": "shared",
                "parent_id": "ABCD",
                "inherit_mode": "inherit",
                "color": "manila",
                "id": f"{folder_id}",
                "created_usec": 1668405728528904,
                "updated_usec": 1697356632672453,
                "link": "https://example.quip.com/YPH9OAR2Eu5",
            },
            "member_ids": [],
            "children": [
                {"thread_id": "ABC"},
                {"thread_id": "DEF"},
            ],
        }

    def _get_mock_thread(self, thread_id: str) -> Dict:
        """Return a canned get_thread payload for the given thread id."""
        return {
            "thread": {
                "author_id": "testing",
                "thread_class": "document",
                "owning_company_id": "ABC",
                "id": f"{thread_id}",
                "created_usec": 1690873126670055,
                "updated_usec": 1690874891638991,
                "title": f"Unit Test Doc {thread_id}",
                "link": f"https://example.quip.com/{thread_id}",
                "document_id": "ABC",
                "type": "document",
                "is_template": False,
                "is_deleted": False,
            },
            "user_ids": [],
            "shared_folder_ids": ["ABC"],
            "expanded_user_ids": ["ABCDEFG"],
            "invited_user_emails": [],
            "access_levels": {"ABCD": {"access_level": "OWN"}},
            "html": "<h1 id='temp:C:ABCD'>How to write Python Test </h1>",
        }

    def _get_mock_authenticated_user(self) -> Dict:
        """Return a minimal authenticated-user payload listing mock folders."""
        return {"shared_folder_ids": self.MOCK_FOLDER_IDS, "id": "Test"}
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_sql_database.py | """
Test SQLAlchemy document loader functionality on behalf of SQLite and PostgreSQL.
To run the tests for SQLite, you need to have the `sqlite3` package installed.
To run the tests for PostgreSQL, you need to have the `psycopg2` package installed.
In addition, to launch the PostgreSQL instance, you can use the docker compose file
located at the root of the repo, `langchain/docker/docker-compose.yml`. Use the
command `docker compose up postgres` to start the instance. It will have the
appropriate credentials set up including being exposed on the appropriate port.
"""
import functools
import logging
import typing
import warnings
from pathlib import Path
from tempfile import TemporaryDirectory
import pytest
import sqlalchemy as sa
from langchain_community.utilities.sql_database import SQLDatabase
if typing.TYPE_CHECKING:
from _pytest.python import Metafunc
from langchain_community.document_loaders.sql_database import SQLDatabaseLoader
from tests.data import MLB_TEAMS_2012_SQL
logging.basicConfig(level=logging.DEBUG)
try:
import sqlite3 # noqa: F401
sqlite3_installed = True
except ImportError:
warnings.warn("sqlite3 not installed, skipping corresponding tests", UserWarning)
sqlite3_installed = False
try:
import psycopg2 # noqa: F401
psycopg2_installed = True
except ImportError:
warnings.warn("psycopg2 not installed, skipping corresponding tests", UserWarning)
psycopg2_installed = False
@pytest.fixture()
def engine(db_uri: str) -> sa.Engine:
    """
    Return an SQLAlchemy engine object.
    """
    # `db_uri` is parameterized per backend by `pytest_generate_tests` below.
    return sa.create_engine(db_uri, echo=False)
@pytest.fixture()
def db(engine: sa.Engine) -> SQLDatabase:
    """Wrap the parameterized engine in the LangChain SQLDatabase facade."""
    return SQLDatabase(engine=engine)
@pytest.fixture()
def provision_database(engine: sa.Engine) -> None:
    """
    Provision database with table schema and data.
    """
    seed_sql = MLB_TEAMS_2012_SQL.read_text()
    with engine.connect() as connection:
        connection.execute(sa.text("DROP TABLE IF EXISTS mlb_teams_2012;"))
        # The seed file contains multiple `;`-terminated statements.
        for raw_statement in seed_sql.split(";"):
            stmt = raw_statement.strip()
            if stmt:
                connection.execute(sa.text(stmt))
        connection.commit()
# Temp directory holding the throwaway SQLite database file.
tmpdir = TemporaryDirectory()


def pytest_generate_tests(metafunc: "Metafunc") -> None:
    """
    Dynamically parameterize test cases to verify both SQLite and PostgreSQL.
    """
    if "db_uri" not in metafunc.fixturenames:
        return
    urls = []
    ids = []
    if sqlite3_installed:
        sqlite_file = Path(tmpdir.name).joinpath("testdrive.sqlite")
        urls.append(f"sqlite:///{sqlite_file}")
        ids.append("sqlite")
    if psycopg2_installed:
        # Non-standard port on purpose: the instance comes from
        # langchain/docker/docker-compose.yml (`docker compose up postgres`),
        # which provisions the credentials and exposes this port.
        urls.append(
            "postgresql+psycopg2://langchain:langchain@localhost:6023/langchain"
        )
        ids.append("postgresql")
    metafunc.parametrize("db_uri", urls, ids=ids)
def test_sqldatabase_loader_no_options(db: SQLDatabase) -> None:
    """Test SQLAlchemy loader basics."""
    documents = SQLDatabaseLoader("SELECT 1 AS a, 2 AS b", db=db).load()
    assert len(documents) == 1
    (document,) = documents
    assert document.page_content == "a: 1\nb: 2"
    assert document.metadata == {}
def test_sqldatabase_loader_include_rownum_into_metadata(db: SQLDatabase) -> None:
    """Test SQLAlchemy loader with row number in metadata."""
    documents = SQLDatabaseLoader(
        "SELECT 1 AS a, 2 AS b",
        db=db,
        include_rownum_into_metadata=True,
    ).load()
    assert len(documents) == 1
    assert documents[0].page_content == "a: 1\nb: 2"
    # Row numbering starts at zero.
    assert documents[0].metadata == {"row": 0}
def test_sqldatabase_loader_include_query_into_metadata(db: SQLDatabase) -> None:
    """Test SQLAlchemy loader with query in metadata."""
    query = "SELECT 1 AS a, 2 AS b"
    documents = SQLDatabaseLoader(
        query, db=db, include_query_into_metadata=True
    ).load()
    assert len(documents) == 1
    assert documents[0].page_content == "a: 1\nb: 2"
    # The originating query is recorded verbatim in metadata.
    assert documents[0].metadata == {"query": query}
def test_sqldatabase_loader_page_content_columns(db: SQLDatabase) -> None:
    """Test SQLAlchemy loader with defined page content columns."""
    # Restrict the "page content" rendering to column `a` only.
    content_mapper = functools.partial(
        SQLDatabaseLoader.page_content_default_mapper, column_names=["a"]
    )
    loader = SQLDatabaseLoader(
        "SELECT 1 AS a, 2 AS b UNION SELECT 3 AS a, 4 AS b",
        db=db,
        page_content_mapper=content_mapper,
    )
    documents = loader.load()
    assert [doc.page_content for doc in documents] == ["a: 1", "a: 3"]
    assert all(doc.metadata == {} for doc in documents)
def test_sqldatabase_loader_metadata_columns(db: SQLDatabase) -> None:
    """Test SQLAlchemy loader with defined metadata columns."""
    # Restrict the metadata dictionary to column `b` only.
    metadata_mapper = functools.partial(
        SQLDatabaseLoader.metadata_default_mapper, column_names=["b"]
    )
    documents = SQLDatabaseLoader(
        "SELECT 1 AS a, 2 AS b",
        db=db,
        metadata_mapper=metadata_mapper,
    ).load()
    assert len(documents) == 1
    assert documents[0].metadata == {"b": 2}
def test_sqldatabase_loader_real_data_with_sql_no_parameters(
    db: SQLDatabase, provision_database: None
) -> None:
    """Test SQLAlchemy loader with real data, querying by SQL statement."""
    documents = SQLDatabaseLoader(
        query='SELECT * FROM mlb_teams_2012 ORDER BY "Team";',
        db=db,
    ).load()
    # All 30 MLB teams, alphabetically; Angels sorts first.
    assert len(documents) == 30
    first = documents[0]
    assert first.page_content == "Team: Angels\nPayroll (millions): 154.49\nWins: 89"
    assert first.metadata == {}
def test_sqldatabase_loader_real_data_with_sql_and_parameters(
    db: SQLDatabase, provision_database: None
) -> None:
    """Test SQLAlchemy loader, querying by SQL statement and parameters."""
    documents = SQLDatabaseLoader(
        query='SELECT * FROM mlb_teams_2012 WHERE "Team" LIKE :search ORDER BY "Team";',
        parameters={"search": "R%"},
        db=db,
    ).load()
    # Six teams start with "R"; Rangers sorts first.
    assert len(documents) == 6
    first = documents[0]
    assert first.page_content == "Team: Rangers\nPayroll (millions): 120.51\nWins: 93"
    assert first.metadata == {}
def test_sqldatabase_loader_real_data_with_selectable(
    db: SQLDatabase, provision_database: None
) -> None:
    """Test SQLAlchemy loader with real data, querying by SQLAlchemy selectable."""
    # Define an SQLAlchemy table.
    # Column names/types mirror the schema seeded by `provision_database`.
    mlb_teams_2012 = sa.Table(
        "mlb_teams_2012",
        sa.MetaData(),
        sa.Column("Team", sa.VARCHAR),
        sa.Column("Payroll (millions)", sa.FLOAT),
        sa.Column("Wins", sa.BIGINT),
    )

    # Query the database table using an SQLAlchemy selectable.
    select = sa.select(mlb_teams_2012).order_by(mlb_teams_2012.c.Team)
    loader = SQLDatabaseLoader(
        query=select,
        db=db,
        include_query_into_metadata=True,
    )
    docs = loader.load()
    assert len(docs) == 30
    assert docs[0].page_content == "Team: Angels\nPayroll (millions): 154.49\nWins: 89"
    # The metadata contains the SQL text compiled from the selectable; the
    # expected string below must match SQLAlchemy's rendering exactly.
    assert docs[0].metadata == {
        "query": 'SELECT mlb_teams_2012."Team", mlb_teams_2012."Payroll (millions)", '
        'mlb_teams_2012."Wins" \nFROM mlb_teams_2012 '
        'ORDER BY mlb_teams_2012."Team"'
    }
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_duckdb.py | import unittest
from langchain_community.document_loaders.duckdb_loader import DuckDBLoader
try:
import duckdb # noqa: F401
duckdb_installed = True
except ImportError:
duckdb_installed = False
@unittest.skipIf(not duckdb_installed, "duckdb not installed")
def test_duckdb_loader_no_options() -> None:
    """Test DuckDB loader."""
    docs = DuckDBLoader("SELECT 1 AS a, 2 AS b").load()
    assert len(docs) == 1
    (doc,) = docs
    assert doc.page_content == "a: 1\nb: 2"
    assert doc.metadata == {}
@unittest.skipIf(not duckdb_installed, "duckdb not installed")
def test_duckdb_loader_page_content_columns() -> None:
    """Test DuckDB loader."""
    docs = DuckDBLoader(
        "SELECT 1 AS a, 2 AS b UNION SELECT 3 AS a, 4 AS b",
        page_content_columns=["a"],
    ).load()
    # Only column `a` is rendered into the page content.
    assert [doc.page_content for doc in docs] == ["a: 1", "a: 3"]
    assert all(doc.metadata == {} for doc in docs)
@unittest.skipIf(not duckdb_installed, "duckdb not installed")
def test_duckdb_loader_metadata_columns() -> None:
    """Test DuckDB loader."""
    docs = DuckDBLoader(
        "SELECT 1 AS a, 2 AS b",
        page_content_columns=["a"],
        metadata_columns=["b"],
    ).load()
    assert len(docs) == 1
    only = docs[0]
    assert only.page_content == "a: 1"
    assert only.metadata == {"b": 2}
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_github.py | from langchain_community.document_loaders.github import GitHubIssuesLoader
def test_issues_load() -> None:
    """Issues filed by a given creator come back with matching metadata."""
    expected_title = " Add caching to BaseChatModel (issue #1644)"
    loader = GitHubIssuesLoader(
        repo="langchain-ai/langchain",
        creator="UmerHA",
        state="all",
        per_page=3,
        page=2,
        access_token="",
    )
    docs = loader.load()
    assert len(docs) == 3
    assert expected_title in [doc.metadata["title"] for doc in docs]
    assert all(doc.metadata["creator"] == "UmerHA" for doc in docs)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_pyspark_dataframe_loader.py | import random
import string
from langchain_core.documents import Document
from langchain_community.document_loaders.pyspark_dataframe import (
PySparkDataFrameLoader,
)
def test_pyspark_loader_load_valid_data() -> None:
    """Rows of a Spark DataFrame become Documents keyed on the content column."""
    from pyspark.sql import SparkSession

    # Requires a session to be set up
    spark = SparkSession.builder.getOrCreate()
    rows = [
        (random.choice(string.ascii_letters), random.randint(0, 1)) for _ in range(3)
    ]
    df = spark.createDataFrame(rows, ["text", "label"])
    loader = PySparkDataFrameLoader(
        spark_session=spark, df=df, page_content_column="text"
    )
    expected_docs = [
        Document(page_content=text, metadata={"label": label})
        for text, label in rows
    ]
    assert loader.load() == expected_docs
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_etherscan.py | import os
import pytest
from langchain_community.document_loaders import EtherscanLoader
if "ETHERSCAN_API_KEY" in os.environ:
etherscan_key_set = True
api_key = os.environ["ETHERSCAN_API_KEY"]
else:
etherscan_key_set = False
@pytest.mark.skipif(not etherscan_key_set, reason="Etherscan API key not provided.")
def test_get_normal_transaction() -> None:
    """The default filter returns the account's normal transactions."""
    loader = EtherscanLoader("0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b")
    docs = loader.load()
    assert docs, "No transactions returned"
@pytest.mark.skipif(not etherscan_key_set, reason="Etherscan API key not provided.")
def test_get_internal_transaction() -> None:
    """The internal_transaction filter returns the account's internal txs."""
    loader = EtherscanLoader(
        "0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b",
        filter="internal_transaction",
    )
    docs = loader.load()
    assert docs, "No transactions returned"
@pytest.mark.skipif(not etherscan_key_set, reason="Etherscan API key not provided.")
def test_get_erc20_transaction() -> None:
    """The erc20_transaction filter returns the account's ERC-20 transfers."""
    loader = EtherscanLoader(
        "0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b",
        filter="erc20_transaction",
    )
    docs = loader.load()
    assert docs, "No transactions returned"
@pytest.mark.skipif(not etherscan_key_set, reason="Etherscan API key not provided.")
def test_get_erc721_transaction() -> None:
    """The erc721_transaction filter returns the account's ERC-721 transfers."""
    loader = EtherscanLoader(
        "0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b",
        filter="erc721_transaction",
    )
    docs = loader.load()
    assert docs, "No transactions returned"
@pytest.mark.skipif(not etherscan_key_set, reason="Etherscan API key not provided.")
def test_get_erc1155_transaction() -> None:
    """The account has no ERC-1155 transfers, so a single empty doc comes back."""
    loader = EtherscanLoader(
        "0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b",
        filter="erc1155_transaction",
    )
    docs = loader.load()
    assert len(docs) == 1, "Wrong transactions returned"
    assert docs[0].page_content == "", "Wrong transactions returned"
@pytest.mark.skipif(not etherscan_key_set, reason="Etherscan API key not provided.")
def test_get_eth_balance() -> None:
    """The eth_balance filter returns the account's ETH balance document."""
    loader = EtherscanLoader(
        "0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b",
        filter="eth_balance",
    )
    docs = loader.load()
    assert docs, "No transactions returned"
@pytest.mark.skipif(not etherscan_key_set, reason="Etherscan API key not provided.")
def test_invalid_filter() -> None:
    """An unknown filter name raises ValueError at construction time."""
    with pytest.raises(ValueError) as error_invalid_filter:
        EtherscanLoader(
            "0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b",
            filter="internal_saction",
        )
    assert str(error_invalid_filter.value) == "Invalid filter internal_saction"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_recursive_url_loader.py | from langchain_community.document_loaders.recursive_url_loader import RecursiveUrlLoader
def test_async_recursive_url_loader() -> None:
url = "https://docs.python.org/3.9/"
loader = RecursiveUrlLoader(
url,
extractor=lambda _: "placeholder",
use_async=True,
max_depth=3,
timeout=None,
check_response_status=True,
)
docs = loader.load()
assert len(docs) == 512
assert docs[0].page_content == "placeholder"
def test_async_recursive_url_loader_deterministic() -> None:
    """Two async crawls of the same site yield identical document sets."""
    loader = RecursiveUrlLoader(
        "https://docs.python.org/3.9/",
        use_async=True,
        max_depth=3,
        timeout=None,
    )
    def by_source(doc):  # sort key: canonical order by source URL
        return doc.metadata["source"]
    first_pass = sorted(loader.load(), key=by_source)
    second_pass = sorted(loader.load(), key=by_source)
    assert first_pass == second_pass
def test_sync_recursive_url_loader() -> None:
    """Sync crawl to depth 2 visits the expected number of pages."""
    loader = RecursiveUrlLoader(
        "https://docs.python.org/3.9/",
        extractor=lambda _: "placeholder",
        use_async=False,
        max_depth=2,
    )
    docs = loader.load()
    assert len(docs) == 24
    assert docs[0].page_content == "placeholder"
def test_sync_async_equivalent() -> None:
    """Sync and async crawls of the same site must produce the same documents.

    Fix: ``async_loader`` was previously constructed with ``use_async=False``,
    so the test compared the sync crawl against itself and could never detect
    a sync/async divergence.
    """
    url = "https://docs.python.org/3.9/"
    loader = RecursiveUrlLoader(url, use_async=False, max_depth=2)
    async_loader = RecursiveUrlLoader(url, use_async=True, max_depth=2)
    # Sort both result sets by source URL so crawl order doesn't matter.
    docs = sorted(loader.load(), key=lambda d: d.metadata["source"])
    async_docs = sorted(async_loader.load(), key=lambda d: d.metadata["source"])
    assert docs == async_docs
def test_loading_invalid_url() -> None:
    """An unreachable root URL yields no documents instead of raising."""
    loader = RecursiveUrlLoader(
        "https://this.url.is.invalid/this/is/a/test",
        max_depth=1,
        extractor=lambda _: "placeholder",
        use_async=False,
    )
    assert loader.load() == []
def test_sync_async_metadata_necessary_properties() -> None:
    """Both sync and async crawls must attach source and content_type metadata.

    Bug fix: ``async_loader`` was previously constructed with
    ``use_async=False``, so the async code path's metadata was never checked.
    """
    url = "https://docs.python.org/3.9/"
    loader = RecursiveUrlLoader(url, use_async=False, max_depth=2)
    async_loader = RecursiveUrlLoader(url, use_async=True, max_depth=2)
    docs = loader.load()
    async_docs = async_loader.load()
    for doc in docs:
        assert "source" in doc.metadata
        assert "content_type" in doc.metadata
    for doc in async_docs:
        assert "source" in doc.metadata
        assert "content_type" in doc.metadata
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_rst.py | import os
from pathlib import Path
from langchain_community.document_loaders import UnstructuredRSTLoader
# Directory holding the shared test fixture files.
# NOTE(review): the chained ``file_path =`` binding looks accidental — every
# test below shadows it with a local ``file_path`` — confirm before removing.
EXAMPLE_DIRECTORY = file_path = Path(__file__).parent.parent / "examples"
def test_unstructured_rst_loader() -> None:
    """README.rst should load as exactly one document."""
    rst_path = os.path.join(EXAMPLE_DIRECTORY, "README.rst")
    documents = UnstructuredRSTLoader(str(rst_path)).load()
    assert len(documents) == 1
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_csv_loader.py | import os
from pathlib import Path
from langchain_community.document_loaders import UnstructuredCSVLoader
# Directory holding the shared test fixture files.
# NOTE(review): the chained ``file_path =`` binding looks accidental — the
# test below shadows it with a local ``file_path`` — confirm before removing.
EXAMPLE_DIRECTORY = file_path = Path(__file__).parent.parent / "examples"
def test_unstructured_csv_loader() -> None:
    """stanley-cups.csv should load as a single document."""
    csv_path = os.path.join(EXAMPLE_DIRECTORY, "stanley-cups.csv")
    documents = UnstructuredCSVLoader(str(csv_path)).load()
    assert len(documents) == 1
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_polars_dataframe.py | from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from langchain_core.documents import Document
from langchain_community.document_loaders import PolarsDataFrameLoader
if TYPE_CHECKING:
import polars as pl
@pytest.fixture
def sample_data_frame() -> pl.DataFrame:
    """Build the tiny two-row Polars frame shared by the loader tests."""
    import polars as pl

    rows = {
        "text": ["Hello", "World"],
        "author": ["Alice", "Bob"],
        "date": ["2022-01-01", "2022-01-02"],
    }
    return pl.DataFrame(rows)
def test_load_returns_list_of_documents(sample_data_frame: pl.DataFrame) -> None:
    """The loader should emit a list with one Document per frame row."""
    documents = PolarsDataFrameLoader(sample_data_frame).load()
    assert isinstance(documents, list)
    assert len(documents) == 2
    for document in documents:
        assert isinstance(document, Document)
def test_load_converts_dataframe_columns_to_document_metadata(
    sample_data_frame: pl.DataFrame,
) -> None:
    """Every non-content column should land in the document metadata."""
    documents = PolarsDataFrameLoader(sample_data_frame).load()
    for idx, document in enumerate(documents):
        row: pl.DataFrame = sample_data_frame[idx]
        assert row is not None
        assert document.metadata["author"] == row.select("author").item()
        assert document.metadata["date"] == row.select("date").item()
def test_load_uses_page_content_column_to_create_document_text(
    sample_data_frame: pl.DataFrame,
) -> None:
    """The column named by ``page_content_column`` becomes the document text."""
    renamed = sample_data_frame.rename(mapping={"text": "dummy_test_column"})
    loader = PolarsDataFrameLoader(renamed, page_content_column="dummy_test_column")
    contents = [doc.page_content for doc in loader.load()]
    assert contents == ["Hello", "World"]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_json_loader.py | from pathlib import Path
from langchain_community.document_loaders import JSONLoader
def test_json_loader() -> None:
    """Extract message contents from the example JSON via a jq-style schema."""
    json_path = Path(__file__).parent.parent / "examples/example.json"
    documents = JSONLoader(str(json_path), ".messages[].content").load()
    # The fixture contains three messages...
    assert len(documents) == 3
    # ...and a null content value is normalized to an empty string.
    assert documents[-1].page_content == ""
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_larksuite.py | from langchain_community.document_loaders.larksuite import (
LarkSuiteDocLoader,
LarkSuiteWikiLoader,
)
# LarkSuite (FeiShu) connection settings. Intentionally blank in source
# control — fill in a real domain, tenant access token, and document id
# before running these integration tests.
DOMAIN = ""
ACCESS_TOKEN = ""
DOCUMENT_ID = ""
def test_larksuite_doc_loader() -> None:
    """Test LarkSuite (FeiShu) document loader."""
    documents = LarkSuiteDocLoader(DOMAIN, ACCESS_TOKEN, DOCUMENT_ID).load()
    assert len(documents) == 1
    assert documents[0].page_content is not None
def test_larksuite_wiki_loader() -> None:
    """Test LarkSuite (FeiShu) wiki loader."""
    documents = LarkSuiteWikiLoader(DOMAIN, ACCESS_TOKEN, DOCUMENT_ID).load()
    assert len(documents) == 1
    assert documents[0].page_content is not None
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_blockchain.py | import os
import time
import pytest
from langchain_community.document_loaders import BlockchainDocumentLoader
from langchain_community.document_loaders.blockchain import BlockchainType
# These integration tests need an Alchemy API key; all tests below are
# skipped when it is absent. ``apiKey`` is only bound when the key exists,
# matching the skip guard.
alchemyKeySet = "ALCHEMY_API_KEY" in os.environ
if alchemyKeySet:
    apiKey = os.environ["ALCHEMY_API_KEY"]
@pytest.mark.skipif(not alchemyKeySet, reason="Alchemy API key not provided.")
def test_get_nfts_valid_contract() -> None:
    """A valid contract returns the full single-fetch cap of 100 NFTs."""
    max_alchemy_tokens = 100
    # CoolCats contract address
    contract_address = "0x1a92f7381b9f03921564a437210bb9396471050c"
    result = BlockchainDocumentLoader(contract_address).load()
    print("Tokens returned for valid contract: ", len(result))  # noqa: T201
    assert len(result) == max_alchemy_tokens, (
        f"Wrong number of NFTs returned. "
        f"Expected {max_alchemy_tokens}, got {len(result)}"
    )
@pytest.mark.skipif(not alchemyKeySet, reason="Alchemy API key not provided.")
def test_get_nfts_with_pagination() -> None:
    """Loading from an explicit start token still returns at least one NFT."""
    # CoolCats contract address
    contract_address = "0x1a92f7381b9f03921564a437210bb9396471050c"
    startToken = "0x0000000000000000000000000000000000000000000000000000000000000077"
    loader = BlockchainDocumentLoader(
        contract_address,
        BlockchainType.ETH_MAINNET,
        api_key=apiKey,
        startToken=startToken,
    )
    result = loader.load()
    print("Tokens returned for contract with offset: ", len(result))  # noqa: T201
    assert len(result) > 0, "No NFTs returned"
@pytest.mark.skipif(not alchemyKeySet, reason="Alchemy API key not provided.")
def test_get_nfts_polygon() -> None:
    """NFTs should also be retrievable from the Polygon mainnet."""
    # Polygon contract address
    contract_address = "0x448676ffCd0aDf2D85C1f0565e8dde6924A9A7D9"
    loader = BlockchainDocumentLoader(contract_address, BlockchainType.POLYGON_MAINNET)
    result = loader.load()
    print("Tokens returned for contract on Polygon: ", len(result))  # noqa: T201
    assert len(result) > 0, "No NFTs returned"
@pytest.mark.skipif(not alchemyKeySet, reason="Alchemy API key not provided.")
def test_get_nfts_invalid_contract() -> None:
    """An address holding no NFTs should raise a descriptive ValueError."""
    # Invalid contract address
    contract_address = "0x111D4e82EA7eCA7F62c3fdf6D39A541be95Bf111"
    with pytest.raises(ValueError) as error_NoNfts:
        BlockchainDocumentLoader(contract_address).load()
    expected_message = "No NFTs found for contract address " + contract_address
    assert str(error_NoNfts.value) == expected_message
@pytest.mark.skipif(not alchemyKeySet, reason="Alchemy API key not provided.")
def test_get_all() -> None:
    """Fetching every token (get_all_tokens=True) returns a non-empty set."""
    start_time = time.time()
    # Polygon contract address
    contract_address = "0x448676ffCd0aDf2D85C1f0565e8dde6924A9A7D9"
    loader = BlockchainDocumentLoader(
        contract_address=contract_address,
        blockchainType=BlockchainType.POLYGON_MAINNET,
        api_key=os.environ["ALCHEMY_API_KEY"],
        startToken="100",
        get_all_tokens=True,
    )
    result = loader.load()
    end_time = time.time()
    print(  # noqa: T201
        f"Tokens returned for {contract_address} "
        f"contract: {len(result)} in {end_time - start_time} seconds"
    )
    assert len(result) > 0, "No NFTs returned"
@pytest.mark.skipif(not alchemyKeySet, reason="Alchemy API key not provided.")
def test_get_all_10sec_timeout() -> None:
    """Exceeding max_execution_time while fetching all tokens raises RuntimeError."""
    start_time = time.time()
    # Cool Cats contract address
    contract_address = "0x1a92f7381b9f03921564a437210bb9396471050c"
    with pytest.raises(RuntimeError):
        BlockchainDocumentLoader(
            contract_address=contract_address,
            blockchainType=BlockchainType.ETH_MAINNET,
            api_key=os.environ["ALCHEMY_API_KEY"],
            get_all_tokens=True,
            max_execution_time=10,
        ).load()
    end_time = time.time()
    print("Execution took ", end_time - start_time, " seconds")  # noqa: T201
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_pdf.py | from pathlib import Path
from typing import Sequence, Union
import pytest
from langchain_community.document_loaders import (
AmazonTextractPDFLoader,
MathpixPDFLoader,
PDFMinerLoader,
PDFMinerPDFasHTMLLoader,
PyMuPDFLoader,
PyPDFium2Loader,
UnstructuredPDFLoader,
)
def test_unstructured_pdf_loader_elements_mode() -> None:
    """In elements mode hello.pdf splits into two documents."""
    pdf_path = Path(__file__).parent.parent / "examples/hello.pdf"
    documents = UnstructuredPDFLoader(str(pdf_path), mode="elements").load()
    assert len(documents) == 2
def test_unstructured_pdf_loader_paged_mode() -> None:
    """In paged mode each PDF page becomes its own document."""
    pdf_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf"
    documents = UnstructuredPDFLoader(str(pdf_path), mode="paged").load()
    assert len(documents) == 16
def test_unstructured_pdf_loader_default_mode() -> None:
    """The default (single) mode merges the whole PDF into one document."""
    pdf_path = Path(__file__).parent.parent / "examples/hello.pdf"
    documents = UnstructuredPDFLoader(str(pdf_path)).load()
    assert len(documents) == 1
def test_pdfminer_loader() -> None:
    """PDFMiner concatenates pages by default; splitting is opt-out."""
    examples = Path(__file__).parent.parent / "examples"
    # Default behavior: one document regardless of page count.
    assert len(PDFMinerLoader(str(examples / "hello.pdf")).load()) == 1
    assert len(PDFMinerLoader(str(examples / "layout-parser-paper.pdf")).load()) == 1
    # An explicit concatenation flag behaves the same as the default...
    docs = PDFMinerLoader(str(examples / "hello.pdf"), concatenate_pages=True).load()
    assert len(docs) == 1
    # ...while disabling it yields one document per page.
    docs = PDFMinerLoader(
        str(examples / "layout-parser-paper.pdf"), concatenate_pages=False
    ).load()
    assert len(docs) == 16
def test_pdfminer_pdf_as_html_loader() -> None:
    """PDFMinerPDFasHTMLLoader always returns a single HTML document."""
    examples = Path(__file__).parent.parent / "examples"
    for name in ("hello.pdf", "layout-parser-paper.pdf"):
        documents = PDFMinerPDFasHTMLLoader(str(examples / name)).load()
        assert len(documents) == 1
def test_pypdfium2_loader() -> None:
    """PyPDFium2 splits PDFs into one document per page."""
    examples = Path(__file__).parent.parent / "examples"
    assert len(PyPDFium2Loader(str(examples / "hello.pdf")).load()) == 1
    assert len(PyPDFium2Loader(str(examples / "layout-parser-paper.pdf")).load()) == 16
def test_pymupdf_loader() -> None:
    """PyMuPDF splits per page and also supports loading from a URL."""
    examples = Path(__file__).parent.parent / "examples"
    loader = PyMuPDFLoader(str(examples / "hello.pdf"))
    assert len(loader.load()) == 1
    loader = PyMuPDFLoader(str(examples / "layout-parser-paper.pdf"))
    assert len(loader.load()) == 16
    # Local paths leave web_path unset.
    assert loader.web_path is None
    web_path = "https://people.sc.fsu.edu/~jpeterson/hello_world.pdf"
    remote_loader = PyMuPDFLoader(web_path)
    remote_docs = remote_loader.load()
    # Remote PDFs are downloaded to a temp file, so file_path differs.
    assert remote_loader.web_path == web_path
    assert remote_loader.file_path != web_path
    assert len(remote_docs) == 1
def test_mathpix_loader() -> None:
    """Mathpix OCR returns a single document for both sample PDFs."""
    examples = Path(__file__).parent.parent / "examples"
    for name in ("hello.pdf", "layout-parser-paper.pdf"):
        documents = MathpixPDFLoader(str(examples / name)).load()
        assert len(documents) == 1
        print(documents[0].page_content)  # noqa: T201
# Each case: (file_path, Textract feature set, expected doc count, whether to
# pass an explicit boto3 client). Sources mix HTTPS URLs, a local file, and S3.
@pytest.mark.parametrize(
    "file_path, features, docs_length, create_client",
    [
        (
            (
                "https://amazon-textract-public-content.s3.us-east-2.amazonaws.com"
                "/langchain/alejandro_rosalez_sample_1.jpg"
            ),
            ["FORMS", "TABLES", "LAYOUT"],
            1,
            False,
        ),
        (
            (
                "https://amazon-textract-public-content.s3.us-east-2.amazonaws.com"
                "/langchain/alejandro_rosalez_sample_1.jpg"
            ),
            [],
            1,
            False,
        ),
        (
            (
                "https://amazon-textract-public-content.s3.us-east-2.amazonaws.com"
                "/langchain/alejandro_rosalez_sample_1.jpg"
            ),
            ["TABLES"],
            1,
            False,
        ),
        (
            (
                "https://amazon-textract-public-content.s3.us-east-2.amazonaws.com"
                "/langchain/alejandro_rosalez_sample_1.jpg"
            ),
            ["FORMS"],
            1,
            False,
        ),
        (
            (
                "https://amazon-textract-public-content.s3.us-east-2.amazonaws.com"
                "/langchain/alejandro_rosalez_sample_1.jpg"
            ),
            ["LAYOUT"],
            1,
            False,
        ),
        (str(Path(__file__).parent.parent / "examples/hello.pdf"), ["FORMS"], 1, False),
        (str(Path(__file__).parent.parent / "examples/hello.pdf"), [], 1, False),
        (
            "s3://amazon-textract-public-content/langchain/layout-parser-paper.pdf",
            ["FORMS", "TABLES", "LAYOUT"],
            16,
            True,
        ),
    ],
)
@pytest.mark.skip(reason="Requires AWS credentials to run")
def test_amazontextract_loader(
    file_path: str,
    features: Union[Sequence[str], None],
    docs_length: int,
    create_client: bool,
) -> None:
    """Run documents through Amazon Textract and check the document count."""
    if create_client:
        import boto3

        # The public Textract demo bucket lives in us-east-2.
        textract_client = boto3.client("textract", region_name="us-east-2")
        loader = AmazonTextractPDFLoader(
            file_path, textract_features=features, client=textract_client
        )
    else:
        loader = AmazonTextractPDFLoader(file_path, textract_features=features)
    docs = loader.load()
    print(docs)  # noqa: T201
    assert len(docs) == docs_length
@pytest.mark.skip(reason="Requires AWS credentials to run")
def test_amazontextract_loader_failures() -> None:
    """Synchronous Textract calls reject multi-page local PDFs."""
    # 2-page PDF local file system
    two_page_pdf = str(
        Path(__file__).parent.parent / "examples/multi-page-forms-sample-2-page.pdf"
    )
    with pytest.raises(ValueError):
        AmazonTextractPDFLoader(two_page_pdf).load()
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_rss.py | from pathlib import Path
from langchain_community.document_loaders.rss import RSSFeedLoader
def test_rss_loader() -> None:
    """Loading a live RSS feed yields documents with the full metadata set."""
    documents = RSSFeedLoader(urls=["https://www.engadget.com/rss.xml"]).load()
    first = documents[0]
    assert first is not None
    assert hasattr(first, "page_content")
    assert hasattr(first, "metadata")
    for key in (
        "feed",
        "title",
        "link",
        "authors",
        "language",
        "description",
        "publish_date",
    ):
        assert key in first.metadata
def test_rss_loader_with_opml() -> None:
    """Feeds can also be supplied as an OPML document."""
    examples = Path(__file__).parent.parent / "examples"
    with open(examples.joinpath("sample_rss_feeds.opml"), "r") as f:
        loader = RSSFeedLoader(opml=f.read())
        documents = loader.load()
    first = documents[0]
    assert first is not None
    assert hasattr(first, "page_content")
    assert hasattr(first, "metadata")
    for key in (
        "feed",
        "title",
        "link",
        "authors",
        "language",
        "description",
        "publish_date",
    ):
        assert key in first.metadata
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_llmsherpa.py | from langchain_community.document_loaders.llmsherpa import LLMSherpaFileLoader
# Publicly hosted PDF used as input for all LLMSherpa loader tests below.
file_path = "https://arxiv.org/pdf/2402.14207.pdf"
def test_llmsherpa_file_loader_initialization() -> None:
    """Defaults: chunk strategy, OCR on, and the new indent parser enabled."""
    loader = LLMSherpaFileLoader(
        file_path=file_path,
    )
    documents = loader.load()
    assert isinstance(loader, LLMSherpaFileLoader)
    assert hasattr(documents, "__iter__")
    assert loader.strategy == "chunks"
    expected_url = (
        "https://readers.llmsherpa.com/api/document/developer/parseDocument"
        "?renderFormat=all&useNewIndentParser=true&applyOcr=yes"
    )
    assert loader.url == expected_url
    assert len(documents) > 1
def test_apply_ocr() -> None:
    """Disabling the indent parser drops its query flag but keeps OCR."""
    loader = LLMSherpaFileLoader(
        file_path=file_path,
        apply_ocr=True,
        new_indent_parser=False,
    )
    documents = loader.load()
    expected_url = (
        "https://readers.llmsherpa.com/api/document/developer/parseDocument"
        "?renderFormat=all&applyOcr=yes"
    )
    assert loader.url == expected_url
    assert len(documents) > 1
def test_new_indent_parser() -> None:
    """Disabling OCR drops its query flag but keeps the indent parser."""
    loader = LLMSherpaFileLoader(
        file_path=file_path,
        apply_ocr=False,
        new_indent_parser=True,
    )
    documents = loader.load()
    expected_url = (
        "https://readers.llmsherpa.com/api/document/developer/parseDocument"
        "?renderFormat=all&useNewIndentParser=true"
    )
    assert loader.url == expected_url
    assert len(documents) > 1
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_lakefs.py | import unittest
from typing import Any
from unittest.mock import patch
import pytest
import requests_mock
from requests_mock.mocker import Mocker
from langchain_community.document_loaders.lakefs import LakeFSLoader
@pytest.fixture
def mock_lakefs_client() -> Any:
    """Stub LakeFSClient: lists one object and reports presign support."""
    target = "langchain_community.document_loaders.lakefs.LakeFSClient"
    with patch(target) as client_cls:
        stub = client_cls.return_value
        stub.ls_objects.return_value = [
            ("path_bla.txt", "https://physical_address_bla")
        ]
        stub.is_presign_supported.return_value = True
        yield stub
@pytest.fixture
def mock_lakefs_client_no_presign_not_local() -> Any:
    """Stub LakeFSClient: remote (https) object but no presign support."""
    target = "langchain_community.document_loaders.lakefs.LakeFSClient"
    with patch(target) as client_cls:
        stub = client_cls.return_value
        stub.ls_objects.return_value = [
            ("path_bla.txt", "https://physical_address_bla")
        ]
        stub.is_presign_supported.return_value = False
        yield stub
@pytest.fixture
def mock_unstructured_local() -> Any:
    """Stub UnstructuredLakeFSLoader so load() returns canned content tuples."""
    target = "langchain_community.document_loaders.lakefs.UnstructuredLakeFSLoader"
    with patch(target) as loader_cls:
        loader_cls.return_value.load.return_value = [
            ("text content", "pdf content")
        ]
        yield loader_cls.return_value
@pytest.fixture
def mock_lakefs_client_no_presign_local() -> Any:
    """Stub LakeFSClient: local-scheme object and no presign support."""
    target = "langchain_community.document_loaders.lakefs.LakeFSClient"
    with patch(target) as client_cls:
        stub = client_cls.return_value
        stub.ls_objects.return_value = [
            ("path_bla.txt", "local:///physical_address_bla")
        ]
        stub.is_presign_supported.return_value = False
        yield stub
class TestLakeFSLoader(unittest.TestCase):
    """Tests for LakeFSLoader against a fully mocked lakeFS backend."""

    # Dummy connection settings; real values are unnecessary because the
    # lakeFS client and the HTTP layer are both mocked in these tests.
    lakefs_access_key = "lakefs_access_key"
    lakefs_secret_key = "lakefs_secret_key"
    endpoint = "endpoint"
    repo = "repo"
    ref = "ref"
    path = "path"

    @requests_mock.Mocker()
    @pytest.mark.usefixtures("mock_lakefs_client")
    def test_presigned_loading(self, mocker: Mocker) -> None:
        """Smoke test: loading via presigned URLs completes without raising."""
        # Any GET (i.e. the presigned object URL) is served with dummy data.
        mocker.register_uri("GET", requests_mock.ANY, text="data")
        loader = LakeFSLoader(
            self.lakefs_access_key, self.lakefs_secret_key, self.endpoint
        )
        loader.set_repo(self.repo)
        loader.set_ref(self.ref)
        loader.set_path(self.path)
        loader.load()
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_gitbook.py | from typing import Optional
import pytest
from langchain_community.document_loaders.gitbook import GitbookLoader
class TestGitbookLoader:
    """Integration tests for GitbookLoader initialization and loading."""

    @pytest.mark.parametrize(
        "web_page, load_all_paths, base_url, expected_web_path",
        [
            ("https://example.com/page1", False, None, "https://example.com/page1"),
            (
                "https://example.com/",
                True,
                "https://example.com",
                "https://example.com/sitemap.xml",
            ),
        ],
    )
    def test_init(
        self,
        web_page: str,
        load_all_paths: bool,
        base_url: Optional[str],
        expected_web_path: str,
    ) -> None:
        """Check the derived base_url/web_path attributes.

        Bug fix: the original assertion was one chained expression in which
        the conditional bound to the whole comparison, so the ``else`` branch
        merely asserted the truthiness of a non-empty string and could never
        fail. The expected value is now computed explicitly before comparing.
        """
        loader = GitbookLoader(
            web_page, load_all_paths=load_all_paths, base_url=base_url
        )
        print(loader.__dict__)  # noqa: T201
        expected_base = base_url or web_page
        if expected_base.endswith("/"):
            # The loader strips a trailing slash from its base URL.
            expected_base = expected_base[:-1]
        assert loader.base_url == expected_base
        assert loader.web_path == expected_web_path
        assert loader.load_all_paths == load_all_paths

    @pytest.mark.parametrize(
        "web_page, expected_number_results",
        [("https://platform-docs.opentargets.org/getting-started", 1)],
    )
    def test_load_single_page(
        self, web_page: str, expected_number_results: int
    ) -> None:
        """A single page loads as exactly one document."""
        loader = GitbookLoader(web_page)
        result = loader.load()
        assert len(result) == expected_number_results

    @pytest.mark.parametrize("web_page", [("https://platform-docs.opentargets.org/")])
    def test_load_multiple_pages(self, web_page: str) -> None:
        """Crawling all sitemap paths yields many documents."""
        loader = GitbookLoader(web_page, load_all_paths=True)
        result = loader.load()
        print(len(result))  # noqa: T201
        assert len(result) > 10
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_arxiv.py | import shutil
from http.client import HTTPMessage
from pathlib import Path
from typing import List, Union
from unittest.mock import patch
from urllib.error import HTTPError
import pytest
from langchain_core.documents import Document
from langchain_community.document_loaders.arxiv import ArxivLoader
# Local one-page PDF fixture, copied into place by the download-failure test.
EXAMPLE_HELLO_PDF_PATH = Path(__file__).parents[1] / "examples" / "hello.pdf"
def assert_docs(docs: List[Document]) -> None:
    """Assert each doc has content and exactly the default arXiv metadata keys."""
    expected_keys = {"Published", "Title", "Authors", "Summary"}
    for doc in docs:
        assert doc.page_content
        assert doc.metadata
        assert set(doc.metadata) == expected_keys
def test_load_success() -> None:
    """Querying a specific arXiv id returns exactly one well-formed document."""
    docs = ArxivLoader(query="1605.08386", load_max_docs=2).load()
    assert len(docs) == 1
    print(docs[0].metadata)  # noqa: T201
    print(docs[0].page_content)  # noqa: T201
    assert_docs(docs)
def test_load_returns_no_result() -> None:
    """A nonsense query should produce an empty result set."""
    docs = ArxivLoader(query="1605.08386WWW", load_max_docs=2).load()
    assert len(docs) == 0
def test_load_returns_limited_docs() -> None:
    """load_max_docs caps the number of documents returned."""
    expected_docs = 2
    docs = ArxivLoader(query="ChatGPT", load_max_docs=expected_docs).load()
    assert len(docs) == expected_docs
    assert_docs(docs)
def test_load_returns_full_set_of_metadata() -> None:
    """With load_all_available_meta the metadata is a superset of the defaults."""
    loader = ArxivLoader(query="ChatGPT", load_max_docs=1, load_all_available_meta=True)
    docs = loader.load()
    assert len(docs) == 1
    for doc in docs:
        assert doc.page_content
        assert doc.metadata
        assert set(doc.metadata).issuperset(
            {"Published", "Title", "Authors", "Summary"}
        )
        print(doc.metadata)  # noqa: T201
        # More keys than the four defaults must be present.
        assert len(set(doc.metadata)) > 4
def test_skip_http_error() -> None:
    """Test skipping unexpected Http 404 error of a single doc"""
    # Destination for the fixture copy handed back on the second call.
    tmp_hello_pdf_path = Path(__file__).parent / "hello.pdf"

    def first_download_fails() -> Union[HTTPError, str]:
        # Uses a function attribute as a one-shot flag: the first invocation
        # sets ``firstCall`` and raises 404; every later invocation succeeds.
        if not hasattr(first_download_fails, "firstCall"):
            first_download_fails.__setattr__("firstCall", False)
            raise HTTPError(
                url="", code=404, msg="Not Found", hdrs=HTTPMessage(), fp=None
            )
        else:
            # Return temporary example pdf path
            shutil.copy(EXAMPLE_HELLO_PDF_PATH, tmp_hello_pdf_path)
            return str(tmp_hello_pdf_path.absolute())

    with patch("arxiv.Result.download_pdf") as mock_download_pdf:
        # Set up the mock to raise HTTP 404 error
        mock_download_pdf.side_effect = first_download_fails
        # Load documents
        loader = ArxivLoader(
            query="ChatGPT",
            load_max_docs=2,
            load_all_available_meta=True,
            continue_on_failure=True,
        )
        docs = loader.load()
        # Only 1 of 2 documents should be loaded
        assert len(docs) == 1
@pytest.mark.skip(reason="test could be flaky")
def test_load_issue_9046() -> None:
    """Regression test for issue 9046: ':' and '-' in queries must not break search."""
    expected_docs = 3
    for query in (
        # ":" character could not be an issue
        "MetaGPT: Meta Programming for Multi-Agent Collaborative Framework",
        # "-" character could not be an issue
        "MetaGPT - Meta Programming for Multi-Agent Collaborative Framework",
    ):
        docs = ArxivLoader(query=query, load_max_docs=expected_docs).load()
        assert_docs(docs)
        assert "MetaGPT" in docs[0].metadata["Title"]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_stripe.py | from langchain_community.document_loaders.stripe import StripeLoader
def test_stripe_loader() -> None:
    """Test Stripe file loader."""
    documents = StripeLoader("charges").load()
    assert len(documents) == 1
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_odt.py | from pathlib import Path
from langchain_community.document_loaders import UnstructuredODTLoader
def test_unstructured_odt_loader() -> None:
    """fake.odt should load as a single document."""
    odt_path = Path(__file__).parent.parent / "examples/fake.odt"
    documents = UnstructuredODTLoader(str(odt_path)).load()
    assert len(documents) == 1
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_xorbits.py | import pytest
from langchain_core.documents import Document
from langchain_community.document_loaders import XorbitsLoader
# Probe for the optional xorbits dependency; the tests below skip without it.
try:
    import xorbits  # noqa: F401
except ImportError:
    xorbits_installed = False
else:
    xorbits_installed = True
@pytest.mark.skipif(not xorbits_installed, reason="xorbits not installed")
def test_load_returns_list_of_documents() -> None:
    """The loader emits a list with one Document per frame row."""
    import xorbits.pandas as pd

    frame = pd.DataFrame(
        {
            "text": ["Hello", "World"],
            "author": ["Alice", "Bob"],
            "date": ["2022-01-01", "2022-01-02"],
        }
    )
    docs = XorbitsLoader(frame).load()
    assert isinstance(docs, list)
    assert len(docs) == 2
    assert all(isinstance(doc, Document) for doc in docs)
@pytest.mark.skipif(not xorbits_installed, reason="xorbits not installed")
def test_load_converts_dataframe_columns_to_document_metadata() -> None:
    """Non-content columns are copied into each document's metadata."""
    import xorbits.pandas as pd

    frame = pd.DataFrame(
        {
            "text": ["Hello", "World"],
            "author": ["Alice", "Bob"],
            "date": ["2022-01-01", "2022-01-02"],
        }
    )
    docs = XorbitsLoader(frame).load()
    expected = {
        "author": ["Alice", "Bob"],
        "date": ["2022-01-01", "2022-01-02"],
    }
    for i, doc in enumerate(docs):
        for column in ("author", "date"):
            assert doc.metadata[column] == expected[column][i]
@pytest.mark.skipif(not xorbits_installed, reason="xorbits not installed")
def test_load_uses_page_content_column_to_create_document_text() -> None:
    """page_content_column selects which column becomes the document text."""
    import xorbits.pandas as pd

    frame = pd.DataFrame(
        {
            "text": ["Hello", "World"],
            "author": ["Alice", "Bob"],
            "date": ["2022-01-01", "2022-01-02"],
        }
    ).rename(columns={"text": "dummy_test_column"})
    docs = XorbitsLoader(frame, page_content_column="dummy_test_column").load()
    assert [doc.page_content for doc in docs] == ["Hello", "World"]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_url_playwright.py | """Tests for the Playwright URL loader"""
from typing import TYPE_CHECKING
from langchain_community.document_loaders import PlaywrightURLLoader
from langchain_community.document_loaders.url_playwright import PlaywrightEvaluator
if TYPE_CHECKING:
from playwright.async_api import Browser as AsyncBrowser
from playwright.async_api import Page as AsyncPage
from playwright.async_api import Response as AsyncResponse
from playwright.sync_api import Browser, Page, Response
class TestEvaluator(PlaywrightEvaluator):
    """A simple evaluator for testing purposes.

    Always returns the fixed string "test" so the tests below can verify
    that a custom evaluator's output becomes the document page_content.
    """

    def evaluate(self, page: "Page", browser: "Browser", response: "Response") -> str:
        # Sync path: ignore the actual page content entirely.
        return "test"

    async def evaluate_async(
        self, page: "AsyncPage", browser: "AsyncBrowser", response: "AsyncResponse"
    ) -> str:
        # Async path mirrors the sync evaluator.
        return "test"
def test_playwright_url_loader() -> None:
    """Test Playwright URL loader."""
    targets = [
        "https://www.youtube.com/watch?v=dQw4w9WgXcQ",
        "https://goo.gl/maps/NDSHwePEyaHMFGwh8",
        "https://techmeme.com",
        "https://techcrunch.com",
    ]
    loader = PlaywrightURLLoader(
        urls=targets,
        remove_selectors=["header", "footer"],
        continue_on_failure=False,
        headless=True,
    )
    documents = loader.load()
    assert len(documents) > 0
async def test_playwright_async_url_loader() -> None:
    """Test Playwright async URL loader."""
    targets = [
        "https://www.youtube.com/watch?v=dQw4w9WgXcQ",
        "https://goo.gl/maps/NDSHwePEyaHMFGwh8",
        "https://techmeme.com",
        "https://techcrunch.com",
    ]
    loader = PlaywrightURLLoader(
        urls=targets,
        remove_selectors=["header", "footer"],
        continue_on_failure=False,
        headless=True,
    )
    documents = await loader.aload()
    assert len(documents) > 0
def test_playwright_url_loader_with_custom_evaluator() -> None:
    """A custom evaluator's output should become the document content."""
    loader = PlaywrightURLLoader(
        urls=["https://www.youtube.com/watch?v=dQw4w9WgXcQ"],
        evaluator=TestEvaluator(),
        continue_on_failure=False,
        headless=True,
    )
    documents = loader.load()
    assert len(documents) == 1
    assert documents[0].page_content == "test"
async def test_playwright_async_url_loader_with_custom_evaluator() -> None:
    """The async path should honor a custom evaluator as well."""
    loader = PlaywrightURLLoader(
        urls=["https://www.youtube.com/watch?v=dQw4w9WgXcQ"],
        evaluator=TestEvaluator(),
        continue_on_failure=False,
        headless=True,
    )
    documents = await loader.aload()
    assert len(documents) == 1
    assert documents[0].page_content == "test"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_url.py | import pytest
from langchain_community.document_loaders import UnstructuredURLLoader
def test_continue_on_failure_true() -> None:
    """Test exception is not raised when continue_on_failure=True."""
    # Default continue_on_failure=True swallows the bad URL.
    UnstructuredURLLoader(["badurl.foobar"]).load()
def test_continue_on_failure_false() -> None:
    """Test exception is raised when continue_on_failure=False."""
    loader = UnstructuredURLLoader(["badurl.foobar"], continue_on_failure=False)
    with pytest.raises(Exception):
        loader.load()
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_excel.py | import os
from pathlib import Path
from langchain_community.document_loaders import UnstructuredExcelLoader
# Directory holding the shared test fixture files.
# NOTE(review): the chained ``file_path =`` binding looks accidental — the
# test below shadows it with a local ``file_path`` — confirm before removing.
EXAMPLE_DIRECTORY = file_path = Path(__file__).parent.parent / "examples"
def test_unstructured_excel_loader() -> None:
    """stanley-cups.xlsx should load as a single document."""
    xlsx_path = os.path.join(EXAMPLE_DIRECTORY, "stanley-cups.xlsx")
    documents = UnstructuredExcelLoader(str(xlsx_path)).load()
    assert len(documents) == 1
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_confluence.py | import pytest
from langchain_community.document_loaders.confluence import ConfluenceLoader
# Probe for the optional atlassian dependency; the tests below skip without it.
try:
    from atlassian import Confluence  # noqa: F401
except ImportError:
    confluence_installed = False
else:
    confluence_installed = True
@pytest.mark.skipif(not confluence_installed, reason="Atlassian package not installed")
def test_load_single_confluence_page() -> None:
    """A single page id loads with the expected metadata."""
    loader = ConfluenceLoader(url="https://templates.atlassian.net/wiki/")
    docs = loader.load(page_ids=["33189"])
    assert len(docs) == 1
    page = docs[0]
    assert page.page_content is not None
    assert page.metadata["id"] == "33189"
    assert page.metadata["title"] == "An easy intro to using Confluence"
    assert page.metadata["source"] == (
        "https://templates.atlassian.net/wiki/"
        "spaces/RD/pages/33189/An+easy+intro+to+using+Confluence"
    )
@pytest.mark.skipif(not confluence_installed, reason="Atlassian package not installed")
def test_load_full_confluence_space() -> None:
    """Loading by space key returns every page in the demo space."""
    loader = ConfluenceLoader(url="https://templates.atlassian.net/wiki/")
    docs = loader.load(space_key="RD")
    assert len(docs) == 14
    assert docs[0].page_content is not None
@pytest.mark.skipif(not confluence_installed, reason="Atlassian package not installed")
def test_confluence_pagination() -> None:
    """Pagination stops once max_pages documents have been collected."""
    loader = ConfluenceLoader(url="https://templates.atlassian.net/wiki/")
    # this will issue 2 requests; each with a limit of 3 until the max_pages of 5 is met
    docs = loader.load(space_key="RD", limit=3, max_pages=5)
    assert len(docs) == 5
    assert docs[0].page_content is not None
@pytest.mark.skipif(not confluence_installed, reason="Atlassian package not installed")
def test_pass_confluence_kwargs() -> None:
    """Extra kwargs are forwarded to the underlying Confluence client."""
    extra_client_kwargs = {"verify_ssl": False}
    loader = ConfluenceLoader(
        url="https://templates.atlassian.net/wiki/",
        confluence_kwargs=extra_client_kwargs,
    )
    assert loader.confluence.verify_ssl is False
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_astradb.py | """
Test of Astra DB document loader class `AstraDBLoader`
Required to run this test:
- a recent `astrapy` Python package available
- an Astra DB instance;
- the two environment variables set:
export ASTRA_DB_API_ENDPOINT="https://<DB-ID>-us-east1.apps.astra.datastax.com"
export ASTRA_DB_APPLICATION_TOKEN="AstraCS:........."
- optionally this as well (otherwise defaults are used):
export ASTRA_DB_KEYSPACE="my_keyspace"
"""
from __future__ import annotations
import json
import os
import uuid
from typing import TYPE_CHECKING, AsyncIterator, Iterator
import pytest
from langchain_community.document_loaders.astradb import AstraDBLoader
if TYPE_CHECKING:
from astrapy.db import (
AstraDBCollection,
AsyncAstraDBCollection,
)
ASTRA_DB_APPLICATION_TOKEN = os.getenv("ASTRA_DB_APPLICATION_TOKEN")
ASTRA_DB_API_ENDPOINT = os.getenv("ASTRA_DB_API_ENDPOINT")
ASTRA_DB_KEYSPACE = os.getenv("ASTRA_DB_KEYSPACE")
def _has_env_vars() -> bool:
    """Return True when both required Astra DB env vars are set and non-empty."""
    required_settings = (ASTRA_DB_APPLICATION_TOKEN, ASTRA_DB_API_ENDPOINT)
    return all(required_settings)
@pytest.fixture
def astra_db_collection() -> Iterator[AstraDBCollection]:
    """Create a uniquely named Astra DB collection seeded with test documents.

    Inserts 20 {"foo": "bar"} docs plus 4 "bar2" and 4 more "bar" docs
    (24 "bar" docs total), yields the collection, then drops it on teardown.
    """
    from astrapy.db import AstraDB

    astra_db = AstraDB(
        token=ASTRA_DB_APPLICATION_TOKEN or "",
        api_endpoint=ASTRA_DB_API_ENDPOINT or "",
        namespace=ASTRA_DB_KEYSPACE,
    )
    # Unique suffix so concurrent test runs do not collide on collection name.
    collection_name = f"lc_test_loader_{str(uuid.uuid4()).split('-')[0]}"
    collection = astra_db.create_collection(collection_name)
    collection.insert_many([{"foo": "bar", "baz": "qux"}] * 20)
    collection.insert_many(
        [{"foo": "bar2", "baz": "qux"}] * 4 + [{"foo": "bar", "baz": "qux"}] * 4
    )
    yield collection
    astra_db.delete_collection(collection_name)
@pytest.fixture
async def async_astra_db_collection() -> AsyncIterator[AsyncAstraDBCollection]:
    """Async counterpart of `astra_db_collection` with identical seed data."""
    from astrapy.db import AsyncAstraDB

    astra_db = AsyncAstraDB(
        token=ASTRA_DB_APPLICATION_TOKEN or "",
        api_endpoint=ASTRA_DB_API_ENDPOINT or "",
        namespace=ASTRA_DB_KEYSPACE,
    )
    # Unique suffix so concurrent test runs do not collide on collection name.
    collection_name = f"lc_test_loader_{str(uuid.uuid4()).split('-')[0]}"
    collection = await astra_db.create_collection(collection_name)
    await collection.insert_many([{"foo": "bar", "baz": "qux"}] * 20)
    await collection.insert_many(
        [{"foo": "bar2", "baz": "qux"}] * 4 + [{"foo": "bar", "baz": "qux"}] * 4
    )
    yield collection
    await astra_db.delete_collection(collection_name)
@pytest.mark.requires("astrapy")
@pytest.mark.skipif(not _has_env_vars(), reason="Missing Astra DB env. vars")
class TestAstraDB:
    """Integration tests for AstraDBLoader against a live Astra DB instance."""

    def test_astradb_loader(self, astra_db_collection: AstraDBCollection) -> None:
        """Filtered/projected load returns the expected unique documents."""
        loader = AstraDBLoader(
            astra_db_collection.collection_name,
            token=ASTRA_DB_APPLICATION_TOKEN,
            api_endpoint=ASTRA_DB_API_ENDPOINT,
            namespace=ASTRA_DB_KEYSPACE,
            nb_prefetched=1,
            projection={"foo": 1},
            find_options={"limit": 22},
            filter_criteria={"foo": "bar"},
        )
        docs = loader.load()
        # The fixture seeded 24 matching docs; the limit caps the result at 22.
        assert len(docs) == 22
        ids = set()
        for doc in docs:
            content = json.loads(doc.page_content)
            assert content["foo"] == "bar"
            # "baz" was excluded by the projection.
            assert "baz" not in content
            # Every returned document must be distinct.
            assert content["_id"] not in ids
            ids.add(content["_id"])
            assert doc.metadata == {
                "namespace": astra_db_collection.astra_db.namespace,
                "api_endpoint": astra_db_collection.astra_db.base_url,
                "collection": astra_db_collection.collection_name,
            }

    def test_extraction_function(self, astra_db_collection: AstraDBCollection) -> None:
        """A custom extraction_function controls page_content."""
        loader = AstraDBLoader(
            astra_db_collection.collection_name,
            token=ASTRA_DB_APPLICATION_TOKEN,
            api_endpoint=ASTRA_DB_API_ENDPOINT,
            namespace=ASTRA_DB_KEYSPACE,
            find_options={"limit": 30},
            extraction_function=lambda x: x["foo"],
        )
        docs = loader.lazy_load()
        doc = next(docs)
        assert doc.page_content == "bar"

    async def test_astradb_loader_async(
        self, async_astra_db_collection: AsyncAstraDBCollection
    ) -> None:
        """Async load mirrors the sync test via aload()."""
        # NOTE(review): these inserts duplicate what the fixture already seeded;
        # the limit of 22 below still holds, but confirm the duplication is
        # intentional.
        await async_astra_db_collection.insert_many([{"foo": "bar", "baz": "qux"}] * 20)
        await async_astra_db_collection.insert_many(
            [{"foo": "bar2", "baz": "qux"}] * 4 + [{"foo": "bar", "baz": "qux"}] * 4
        )
        loader = AstraDBLoader(
            async_astra_db_collection.collection_name,
            token=ASTRA_DB_APPLICATION_TOKEN,
            api_endpoint=ASTRA_DB_API_ENDPOINT,
            namespace=ASTRA_DB_KEYSPACE,
            nb_prefetched=1,
            projection={"foo": 1},
            find_options={"limit": 22},
            filter_criteria={"foo": "bar"},
        )
        docs = await loader.aload()
        assert len(docs) == 22
        ids = set()
        for doc in docs:
            content = json.loads(doc.page_content)
            assert content["foo"] == "bar"
            assert "baz" not in content
            assert content["_id"] not in ids
            ids.add(content["_id"])
            assert doc.metadata == {
                "namespace": async_astra_db_collection.astra_db.namespace,
                "api_endpoint": async_astra_db_collection.astra_db.base_url,
                "collection": async_astra_db_collection.collection_name,
            }

    async def test_extraction_function_async(
        self, async_astra_db_collection: AsyncAstraDBCollection
    ) -> None:
        """Async lazy loading honours a custom extraction_function."""
        loader = AstraDBLoader(
            async_astra_db_collection.collection_name,
            token=ASTRA_DB_APPLICATION_TOKEN,
            api_endpoint=ASTRA_DB_API_ENDPOINT,
            namespace=ASTRA_DB_KEYSPACE,
            find_options={"limit": 30},
            extraction_function=lambda x: x["foo"],
        )
        doc = await loader.alazy_load().__anext__()
        assert doc.page_content == "bar"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_whatsapp_chat.py | from pathlib import Path
from langchain_community.document_loaders import WhatsAppChatLoader
def test_whatsapp_chat_loader() -> None:
    """Test WhatsAppChatLoader.

    The fixture file mixes several date/time formats (12h/24h, different
    separators, `~`-prefixed senders) to exercise the loader's parsing.
    """
    file_path = Path(__file__).parent.parent / "examples" / "whatsapp_chat.txt"
    loader = WhatsAppChatLoader(str(file_path))
    docs = loader.load()
    assert len(docs) == 1
    assert docs[0].metadata["source"] == str(file_path)
    assert docs[0].page_content == (
        "James on 05.05.23, 15:48:11: Hi here\n\n"
        "User name on 11/8/21, 9:41:32 AM: Message 123\n\n"
        "User 2 on 1/23/23, 3:19 AM: Bye!\n\n"
        "User 1 on 1/23/23, 3:22_AM: And let me know if anything changes\n\n"
        "~ User name 2 on 1/24/21, 12:41:03 PM: Of course!\n\n"
        "~ User 2 on 2023/5/4, 16:13:23: See you!\n\n"
        "User 1 on 7/19/22, 11:32 PM: Hello\n\n"
        "User 2 on 7/20/22, 11:32 am: Goodbye\n\n"
    )
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_google_speech_to_text.py | """Test Google Speech-to-Text document loader.
You need to create a Google Cloud project and enable the Speech-to-Text API to run the
integration tests.
Follow the instructions in the example notebook:
google_speech_to_text.ipynb
to set up the app and configure authentication.
"""
import pytest
from langchain_community.document_loaders.google_speech_to_text import (
GoogleSpeechToTextLoader,
)
@pytest.mark.requires("google.api_core")
def test_initialization() -> None:
    """Constructor stores its arguments and fills in the default fields."""
    speech_loader = GoogleSpeechToTextLoader(
        project_id="test_project_id", file_path="./testfile.mp3"
    )
    assert speech_loader.project_id == "test_project_id"
    assert speech_loader.file_path == "./testfile.mp3"
    # Fields not passed explicitly take their defaults.
    assert speech_loader.location == "us-central1"
    assert speech_loader.recognizer_id == "_"
@pytest.mark.requires("google.api_core")
def test_load() -> None:
    """Loading produces one document with transcription text and language.

    NOTE(review): the asserted transcription "Test transcription text"
    presumably corresponds to a prepared test audio file / recognizer in the
    target project — confirm the fixture setup before running.
    """
    loader = GoogleSpeechToTextLoader(
        project_id="test_project_id", file_path="./testfile.mp3"
    )
    docs = loader.load()
    assert len(docs) == 1
    assert docs[0].page_content == "Test transcription text"
    assert docs[0].metadata["language_code"] == "en-US"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_docusaurus.py | from pathlib import Path
from langchain_community.document_loaders import DocusaurusLoader
DOCS_URL = str(Path(__file__).parent.parent / "examples/docusaurus-sitemap.xml")
def test_docusarus() -> None:
    """Loading the local sitemap yields multiple docs, including the homepage."""
    documents = DocusaurusLoader(DOCS_URL, is_local=True).load()
    assert len(documents) > 1
    assert "🦜️🔗 Langchain" in documents[0].page_content
def test_filter_docusaurus_sitemap() -> None:
    """filter_urls restricts loading to the single matching sitemap entry."""
    loader = DocusaurusLoader(
        DOCS_URL,
        is_local=True,
        filter_urls=[
            "https://python.langchain.com/docs/integrations/document_loaders/sitemap"
        ],
    )
    documents = loader.load()
    assert len(documents) == 1
    assert "SitemapLoader" in documents[0].page_content
def test_docusarus_metadata() -> None:
    """meta_function can merge custom keys into each page's metadata.

    Fix: the original had a bare ``\"\"\"Test sitemap loader.\"\"\"`` string
    placed *after* the nested function definition, where it is a dead
    statement rather than this test's docstring; it now lives at the top.
    """

    def sitemap_metadata_one(meta: dict, _content: None) -> dict:
        # Keep all sitemap-provided metadata and add one custom key.
        return {**meta, "mykey": "Super Important Metadata"}

    loader = DocusaurusLoader(
        DOCS_URL,
        is_local=True,
        meta_function=sitemap_metadata_one,
    )
    documents = loader.load()
    assert len(documents) > 1
    assert "mykey" in documents[0].metadata
    assert "Super Important Metadata" in documents[0].metadata["mykey"]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_pubmed.py | """Integration test for PubMed API Wrapper."""
from typing import List
import pytest
from langchain_core.documents import Document
from langchain_community.document_loaders import PubMedLoader
xmltodict = pytest.importorskip("xmltodict")
def test_load_success() -> None:
    """Test that returns the correct answer"""
    api_client = PubMedLoader(query="chatgpt")
    docs = api_client.load()
    print(docs)  # noqa: T201
    # No explicit cap given, so this relies on the loader's default
    # load_max_docs being 3 — the chained comparison checks both.
    assert len(docs) == api_client.load_max_docs == 3
    assert_docs(docs)
def test_load_success_load_max_docs() -> None:
    """An explicit load_max_docs caps the number of returned documents."""
    max_docs = 2
    api_client = PubMedLoader(query="chatgpt", load_max_docs=max_docs)
    docs = api_client.load()
    print(docs)  # noqa: T201
    assert len(docs) == api_client.load_max_docs == max_docs
    assert_docs(docs)
def test_load_returns_no_result() -> None:
    """A nonsense query yields zero documents."""
    api_client = PubMedLoader(query="1605.08386WWW")
    results = api_client.load()
    assert len(results) == 0
def test_load_no_content() -> None:
    """Returns a Document without content.

    Querying by a bare PubMed uid can match an entry with no abstract text,
    so page_content is expected to be empty while metadata is still present.
    """
    api_client = PubMedLoader(query="37548971")
    docs = api_client.load()
    print(docs)  # noqa: T201
    assert len(docs) > 0
    assert docs[0].page_content == ""
def assert_docs(docs: List[Document]) -> None:
    """Check every document carries exactly the expected metadata keys."""
    expected_keys = {"Copyright Information", "uid", "Title", "Published"}
    for doc in docs:
        assert doc.metadata
        assert set(doc.metadata) == expected_keys
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_tsv.py | import os
from pathlib import Path
from langchain_community.document_loaders import UnstructuredTSVLoader
EXAMPLE_DIRECTORY = file_path = Path(__file__).parent.parent / "examples"
def test_unstructured_tsv_loader() -> None:
    """UnstructuredTSVLoader loads a .tsv file into a single document."""
    # EXAMPLE_DIRECTORY is a pathlib.Path, so use the `/` operator instead of
    # os.path.join (idiomatic and avoids the os import in this test).
    file_path = EXAMPLE_DIRECTORY / "stanley-cups.tsv"
    loader = UnstructuredTSVLoader(str(file_path))
    docs = loader.load()
    assert len(docs) == 1
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_email.py | from pathlib import Path
from langchain_community.document_loaders import (
OutlookMessageLoader,
UnstructuredEmailLoader,
)
def test_outlook_message_loader() -> None:
    """Test OutlookMessageLoader.

    Loads a fixture .msg file and checks subject/sender/date metadata plus
    the exact (CRLF-formatted) body text.
    """
    file_path = Path(__file__).parent.parent / "examples/hello.msg"
    loader = OutlookMessageLoader(str(file_path))
    docs = loader.load()
    assert len(docs) == 1
    assert docs[0].metadata["subject"] == "Test for TIF files"
    assert docs[0].metadata["sender"] == "Brian Zhou <brizhou@gmail.com>"
    assert docs[0].metadata["date"] == "Mon, 18 Nov 2013 16:26:24 +0800"
    assert docs[0].page_content == (
        "This is a test email to experiment with the MS Outlook MSG "
        "Extractor\r\n\r\n\r\n-- \r\n\r\n\r\nKind regards"
        "\r\n\r\n\r\n\r\n\r\nBrian Zhou\r\n\r\n"
    )
def test_unstructured_email_loader_with_attachments() -> None:
    """With process_attachments=True the attachment becomes its own element.

    The attachment's elements are appended after the email body, so the last
    document corresponds to the attached text file.
    """
    file_path = Path(__file__).parent.parent / "examples/fake-email-attachment.eml"
    loader = UnstructuredEmailLoader(
        str(file_path), mode="elements", process_attachments=True
    )
    docs = loader.load()
    assert docs[-1].page_content == "Hey this is a fake attachment!"
    assert docs[-1].metadata["filename"] == "fake-attachment.txt"
    assert docs[-1].metadata["source"].endswith("fake-email-attachment.eml")
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_bigquery.py | import pytest
from langchain_community.document_loaders.bigquery import BigQueryLoader
try:
from google.cloud import bigquery # noqa: F401
bigquery_installed = True
except ImportError:
bigquery_installed = False
@pytest.mark.skipif(not bigquery_installed, reason="bigquery not installed")
def test_bigquery_loader_no_options() -> None:
    """With no column options, every column lands in page_content."""
    docs = BigQueryLoader("SELECT 1 AS a, 2 AS b").load()
    assert len(docs) == 1
    only_doc = docs[0]
    assert only_doc.page_content == "a: 1\nb: 2"
    assert only_doc.metadata == {}
@pytest.mark.skipif(not bigquery_installed, reason="bigquery not installed")
def test_bigquery_loader_page_content_columns() -> None:
    """Only the listed page_content_columns appear in the document text.

    Column "b" is neither in page_content_columns nor metadata_columns,
    so it is dropped entirely.
    """
    loader = BigQueryLoader(
        "SELECT 1 AS a, 2 AS b UNION ALL SELECT 3 AS a, 4 AS b",
        page_content_columns=["a"],
    )
    docs = loader.load()
    assert len(docs) == 2
    assert docs[0].page_content == "a: 1"
    assert docs[0].metadata == {}
    assert docs[1].page_content == "a: 3"
    assert docs[1].metadata == {}
@pytest.mark.skipif(not bigquery_installed, reason="bigquery not installed")
def test_bigquery_loader_metadata_columns() -> None:
    """Columns can be routed into metadata instead of page_content."""
    query = "SELECT 1 AS a, 2 AS b"
    loader = BigQueryLoader(
        query,
        page_content_columns=["a"],
        metadata_columns=["b"],
    )
    docs = loader.load()
    assert len(docs) == 1
    only_doc = docs[0]
    assert only_doc.page_content == "a: 1"
    assert only_doc.metadata == {"b": 2}
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_fauna.py | import unittest
from langchain_community.document_loaders.fauna import FaunaLoader
try:
import fauna # noqa: F401
fauna_installed = True
except ImportError:
fauna_installed = False
@unittest.skipIf(not fauna_installed, "fauna not installed")
class TestFaunaLoader(unittest.TestCase):
    """Integration tests for FaunaLoader (requires a live Fauna database)."""

    def setUp(self) -> None:
        # NOTE(review): placeholder secret — must be replaced with a real
        # Fauna secret for this test to actually run against a database.
        self.fauna_secret = "<enter-valid-fauna-secret>"
        self.valid_fql_query = "Item.all()"
        self.valid_page_content_field = "text"
        self.valid_metadata_fields = ["valid_metadata_fields"]

    def test_fauna_loader(self) -> None:
        """Test Fauna loader."""
        loader = FaunaLoader(
            query=self.valid_fql_query,
            page_content_field=self.valid_page_content_field,
            secret=self.fauna_secret,
            metadata_fields=self.valid_metadata_fields,
        )
        docs = loader.load()
        assert len(docs) > 0  # assuming the query returns at least one document
        for doc in docs:
            assert (
                doc.page_content != ""
            )  # assuming that every document has page_content
            assert (
                "id" in doc.metadata and doc.metadata["id"] != ""
            )  # assuming that every document has 'id'
            assert (
                "ts" in doc.metadata and doc.metadata["ts"] != ""
            )  # assuming that every document has 'ts'
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_oracleds.py | # Authors:
# Sudhir Kumar (sudhirkk)
#
# -----------------------------------------------------------------------------
# test_oracleds.py
# -----------------------------------------------------------------------------
import sys
from langchain_community.document_loaders.oracleai import (
OracleDocLoader,
OracleTextSplitter,
)
from langchain_community.utilities.oracleai import OracleSummary
from langchain_community.vectorstores.oraclevs import (
_table_exists,
drop_table_purge,
)
uname = "hr"
passwd = "hr"
# uname = "LANGCHAINUSER"
# passwd = "langchainuser"
v_dsn = "100.70.107.245:1521/cdb1_pdb1.regress.rdbms.dev.us.oracle.com"
### Test loader #####
def test_loader_test() -> None:
    """Exercise OracleDocLoader: happy path plus several expected failures.

    NOTE(review): this test signals failure via ``sys.exit(1)`` inside broad
    ``except Exception`` blocks, which discards the original traceback —
    consider plain assertions / letting exceptions propagate instead.
    """
    try:
        import oracledb
    except ImportError:
        return

    try:
        # oracle connection
        connection = oracledb.connect(user=uname, password=passwd, dsn=v_dsn)
        cursor = connection.cursor()

        # Start from a clean slate: drop any leftover demo table.
        if _table_exists(connection, "LANGCHAIN_DEMO"):
            drop_table_purge(connection, "LANGCHAIN_DEMO")

        cursor.execute("CREATE TABLE langchain_demo(id number, text varchar2(25))")
        rows = [
            (1, "First"),
            (2, "Second"),
            (3, "Third"),
            (4, "Fourth"),
            (5, "Fifth"),
            (6, "Sixth"),
            (7, "Seventh"),
        ]
        cursor.executemany("insert into LANGCHAIN_DEMO(id, text) values (:1, :2)", rows)
        connection.commit()

        # local file, local directory, database column
        loader_params = {
            "owner": uname,
            "tablename": "LANGCHAIN_DEMO",
            "colname": "TEXT",
        }
        # instantiate
        loader = OracleDocLoader(conn=connection, params=loader_params)
        # load
        docs = loader.load()
        # verify
        if len(docs) == 0:
            sys.exit(1)

        # Clean up the demo table after a successful load.
        if _table_exists(connection, "LANGCHAIN_DEMO"):
            drop_table_purge(connection, "LANGCHAIN_DEMO")
    except Exception:
        sys.exit(1)

    try:
        # expectation : ORA-00942 (table does not exist)
        loader_params = {
            "owner": uname,
            "tablename": "COUNTRIES1",
            "colname": "COUNTRY_NAME",
        }
        # instantiate
        loader = OracleDocLoader(conn=connection, params=loader_params)
        # load
        docs = loader.load()
        if len(docs) == 0:
            pass
    except Exception:
        pass

    try:
        # expectation : file "SUDHIR" doesn't exist.
        loader_params = {"file": "SUDHIR"}
        # instantiate
        loader = OracleDocLoader(conn=connection, params=loader_params)
        # load
        docs = loader.load()
        if len(docs) == 0:
            pass
    except Exception:
        pass

    try:
        # expectation : path "SUDHIR" doesn't exist.
        loader_params = {"dir": "SUDHIR"}
        # instantiate
        loader = OracleDocLoader(conn=connection, params=loader_params)
        # load
        docs = loader.load()
        if len(docs) == 0:
            pass
    except Exception:
        pass
### Test splitter ####
def test_splitter_test() -> None:
    """Exercise OracleTextSplitter with valid and deliberately invalid params.

    NOTE(review): failure is signalled via ``sys.exit(1)`` inside broad
    ``except Exception`` blocks (losing the traceback); the later sections
    intentionally pass invalid parameters and swallow the expected errors.
    """
    try:
        import oracledb
    except ImportError:
        return

    try:
        # oracle connection
        connection = oracledb.connect(user=uname, password=passwd, dsn=v_dsn)
        doc = """Langchain is a wonderful framework to load, split, chunk
    and embed your data!!"""

        # by words , max = 1000
        splitter_params = {
            "by": "words",
            "max": "1000",
            "overlap": "200",
            "split": "custom",
            "custom_list": [","],
            "extended": "true",
            "normalize": "all",
        }
        # instantiate
        splitter = OracleTextSplitter(conn=connection, params=splitter_params)
        # generate chunks
        chunks = splitter.split_text(doc)
        # verify
        if len(chunks) == 0:
            sys.exit(1)

        # by chars , max = 4000
        splitter_params = {
            "by": "chars",
            "max": "4000",
            "overlap": "800",
            "split": "NEWLINE",
            "normalize": "all",
        }
        # instantiate
        splitter = OracleTextSplitter(conn=connection, params=splitter_params)
        # generate chunks
        chunks = splitter.split_text(doc)
        # verify
        if len(chunks) == 0:
            sys.exit(1)

        # by words , max = 10
        splitter_params = {
            "by": "words",
            "max": "10",
            "overlap": "2",
            "split": "SENTENCE",
        }
        # instantiate
        splitter = OracleTextSplitter(conn=connection, params=splitter_params)
        # generate chunks
        chunks = splitter.split_text(doc)
        # verify
        if len(chunks) == 0:
            sys.exit(1)

        # by chars , max = 50
        splitter_params = {
            "by": "chars",
            "max": "50",
            "overlap": "10",
            "split": "SPACE",
            "normalize": "all",
        }
        # instantiate
        splitter = OracleTextSplitter(conn=connection, params=splitter_params)
        # generate chunks
        chunks = splitter.split_text(doc)
        # verify
        if len(chunks) == 0:
            sys.exit(1)
    except Exception:
        sys.exit(1)

    try:
        # ORA-20003: invalid value xyz for BY parameter
        splitter_params = {"by": "xyz"}
        # instantiate
        splitter = OracleTextSplitter(conn=connection, params=splitter_params)
        # generate chunks
        chunks = splitter.split_text(doc)
        # verify
        if len(chunks) == 0:
            pass
    except Exception:
        pass

    try:
        # Expectation: ORA-30584: invalid text chunking MAXIMUM - '10'
        splitter_params = {
            "by": "chars",
            "max": "10",
            "overlap": "2",
            "split": "SPACE",
            "normalize": "all",
        }
        # instantiate
        splitter = OracleTextSplitter(conn=connection, params=splitter_params)
        # generate chunks
        chunks = splitter.split_text(doc)
        # verify
        if len(chunks) == 0:
            pass
    except Exception:
        pass

    try:
        # Expectation: ORA-30584: invalid text chunking MAXIMUM - '5'
        splitter_params = {
            "by": "words",
            "max": "5",
            "overlap": "2",
            "split": "SPACE",
            "normalize": "all",
        }
        # instantiate
        splitter = OracleTextSplitter(conn=connection, params=splitter_params)
        # generate chunks
        chunks = splitter.split_text(doc)
        # verify
        if len(chunks) == 0:
            pass
    except Exception:
        pass

    try:
        # Expectation: ORA-30586: invalid text chunking SPLIT BY - SENTENCE
        splitter_params = {
            "by": "words",
            "max": "50",
            "overlap": "2",
            "split": "SENTENCE",
            "normalize": "all",
        }
        # instantiate
        splitter = OracleTextSplitter(conn=connection, params=splitter_params)
        # generate chunks
        chunks = splitter.split_text(doc)
        # verify
        if len(chunks) == 0:
            pass
    except Exception:
        pass
#### Test summary ####
def test_summary_test() -> None:
    """Exercise OracleSummary across gist levels plus invalid-parameter cases.

    NOTE(review): failure is signalled via ``sys.exit(1)`` inside broad
    ``except Exception`` blocks (losing the traceback); the trailing sections
    deliberately pass invalid parameters and swallow the expected errors.
    """
    try:
        import oracledb
    except ImportError:
        return

    try:
        # oracle connection
        connection = oracledb.connect(user=uname, password=passwd, dsn=v_dsn)

        # provider : Database, glevel : Paragraph
        summary_params = {
            "provider": "database",
            "glevel": "paragraph",
            "numParagraphs": 2,
            "language": "english",
        }
        # summary
        summary = OracleSummary(conn=connection, params=summary_params)
        doc = """It was 7 minutes after midnight. The dog was lying on the grass in
    of the lawn in front of Mrs Shears house. Its eyes were closed. It
    was running on its side, the way dogs run when they think they are
    cat in a dream. But the dog was not running or asleep. The dog was dead.
    was a garden fork sticking out of the dog. The points of the fork must
    gone all the way through the dog and into the ground because the fork
    not fallen over. I decided that the dog was probably killed with the
    because I could not see any other wounds in the dog and I do not think
    would stick a garden fork into a dog after it had died for some other
    like cancer for example, or a road accident. But I could not be certain"""
        summaries = summary.get_summary(doc)
        # verify
        if len(summaries) == 0:
            sys.exit(1)

        # provider : Database, glevel : Sentence
        summary_params = {"provider": "database", "glevel": "Sentence"}
        # summary
        summary = OracleSummary(conn=connection, params=summary_params)
        summaries = summary.get_summary(doc)
        # verify
        if len(summaries) == 0:
            sys.exit(1)

        # provider : Database, glevel : P
        summary_params = {"provider": "database", "glevel": "P"}
        # summary
        summary = OracleSummary(conn=connection, params=summary_params)
        summaries = summary.get_summary(doc)
        # verify
        if len(summaries) == 0:
            sys.exit(1)

        # provider : Database, glevel : S
        summary_params = {
            "provider": "database",
            "glevel": "S",
            "numParagraphs": 16,
            "language": "english",
        }
        # summary
        summary = OracleSummary(conn=connection, params=summary_params)
        summaries = summary.get_summary(doc)
        # verify
        if len(summaries) == 0:
            sys.exit(1)

        # provider : Database, glevel : S, doc = ' '
        summary_params = {"provider": "database", "glevel": "S", "numParagraphs": 2}
        # summary
        summary = OracleSummary(conn=connection, params=summary_params)
        doc = " "
        summaries = summary.get_summary(doc)
        # verify
        if len(summaries) == 0:
            sys.exit(1)
    except Exception:
        sys.exit(1)

    try:
        # Expectation : DRG-11002: missing value for PROVIDER
        summary_params = {"provider": "database1", "glevel": "S"}
        # summary
        summary = OracleSummary(conn=connection, params=summary_params)
        summaries = summary.get_summary(doc)
        # verify
        if len(summaries) == 0:
            pass
    except Exception:
        pass

    try:
        # Expectation : DRG-11425: gist level SUDHIR is invalid,
        # DRG-11427: valid gist level values are S, P
        summary_params = {"provider": "database", "glevel": "SUDHIR"}
        # summary
        summary = OracleSummary(conn=connection, params=summary_params)
        summaries = summary.get_summary(doc)
        # verify
        if len(summaries) == 0:
            pass
    except Exception:
        pass

    try:
        # Expectation : DRG-11441: gist numParagraphs -2 is invalid
        summary_params = {"provider": "database", "glevel": "S", "numParagraphs": -2}
        # summary
        summary = OracleSummary(conn=connection, params=summary_params)
        summaries = summary.get_summary(doc)
        # verify
        if len(summaries) == 0:
            pass
    except Exception:
        pass
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_dataframe.py | import pandas as pd
import pytest
from langchain_core.documents import Document
from langchain_community.document_loaders import DataFrameLoader
@pytest.fixture
def sample_data_frame() -> pd.DataFrame:
    """Two-row frame with a text column plus two metadata columns."""
    records = {
        "text": ["Hello", "World"],
        "author": ["Alice", "Bob"],
        "date": ["2022-01-01", "2022-01-02"],
    }
    return pd.DataFrame(records)
def test_load_returns_list_of_documents(sample_data_frame: pd.DataFrame) -> None:
    """load() returns a list with one Document per row."""
    documents = DataFrameLoader(sample_data_frame).load()
    assert isinstance(documents, list)
    assert len(documents) == 2
    for document in documents:
        assert isinstance(document, Document)
def test_load_converts_dataframe_columns_to_document_metadata(
    sample_data_frame: pd.DataFrame,
) -> None:
    """Every non-content column becomes a metadata entry, row by row."""
    loader = DataFrameLoader(sample_data_frame)
    docs = loader.load()
    for i, doc in enumerate(docs):
        assert doc.metadata["author"] == sample_data_frame.loc[i, "author"]
        assert doc.metadata["date"] == sample_data_frame.loc[i, "date"]
def test_load_uses_page_content_column_to_create_document_text(
    sample_data_frame: pd.DataFrame,
) -> None:
    """page_content_column selects which column becomes the document text."""
    renamed_frame = sample_data_frame.rename(columns={"text": "dummy_test_column"})
    loader = DataFrameLoader(renamed_frame, page_content_column="dummy_test_column")
    docs = loader.load()
    assert docs[0].page_content == "Hello"
    assert docs[1].page_content == "World"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_news.py | import random
import pytest
import requests
from langchain_community.document_loaders import NewsURLLoader
def get_random_news_url() -> str:
    """Scrape the Google News front page and return a random article URL.

    Raises:
        ValueError: if no article links are found (e.g. layout change),
            instead of letting ``random.choice`` raise a bare IndexError.
    """
    from bs4 import BeautifulSoup

    base_url = "https://news.google.com"
    response = requests.get(base_url)
    soup = BeautifulSoup(response.text, "html.parser")
    article_links = [
        a["href"] for a in soup.find_all("a", href=True) if "/articles/" in a["href"]
    ]
    if not article_links:
        raise ValueError("No article links found on the Google News front page.")
    random_article_link = random.choice(article_links)
    return base_url + random_article_link
def test_news_loader() -> None:
    """NewsURLLoader populates content and the standard metadata fields."""
    docs = NewsURLLoader([get_random_news_url()]).load()
    first_doc = docs[0]
    assert first_doc is not None
    assert hasattr(first_doc, "page_content")
    assert hasattr(first_doc, "metadata")
    expected_keys = ("title", "link", "authors", "language", "description", "publish_date")
    for key in expected_keys:
        assert key in first_doc.metadata
def test_news_loader_with_nlp() -> None:
    """With nlp=True, keywords and summary are added to the metadata."""
    docs = NewsURLLoader([get_random_news_url()], nlp=True).load()
    first_doc = docs[0]
    assert first_doc is not None
    assert hasattr(first_doc, "page_content")
    assert hasattr(first_doc, "metadata")
    expected_keys = (
        "title",
        "link",
        "authors",
        "language",
        "description",
        "publish_date",
        "keywords",
        "summary",
    )
    for key in expected_keys:
        assert key in first_doc.metadata
def test_continue_on_failure_true() -> None:
    """Test exception is not raised when continue_on_failure=True (default)."""
    loader = NewsURLLoader(["badurl.foobar"])
    loader.load()
def test_continue_on_failure_false() -> None:
    """Test exception is raised when continue_on_failure=False."""
    loader = NewsURLLoader(["badurl.foobar"], continue_on_failure=False)
    # NOTE(review): pytest.raises(Exception) is very broad — if the loader
    # documents a specific exception type for bad URLs, assert that instead.
    with pytest.raises(Exception):
        loader.load()
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_org_mode.py | import os
from pathlib import Path
from langchain_community.document_loaders import UnstructuredOrgModeLoader
EXAMPLE_DIRECTORY = file_path = Path(__file__).parent.parent / "examples"
def test_unstructured_org_mode_loader() -> None:
    """UnstructuredOrgModeLoader loads an .org file into a single document."""
    # EXAMPLE_DIRECTORY is a pathlib.Path, so use the `/` operator instead of
    # os.path.join (idiomatic and avoids the os import in this test).
    file_path = EXAMPLE_DIRECTORY / "README.org"
    loader = UnstructuredOrgModeLoader(str(file_path))
    docs = loader.load()
    assert len(docs) == 1
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_unstructured.py | import os
from contextlib import ExitStack
from pathlib import Path
from langchain_community.document_loaders import (
UnstructuredAPIFileIOLoader,
UnstructuredAPIFileLoader,
UnstructuredFileLoader,
)
EXAMPLE_DOCS_DIRECTORY = str(Path(__file__).parent.parent / "examples/")
def test_unstructured_loader_with_post_processor() -> None:
    """Each element's text is run through the supplied post-processors."""

    def append_the_end(text: str) -> str:
        return text + "THE END!"

    pdf_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, "layout-parser-paper.pdf")
    loader = UnstructuredFileLoader(
        file_path=pdf_path,
        post_processors=[append_the_end],
        strategy="fast",
        mode="elements",
    )
    docs = loader.load()
    assert len(docs) > 1
    assert docs[0].page_content.endswith("THE END!")
def test_unstructured_file_loader_multiple_files() -> None:
    """The loader accepts a list of paths and merges all files' elements."""
    paths = [
        os.path.join(EXAMPLE_DOCS_DIRECTORY, name)
        for name in ("layout-parser-paper.pdf", "whatsapp_chat.txt")
    ]
    loader = UnstructuredFileLoader(
        file_path=paths,
        strategy="fast",
        mode="elements",
    )
    assert len(loader.load()) > 1
def test_unstructured_api_file_loader() -> None:
    """Load a single file via the hosted Unstructured API loader."""
    file_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, "layout-parser-paper.pdf")
    loader = UnstructuredAPIFileLoader(
        file_path=file_path,
        # Placeholder key; the test environment routes/authorizes the call.
        api_key="FAKE_API_KEY",
        strategy="fast",
        mode="elements",
    )
    docs = loader.load()
    assert len(docs) > 1
def test_unstructured_api_file_loader_multiple_files() -> None:
    """Load several files at once via the hosted Unstructured API loader."""
    file_paths = [
        os.path.join(EXAMPLE_DOCS_DIRECTORY, "layout-parser-paper.pdf"),
        os.path.join(EXAMPLE_DOCS_DIRECTORY, "whatsapp_chat.txt"),
    ]
    loader = UnstructuredAPIFileLoader(
        file_path=file_paths,
        # Placeholder key; the test environment routes/authorizes the call.
        api_key="FAKE_API_KEY",
        strategy="fast",
        mode="elements",
    )
    docs = loader.load()
    assert len(docs) > 1
def test_unstructured_api_file_io_loader() -> None:
    """An open binary file object can be sent to the Unstructured API."""
    file_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, "layout-parser-paper.pdf")
    with open(file_path, "rb") as f:
        loader = UnstructuredAPIFileIOLoader(
            file=f,
            api_key="FAKE_API_KEY",
            strategy="fast",
            mode="elements",
            # Needed so the API can infer the file type from the name.
            file_filename=file_path,
        )
        docs = loader.load()
        assert len(docs) > 1
def test_unstructured_api_file_loader_io_multiple_files() -> None:
    """Several open file objects can be sent together to the API.

    ExitStack keeps all file handles open for the duration of the load and
    guarantees they are all closed afterwards.
    """
    file_paths = [
        os.path.join(EXAMPLE_DOCS_DIRECTORY, "layout-parser-paper.pdf"),
        os.path.join(EXAMPLE_DOCS_DIRECTORY, "whatsapp_chat.txt"),
    ]
    with ExitStack() as stack:
        files = [stack.enter_context(open(file_path, "rb")) for file_path in file_paths]
        loader = UnstructuredAPIFileIOLoader(
            file=files,  # type: ignore
            api_key="FAKE_API_KEY",
            strategy="fast",
            mode="elements",
            file_filenames=file_paths,
        )
        docs = loader.load()
        assert len(docs) > 1
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_rocksetdb.py | import logging
import os
from langchain_core.documents import Document
from langchain_community.document_loaders import RocksetLoader
logger = logging.getLogger(__name__)
def test_sql_query() -> None:
    """Run a simple SQL query through RocksetLoader and verify the document.

    Fixes: the local ``id`` shadowed the builtin of the same name (renamed
    to ``row_id``), and the environment variables are now read once instead
    of being fetched twice.
    """
    import rockset

    api_key = os.environ.get("ROCKSET_API_KEY")
    region = os.environ.get("ROCKSET_REGION")
    assert api_key is not None
    assert region is not None

    # Map the short region code onto the SDK's host constant.
    if region == "use1a1":
        host = rockset.Regions.use1a1
    elif region == "usw2a1":
        host = rockset.Regions.usw2a1
    elif region == "euc1a1":
        host = rockset.Regions.euc1a1
    elif region == "dev":
        host = rockset.DevRegions.usw2a1
    else:
        logger.warning(
            "Using ROCKSET_REGION:%s as it is.. \
            You should know what you're doing...",
            region,
        )
        host = region

    client = rockset.RocksetClient(host, api_key)

    col_1 = "Rockset is a real-time analytics database"
    col_2 = 2
    col_3 = "e903e069-b0b5-4b80-95e2-86471b41f55f"
    row_id = 7320132  # renamed from `id` to avoid shadowing the builtin

    loader = RocksetLoader(
        client,
        rockset.models.QueryRequestSql(
            query=(
                f"SELECT '{col_1}' AS col_1, {col_2} AS col_2, '{col_3}' AS col_3,"
                f" {row_id} AS id"
            )
        ),
        ["col_1"],
        metadata_keys=["col_2", "col_3", "id"],
    )
    output = loader.load()
    assert len(output) == 1
    assert isinstance(output[0], Document)
    assert output[0].page_content == col_1
    assert output[0].metadata == {"col_2": col_2, "col_3": col_3, "id": row_id}
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_bilibili.py | from langchain_community.document_loaders import BiliBiliLoader
def test_bilibili_loader() -> None:
    """Two video URLs (BV and av form) load into two documents with metadata."""
    urls = [
        "https://www.bilibili.com/video/BV1xt411o7Xu/",
        "https://www.bilibili.com/video/av330407025/",
    ]
    docs = BiliBiliLoader(urls).load()
    assert len(docs) == 2
    assert docs[0].metadata["aid"] == 34218168
    assert docs[1].metadata["videos"] == 1
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_xml.py | import os
from pathlib import Path
from langchain_community.document_loaders import UnstructuredXMLLoader
# Directory containing the example documents used by these tests.
# (The original chained assignment also bound a stray module-level
# ``file_path`` alias, immediately shadowed inside the test; removed.)
EXAMPLE_DIRECTORY = Path(__file__).parent.parent / "examples"
def test_unstructured_xml_loader() -> None:
    """UnstructuredXMLLoader produces a single document for factbook.xml."""
    path = os.path.join(EXAMPLE_DIRECTORY, "factbook.xml")
    loader = UnstructuredXMLLoader(str(path))
    assert len(loader.load()) == 1
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_tidb.py | import os
import pytest
from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine
from langchain_community.document_loaders import TiDBLoader
# Gate the integration test on a configured TiDB connection string.
# Fix: the error message previously named a non-existent env var
# ("TEST_TiDB_URL") instead of the one actually read.
try:
    CONNECTION_STRING = os.getenv("TEST_TiDB_CONNECTION_URL", "")
    if CONNECTION_STRING == "":
        raise OSError("TEST_TiDB_CONNECTION_URL environment variable is not set")
    tidb_available = True
except (OSError, ImportError):
    tidb_available = False
@pytest.mark.skipif(not tidb_available, reason="tidb is not available")
def test_load_documents() -> None:
    """Test loading documents from TiDB.

    Creates a scratch table, seeds three rows, loads them through
    ``TiDBLoader`` and checks content/metadata mapping. The table is always
    dropped, even when seeding or loading fails.
    """
    # Connect to the database
    engine = create_engine(CONNECTION_STRING)
    metadata = MetaData()
    table_name = "tidb_loader_intergration_test"
    # Create a test table
    test_table = Table(
        table_name,
        metadata,
        Column("id", Integer, primary_key=True),
        Column("name", String(255)),
        Column("description", String(255)),
    )
    metadata.create_all(engine)
    try:
        with engine.connect() as connection:
            # ``begin()`` as a context manager commits on success and rolls
            # back on error, replacing the manual try/commit/bare-except/
            # rollback sequence of the original.
            with connection.begin():
                connection.execute(
                    test_table.insert(),
                    [
                        {"name": f"Item {i}", "description": f"Description of Item {i}"}
                        for i in range(1, 4)
                    ],
                )
        loader = TiDBLoader(
            connection_string=CONNECTION_STRING,
            query=f"SELECT * FROM {table_name};",
            page_content_columns=["name", "description"],
            metadata_columns=["id"],
        )
        documents = loader.load()
    finally:
        # Always drop the scratch table so reruns start clean.
        test_table.drop(bind=engine)
    # check
    assert len(documents) == 3
    for idx, doc in enumerate(documents, start=1):
        assert (
            doc.page_content
            == f"name: Item {idx}\ndescription: Description of Item {idx}"
        )
        assert doc.metadata == {"id": idx}
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_wikipedia.py | """Integration test for Wikipedia Document Loader."""
from typing import List
from langchain_core.documents import Document
from langchain_community.document_loaders import WikipediaLoader
def assert_docs(docs: List[Document], all_meta: bool = False) -> None:
    """Check every document has content plus the expected metadata keys.

    With ``all_meta`` the metadata must contain extra keys beyond the core
    title/summary/source set; otherwise exactly the core set is expected.
    """
    core_keys = {"title", "summary", "source"}
    for doc in docs:
        assert doc.page_content
        assert doc.metadata
        keys = set(doc.metadata)
        assert keys.issuperset(core_keys)
        if all_meta:
            assert len(keys) > len(core_keys)
        else:
            assert len(keys) == len(core_keys)
def test_load_success() -> None:
    """Default query returns between 2 and 25 docs with standard metadata."""
    docs = WikipediaLoader(query="HUNTER X HUNTER").load()
    assert 1 < len(docs) <= 25
    assert_docs(docs, all_meta=False)
def test_load_success_all_meta() -> None:
    """With load_all_available_meta, extra metadata keys are present."""
    max_docs = 5
    loader = WikipediaLoader(
        query="HUNTER X HUNTER",
        load_max_docs=max_docs,
        load_all_available_meta=True,
    )
    docs = loader.load()
    assert len(docs) == max_docs
    assert_docs(docs, all_meta=True)
def test_load_success_more() -> None:
    """load_max_docs caps the number of returned documents exactly."""
    max_docs = 10
    docs = WikipediaLoader(query="HUNTER X HUNTER", load_max_docs=max_docs).load()
    assert len(docs) == max_docs
    assert_docs(docs, all_meta=False)
def test_load_no_result() -> None:
    """A nonsense query yields no documents."""
    nonsense = (
        "NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL"
    )
    assert not WikipediaLoader(nonsense).load()
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_figma.py | from langchain_community.document_loaders.figma import FigmaFileLoader
ACCESS_TOKEN = ""
IDS = ""
KEY = ""
def test_figma_file_loader() -> None:
    """Figma loader returns exactly one document for the configured file."""
    docs = FigmaFileLoader(ACCESS_TOKEN, IDS, KEY).load()
    assert len(docs) == 1
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_cassandra.py | """
Test of Cassandra document loader class `CassandraLoader`
"""
import os
from typing import Any, Iterator
import pytest
from langchain_core.documents import Document
from langchain_community.document_loaders.cassandra import CassandraLoader
CASSANDRA_DEFAULT_KEYSPACE = "docloader_test_keyspace"
CASSANDRA_TABLE = "docloader_test_table"
@pytest.fixture(autouse=True, scope="session")
def keyspace() -> Iterator[str]:
    """Session fixture: connect to Cassandra/Astra and seed a two-row table.

    Yields the resolved keyspace name; on teardown drops the seeded table.
    """
    import cassio
    from cassandra.cluster import Cluster
    from cassio.config import check_resolve_session, resolve_keyspace
    from cassio.table.tables import PlainCassandraTable

    # Prefer cassio auto-configuration when any known connection env var is
    # set; otherwise fall back to a local cluster on the default contact point.
    if any(
        env_var in os.environ
        for env_var in [
            "CASSANDRA_CONTACT_POINTS",
            "ASTRA_DB_APPLICATION_TOKEN",
            "ASTRA_DB_INIT_STRING",
        ]
    ):
        cassio.init(auto=True)
        session = check_resolve_session()
    else:
        cluster = Cluster()
        session = cluster.connect()
    keyspace = resolve_keyspace() or CASSANDRA_DEFAULT_KEYSPACE
    cassio.init(session=session, keyspace=keyspace)
    # Idempotent keyspace creation so the suite can run on a fresh cluster.
    session.execute(
        (
            f"CREATE KEYSPACE IF NOT EXISTS {keyspace} "
            f"WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 1}}"
        )
    )
    # We use a cassio table by convenience to seed the DB
    table = PlainCassandraTable(
        table=CASSANDRA_TABLE, keyspace=keyspace, session=session
    )
    table.put(row_id="id1", body_blob="text1")
    table.put(row_id="id2", body_blob="text2")
    yield keyspace
    # Teardown: remove the seeded table.
    session.execute(f"DROP TABLE IF EXISTS {keyspace}.{CASSANDRA_TABLE}")
async def test_loader_table(keyspace: str) -> None:
    """Loading by table name yields Row reprs with table/keyspace metadata."""
    loader = CassandraLoader(table=CASSANDRA_TABLE)
    expected = [
        Document(
            page_content=f"Row(row_id='{rid}', body_blob='{blob}')",
            metadata={"table": CASSANDRA_TABLE, "keyspace": keyspace},
        )
        for rid, blob in (("id1", "text1"), ("id2", "text2"))
    ]
    # Sync and async paths must agree.
    assert loader.load() == expected
    assert await loader.aload() == expected
async def test_loader_query(keyspace: str) -> None:
    """Loading by CQL query yields Row reprs without extra metadata."""
    loader = CassandraLoader(
        query=f"SELECT body_blob FROM {keyspace}.{CASSANDRA_TABLE}"
    )
    expected = [
        Document(page_content=f"Row(body_blob='{blob}')")
        for blob in ("text1", "text2")
    ]
    assert loader.load() == expected
    assert await loader.aload() == expected
async def test_loader_page_content_mapper(keyspace: str) -> None:
    """A page_content_mapper controls how each row becomes page content."""

    def to_text(row: Any) -> str:
        return str(row.body_blob)

    loader = CassandraLoader(table=CASSANDRA_TABLE, page_content_mapper=to_text)
    expected = [
        Document(
            page_content=text,
            metadata={"table": CASSANDRA_TABLE, "keyspace": keyspace},
        )
        for text in ("text1", "text2")
    ]
    assert loader.load() == expected
    assert await loader.aload() == expected
async def test_loader_metadata_mapper(keyspace: str) -> None:
    """A metadata_mapper merges custom keys into the default metadata."""

    def to_metadata(row: Any) -> dict:
        return {"id": row.row_id}

    loader = CassandraLoader(table=CASSANDRA_TABLE, metadata_mapper=to_metadata)
    expected = [
        Document(
            page_content=f"Row(row_id='{rid}', body_blob='{blob}')",
            metadata={
                "table": CASSANDRA_TABLE,
                "keyspace": keyspace,
                "id": rid,
            },
        )
        for rid, blob in (("id1", "text1"), ("id2", "text2"))
    ]
    assert loader.load() == expected
    assert await loader.aload() == expected
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_facebook_chat.py | from pathlib import Path
from langchain_community.document_loaders import FacebookChatLoader
def test_facebook_chat_loader() -> None:
    """Test FacebookChatLoader."""
    # The loader flattens the exported chat JSON into one document of
    # "<sender> on <timestamp>: <text>" entries, double-newline separated.
    file_path = Path(__file__).parent.parent / "examples/facebook_chat.json"
    loader = FacebookChatLoader(str(file_path))
    docs = loader.load()
    assert len(docs) == 1
    assert docs[0].metadata["source"] == str(file_path)
    assert docs[0].page_content == (
        "User 2 on 2023-02-05 13:46:11: Bye!\n\n"
        "User 1 on 2023-02-05 13:43:55: Oh no worries! Bye\n\n"
        "User 2 on 2023-02-05 13:24:37: No Im sorry it was my mistake, "
        "the blue one is not for sale\n\n"
        "User 1 on 2023-02-05 13:05:40: I thought you were selling the blue one!\n\n"
        "User 1 on 2023-02-05 13:05:09: Im not interested in this bag. "
        "Im interested in the blue one!\n\n"
        "User 2 on 2023-02-05 13:04:28: Here is $129\n\n"
        "User 2 on 2023-02-05 13:04:05: Online is at least $100\n\n"
        "User 1 on 2023-02-05 12:59:59: How much do you want?\n\n"
        "User 2 on 2023-02-05 08:17:56: Goodmorning! $50 is too low.\n\n"
        "User 1 on 2023-02-05 00:17:02: Hi! Im interested in your bag. "
        "Im offering $50. Let me know if you are interested. Thanks!\n\n"
    )
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_dedoc.py | import os
from pathlib import Path
from langchain_community.document_loaders import (
DedocAPIFileLoader,
DedocFileLoader,
DedocPDFLoader,
)
EXAMPLE_DOCS_DIRECTORY = str(Path(__file__).parent.parent / "examples/")
FILE_NAMES = [
"example.html",
"example.json",
"fake-email-attachment.eml",
"layout-parser-paper.pdf",
"slack_export.zip",
"stanley-cups.csv",
"stanley-cups.xlsx",
"whatsapp_chat.txt",
]
def test_dedoc_file_loader() -> None:
    """Every supported example file loads into exactly one document."""
    for file_name in FILE_NAMES:
        loader = DedocFileLoader(
            os.path.join(EXAMPLE_DOCS_DIRECTORY, file_name),
            split="document",
            with_tables=False,
            pdf_with_text_layer="tabby",
            pages=":1",
        )
        assert len(loader.load()) == 1
def test_dedoc_pdf_loader() -> None:
    """The PDF loader handles both "true" and "tabby" text-layer modes."""
    file_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, "layout-parser-paper.pdf")
    for text_layer_mode in ("true", "tabby"):
        loader = DedocPDFLoader(
            file_path,
            split="document",
            with_tables=False,
            pdf_with_text_layer=text_layer_mode,
            pages=":1",
        )
        assert len(loader.load()) == 1
def test_dedoc_content_html() -> None:
    """Line-split HTML parsing: check file metadata and per-line content."""
    file_name = "example.html"
    file_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, file_name)
    loader = DedocFileLoader(
        file_path,
        split="line",
        with_tables=False,
    )
    docs = loader.load()
    assert docs[0].metadata["file_name"] == "example.html"
    assert docs[0].metadata["file_type"] == "text/html"
    # One known sentence belongs to the first document...
    assert "Instead of drinking water from the cat bowl" in docs[0].page_content
    # ...while text from elsewhere in the file must not leak into it.
    assert "Chase the red dot" not in docs[0].page_content
def test_dedoc_content_pdf() -> None:
    """Page-split PDF parsing yields 6 documents, including table documents."""
    file_name = "layout-parser-paper.pdf"
    file_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, file_name)
    loader = DedocFileLoader(
        file_path, split="page", pdf_with_text_layer="tabby", pages=":5"
    )
    docs = loader.load()
    # Table documents are tagged with type == "table" in their metadata.
    table_list = [item for item in docs if item.metadata.get("type", "") == "table"]
    assert len(docs) == 6
    assert docs[0].metadata["file_name"] == "layout-parser-paper.pdf"
    assert docs[0].metadata["file_type"] == "application/pdf"
    assert "This paper introduces LayoutParser, an open-source" in docs[0].page_content
    assert "layout detection [38, 22], table detection [26]" in docs[1].page_content
    assert "LayoutParser: A Unified Toolkit for DL-Based DIA" in docs[2].page_content
    assert len(table_list) > 0
    # Tables also carry an HTML rendering under metadata["text_as_html"].
    assert (
        '\n<tbody>\n<tr>\n<td colspan="1" rowspan="1">'
        in table_list[0].metadata["text_as_html"]
    )
def test_dedoc_content_json() -> None:
    """JSON content splits into 11 nodes with correct file metadata."""
    file_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, "example.json")
    docs = DedocFileLoader(file_path, split="node").load()
    assert len(docs) == 11
    first = docs[0]
    assert first.metadata["file_name"] == "example.json"
    assert first.metadata["file_type"] == "application/json"
    assert "Bye!" in first.page_content
def test_dedoc_content_txt() -> None:
    """Plain-text chat log splits into 10 lines with correct metadata."""
    file_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, "whatsapp_chat.txt")
    docs = DedocFileLoader(file_path, split="line").load()
    assert len(docs) == 10
    assert docs[0].metadata["file_name"] == "whatsapp_chat.txt"
    assert docs[0].metadata["file_type"] == "text/plain"
    # Spot-check that successive lines land in successive documents.
    assert "[05.05.23, 15:48:11] James: Hi here" in docs[0].page_content
    assert "[11/8/21, 9:41:32 AM] User name: Message 123" in docs[1].page_content
    assert "1/23/23, 3:19 AM - User 2: Bye!" in docs[2].page_content
def test_dedoc_table_handling() -> None:
    """CSV input yields a text document plus a table document with HTML."""
    file_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, "stanley-cups.csv")
    docs = DedocFileLoader(file_path, split="document").load()
    assert len(docs) == 2
    text_doc, table_doc = docs
    assert text_doc.metadata["file_name"] == "stanley-cups.csv"
    assert text_doc.metadata["file_type"] == "text/csv"
    assert table_doc.metadata["type"] == "table"
    assert '<td colspan="1" rowspan="1">1</td>' in table_doc.metadata["text_as_html"]
    assert "Maple Leafs\tTOR\t13" in table_doc.page_content
def test_dedoc_api_file_loader() -> None:
    """The hosted Dedoc API splits the chat log the same way as the library."""
    file_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, "whatsapp_chat.txt")
    loader = DedocAPIFileLoader(
        file_path, split="line", url="https://dedoc-readme.hf.space"
    )
    docs = loader.load()
    assert len(docs) == 10
    assert docs[0].metadata["file_name"] == "whatsapp_chat.txt"
    assert docs[0].metadata["file_type"] == "text/plain"
    assert "[05.05.23, 15:48:11] James: Hi here" in docs[0].page_content
    assert "[11/8/21, 9:41:32 AM] User name: Message 123" in docs[1].page_content
    assert "1/23/23, 3:19 AM - User 2: Bye!" in docs[2].page_content
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_couchbase.py | import unittest
from langchain_community.document_loaders.couchbase import CouchbaseLoader
try:
import couchbase # noqa: F401
couchbase_installed = True
except ImportError:
couchbase_installed = False
@unittest.skipIf(not couchbase_installed, "couchbase not installed")
class TestCouchbaseLoader(unittest.TestCase):
    """Integration tests for ``CouchbaseLoader`` (needs a live cluster)."""

    def setUp(self) -> None:
        # Placeholders: substitute real cluster credentials before running.
        self.conn_string = "<enter-valid-couchbase-connection-string>"
        self.database_user = "<enter-valid-couchbase-user>"
        self.database_password = "<enter-valid-couchbase-password>"
        self.valid_query = "select h.* from `travel-sample`.inventory.hotel h limit 10"
        self.valid_page_content_fields = ["country", "name", "description"]
        self.valid_metadata_fields = ["id"]

    def test_couchbase_loader(self) -> None:
        """Test Couchbase loader."""
        loader = CouchbaseLoader(
            connection_string=self.conn_string,
            db_username=self.database_user,
            db_password=self.database_password,
            query=self.valid_query,
            page_content_fields=self.valid_page_content_fields,
            metadata_fields=self.valid_metadata_fields,
        )
        docs = loader.load()
        print(docs)  # noqa: T201
        # assuming the query returns at least one document
        assert len(docs) > 0
        for doc in docs:
            print(doc)  # noqa: T201
            # every document should carry non-empty content and an 'id'
            assert doc.page_content != ""
            assert "id" in doc.metadata and doc.metadata["id"] != ""
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/__init__.py | """Test document loader integrations."""
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_telegram.py | from pathlib import Path
import pytest
from langchain_community.document_loaders import (
TelegramChatApiLoader,
TelegramChatFileLoader,
)
def test_telegram_chat_file_loader() -> None:
    """Test TelegramChatFileLoader."""
    # The exported chat JSON collapses into one document of
    # "<sender> on <ISO timestamp>: <message>" entries.
    file_path = Path(__file__).parent / "test_docs" / "telegram.json"
    loader = TelegramChatFileLoader(str(file_path))
    docs = loader.load()
    assert len(docs) == 1
    assert docs[0].metadata["source"] == str(file_path)
    # NOTE: the expected text contains mojibake ("ðŸ\x8d’") exactly as the
    # loader emits it for this fixture — do not "fix" the bytes here.
    assert docs[0].page_content == (
        "Henry on 2020-01-01T00:00:02: It's 2020...\n\n"
        "Henry on 2020-01-01T00:00:04: Fireworks!\n\n"
        "Grace 🧤 ðŸ\x8d’ on 2020-01-01T00:00:05: You're a minute late!\n\n"
    )
@pytest.mark.requires("telethon", "pandas")
def test_telegram_channel_loader_parsing() -> None:
    """Parse a pre-exported channel dump without hitting the Telegram API."""
    file_path = Path(__file__).parent / "test_docs" / "telegram_channel.json"
    # if we don't provide any value, it will skip fetching from telegram
    # and will check the parsing logic.
    docs = TelegramChatApiLoader(file_path=str(file_path)).load()
    assert len(docs) == 1
    print(docs[0].page_content)  # noqa: T201
    assert docs[0].page_content == (
        "Hello, world!.\nLLMs are awesome! Langchain is great. Telegram is the best!."
    )
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_spreedly.py | from langchain_community.document_loaders.spreedly import SpreedlyLoader
def test_spreedly_loader() -> None:
    """Test Spreedly Loader."""
    # Empty access token; the gateways_options resource yields one document.
    loader = SpreedlyLoader("", "gateways_options")
    assert len(loader.load()) == 1
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_sitemap.py | from pathlib import Path
from typing import Any
import pytest
from langchain_community.document_loaders import SitemapLoader
from langchain_community.document_loaders.sitemap import _extract_scheme_and_domain
def test_sitemap() -> None:
    """Basic load from the live sitemap returns multiple API-doc pages."""
    docs = SitemapLoader("https://api.python.langchain.com/sitemap.xml").load()
    assert len(docs) > 1
    assert "LangChain Python API" in docs[0].page_content
def test_sitemap_block() -> None:
    """blocksize=1 with blocknum=1 loads exactly one (the second) URL."""
    docs = SitemapLoader(
        "https://api.python.langchain.com/sitemap.xml", blocksize=1, blocknum=1
    ).load()
    assert len(docs) == 1
    assert "LangChain Python API" in docs[0].page_content
def test_sitemap_block_only_one() -> None:
    """A huge blocksize with blocknum=0 loads the entire sitemap."""
    docs = SitemapLoader(
        "https://api.python.langchain.com/sitemap.xml", blocksize=1000000, blocknum=0
    ).load()
    assert len(docs) > 1
    assert "LangChain Python API" in docs[0].page_content
def test_sitemap_block_blocknum_default() -> None:
    """When blocknum is omitted, the first block is loaded."""
    docs = SitemapLoader(
        "https://api.python.langchain.com/sitemap.xml", blocksize=1000000
    ).load()
    assert len(docs) > 1
    assert "LangChain Python API" in docs[0].page_content
def test_sitemap_block_size_to_small() -> None:
    """A blocksize below 1 is rejected at construction time."""
    url = "https://api.python.langchain.com/sitemap.xml"
    with pytest.raises(ValueError, match="Sitemap blocksize should be at least 1"):
        SitemapLoader(url, blocksize=0)
def test_sitemap_block_num_to_small() -> None:
    """A negative blocknum is rejected at construction time."""
    url = "https://api.python.langchain.com/sitemap.xml"
    with pytest.raises(ValueError, match="Sitemap blocknum can not be lower then 0"):
        SitemapLoader(url, blocksize=1000000, blocknum=-1)
def test_sitemap_block_does_not_exists() -> None:
    """Asking for a block past the end fails at load time, not construction."""
    loader = SitemapLoader(
        "https://api.python.langchain.com/sitemap.xml", blocksize=1000000, blocknum=15
    )
    expected = "Selected sitemap does not contain enough blocks for given blocknum"
    with pytest.raises(ValueError, match=expected):
        loader.load()
def test_filter_sitemap() -> None:
    """filter_urls restricts loading to matching sitemap entries."""
    docs = SitemapLoader(
        "https://api.python.langchain.com/sitemap.xml",
        filter_urls=["https://api.python.langchain.com/en/stable/"],
    ).load()
    assert len(docs) == 1
    assert "LangChain Python API" in docs[0].page_content
def test_sitemap_metadata() -> None:
    """meta_function can inject custom keys into each document's metadata.

    Fix: the original placed the docstring after the inner function, where it
    was a no-op string expression rather than the function docstring.
    """

    def sitemap_metadata_one(meta: dict, _content: None) -> dict:
        return {**meta, "mykey": "Super Important Metadata"}

    loader = SitemapLoader(
        "https://api.python.langchain.com/sitemap.xml",
        meta_function=sitemap_metadata_one,
    )
    documents = loader.load()
    assert len(documents) > 1
    assert "mykey" in documents[0].metadata
    assert "Super Important Metadata" in documents[0].metadata["mykey"]
def test_sitemap_metadata_extraction() -> None:
    """meta_function can derive metadata from the parsed page content.

    Fix: the original placed the docstring after the inner function, where it
    was a no-op string expression rather than the function docstring.
    """

    def sitemap_metadata_two(meta: dict, content: Any) -> dict:
        # content is the parsed page; lift its <title> into the metadata.
        title = content.find("title")
        if title:
            return {**meta, "title": title.get_text()}
        return meta

    loader = SitemapLoader(
        "https://api.python.langchain.com/sitemap.xml",
        meta_function=sitemap_metadata_two,
    )
    documents = loader.load()
    assert len(documents) > 1
    assert "title" in documents[0].metadata
    assert "LangChain" in documents[0].metadata["title"]
def test_sitemap_metadata_default() -> None:
    """Without a meta_function, documents carry source and loc keys."""
    docs = SitemapLoader("https://api.python.langchain.com/sitemap.xml").load()
    assert len(docs) > 1
    assert "source" in docs[0].metadata
    assert "loc" in docs[0].metadata
def test_local_sitemap() -> None:
    """A sitemap file on disk loads with is_local=True."""
    sitemap_path = Path(__file__).parent.parent / "examples/sitemap.xml"
    docs = SitemapLoader(str(sitemap_path), is_local=True).load()
    assert len(docs) > 1
    assert "🦜️🔗" in docs[0].page_content
def test_extract_domain() -> None:
    """Scheme and domain are split out of a variety of URL shapes."""
    cases = {
        "https://js.langchain.com/sitemap.xml": ("https", "js.langchain.com"),
        "http://example.com/path/to/page": ("http", "example.com"),
        "ftp://files.example.com": ("ftp", "files.example.com"),
        "https://deep.subdomain.example.com": (
            "https",
            "deep.subdomain.example.com",
        ),
    }
    for url, expected in cases.items():
        assert _extract_scheme_and_domain(url) == expected
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_nuclia.py | import json
import os
from typing import Any
from unittest import mock
from langchain_community.document_loaders.nuclia import NucliaLoader
from langchain_community.tools.nuclia.tool import NucliaUnderstandingAPI
def fakerun(**args: Any) -> Any:
    """Build a stand-in for ``NucliaUnderstandingAPI._run`` returning fixed JSON."""

    def run(self: Any, **args: Any) -> str:
        # Canned Nuclia processing payload: extracted text, file language,
        # and a single-paragraph field-metadata block.
        payload = {
            "extracted_text": [{"body": {"text": "Hello World"}}],
            "file_extracted_data": [{"language": "en"}],
            "field_metadata": [
                {
                    "metadata": {
                        "metadata": {
                            "paragraphs": [
                                {"end": 66, "sentences": [{"start": 1, "end": 67}]}
                            ]
                        }
                    }
                }
            ],
        }
        return json.dumps(payload)

    return run
@mock.patch.dict(os.environ, {"NUCLIA_NUA_KEY": "_a_key_"})
def test_nuclia_loader() -> None:
    """NucliaLoader maps the tool's JSON payload onto a single Document."""
    # Patch the underlying API call so no network request is made; fakerun
    # supplies the canned payload defined above.
    with mock.patch(
        "langchain_community.tools.nuclia.tool.NucliaUnderstandingAPI._run",
        new_callable=fakerun,
    ):
        nua = NucliaUnderstandingAPI(enable_ml=False)
        loader = NucliaLoader("/whatever/file.mp3", nua)
        docs = loader.load()
        assert len(docs) == 1
        # Extracted text becomes page_content; language and paragraph info
        # land in the document metadata.
        assert docs[0].page_content == "Hello World"
        assert docs[0].metadata["file"]["language"] == "en"
        assert (
            len(docs[0].metadata["metadata"]["metadata"]["metadata"]["paragraphs"]) == 1
        )
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_geodataframe.py | from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from langchain_core.documents import Document
from langchain_community.document_loaders import GeoDataFrameLoader
if TYPE_CHECKING:
from geopandas import GeoDataFrame
else:
GeoDataFrame = "geopandas.GeoDataFrame"
@pytest.fixture
def sample_gdf() -> GeoDataFrame:
    """Fixture: first two NYC-borough rows with ``area`` and ``crs`` columns.

    Fix: this was decorated only with ``pytest.mark.requires``, which does not
    register a fixture (marks have no effect on non-test functions), yet the
    tests below request ``sample_gdf`` as a fixture argument — so
    ``@pytest.fixture`` is required. The consuming tests already carry the
    ``requires("geopandas")`` mark.
    """
    import geopandas

    # TODO: geopandas.datasets will be deprecated in 1.0
    path_to_data = geopandas.datasets.get_path("nybb")
    gdf = geopandas.read_file(path_to_data)
    gdf["area"] = gdf.area
    gdf["crs"] = gdf.crs.to_string()
    return gdf.head(2)
@pytest.mark.requires("geopandas")
def test_load_returns_list_of_documents(sample_gdf: GeoDataFrame) -> None:
    """Loading a two-row GeoDataFrame yields a list of two Documents."""
    docs = GeoDataFrameLoader(sample_gdf).load()
    assert isinstance(docs, list)
    assert len(docs) == 2
    for doc in docs:
        assert isinstance(doc, Document)
@pytest.mark.requires("geopandas")
def test_load_converts_dataframe_columns_to_document_metadata(
    sample_gdf: GeoDataFrame,
) -> None:
    """Non-geometry columns end up in each document's metadata."""
    docs = GeoDataFrameLoader(sample_gdf).load()
    for row_index, doc in enumerate(docs):
        for column in ("area", "crs"):
            assert doc.metadata[column] == sample_gdf.loc[row_index, column]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_python.py | from pathlib import Path
import pytest
from langchain_community.document_loaders.python import PythonLoader
@pytest.mark.parametrize("filename", ["default-encoding.py", "non-utf8-encoding.py"])
def test_python_loader(filename: str) -> None:
    """PythonLoader handles both UTF-8 and non-UTF-8 encoded sources."""
    file_path = Path(__file__).parent.parent / "examples" / filename
    docs = PythonLoader(str(file_path)).load()
    assert len(docs) == 1
    assert docs[0].metadata["source"] == str(file_path)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_ifixit.py | from langchain_community.document_loaders.ifixit import IFixitLoader
def test_ifixit_loader() -> None:
    """Guide URLs are parsed into page type, id and web path."""
    url = "https://www.ifixit.com/Guide/iPad+9+Battery+Replacement/151279"
    loader = IFixitLoader(url)
    assert loader.page_type == "Guide"
    assert loader.id == "151279"
    assert loader.web_path == url
def test_ifixit_loader_teardown() -> None:
    """Teardowns are just guides by a different name.

    Fix: this explanation previously sat mid-function as a no-op string
    expression instead of being the docstring.
    """
    web_path = "https://www.ifixit.com/Teardown/Banana+Teardown/811"
    loader = IFixitLoader(web_path)
    assert loader.page_type == "Guide"
    assert loader.id == "811"
def test_ifixit_loader_device() -> None:
    """Device URLs parse into the Device page type and device id.

    Fix: the original carried a no-op string ("Teardowns are just guides by a
    different name") mid-function — copy-pasted from the teardown test and
    wrong for Device pages; replaced with a proper docstring.
    """
    web_path = "https://www.ifixit.com/Device/Standard_iPad"
    loader = IFixitLoader(web_path)
    assert loader.page_type == "Device"
    assert loader.id == "Standard_iPad"
def test_ifixit_loader_answers() -> None:
    """Answers URLs are parsed into page type and question id."""
    url = (
        "https://www.ifixit.com/Answers/View/318583/My+iPhone+6+is+typing+and+"
        "opening+apps+by+itself"
    )
    loader = IFixitLoader(url)
    assert loader.page_type == "Answers"
    assert loader.id == "318583"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_mastodon.py | """Tests for the Mastodon toots loader"""
from langchain_community.document_loaders import MastodonTootsLoader
def test_mastodon_toots_loader() -> None:
    """Loading one toot from a known account yields one document."""
    # Gargron's (the Mastodon CEO's) account id on mastodon.social is 1.
    docs = MastodonTootsLoader(
        mastodon_accounts=["@Gargron@mastodon.social"], number_toots=1
    ).load()
    assert len(docs) == 1
    assert docs[0].metadata["user_info"]["id"] == 1
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_tensorflow_datasets.py | """Integration tests for the TensorFlow Dataset Loader."""
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from langchain_core.documents import Document
from pydantic import ValidationError
from langchain_community.document_loaders.tensorflow_datasets import (
TensorflowDatasetLoader,
)
if TYPE_CHECKING:
import tensorflow as tf
def decode_to_str(item: tf.Tensor) -> str:
    """Decode a scalar byte-string tensor into a UTF-8 Python string."""
    raw_bytes = item.numpy()
    return raw_bytes.decode("utf-8")
def mlqaen_example_to_document(example: dict) -> Document:
    """Convert one MLQA/en sample into a Document (context + QA metadata)."""
    meta = {key: decode_to_str(example[key]) for key in ("id", "title", "question")}
    # Only the first gold answer is kept.
    meta["answer"] = decode_to_str(example["answers"]["text"][0])
    return Document(
        page_content=decode_to_str(example["context"]),
        metadata=meta,
    )
MAX_DOCS = 10
@pytest.fixture
def tfds_client() -> TensorflowDatasetLoader:
    """Loader over the MLQA/en test split, capped at MAX_DOCS documents."""
    return TensorflowDatasetLoader(
        dataset_name="mlqa/en",
        split_name="test",
        load_max_docs=MAX_DOCS,
        sample_to_document_function=mlqaen_example_to_document,
    )
def test_load_success(tfds_client: TensorflowDatasetLoader) -> None:
    """load() returns exactly MAX_DOCS well-formed documents."""
    docs = tfds_client.load()
    assert isinstance(docs, list)
    assert len(docs) == MAX_DOCS
    first = docs[0]
    assert isinstance(first, Document)
    assert isinstance(first.page_content, str)
    assert first.page_content
    assert isinstance(first.metadata, dict)
def test_lazy_load_success(tfds_client: TensorflowDatasetLoader) -> None:
    """lazy_load() yields the same MAX_DOCS well-formed documents."""
    docs = list(tfds_client.lazy_load())
    assert isinstance(docs, list)
    assert len(docs) == MAX_DOCS
    first = docs[0]
    assert isinstance(first, Document)
    assert isinstance(first.page_content, str)
    assert first.page_content
    assert isinstance(first.metadata, dict)
def test_load_fail_wrong_dataset_name() -> None:
    """An unknown dataset name is rejected during validation."""
    with pytest.raises(ValidationError, match="the dataset name is spelled correctly"):
        TensorflowDatasetLoader(
            dataset_name="wrong_dataset_name",
            split_name="test",
            load_max_docs=MAX_DOCS,
            sample_to_document_function=mlqaen_example_to_document,
        )
def test_load_fail_wrong_split_name() -> None:
    """An unknown split name is rejected during validation."""
    with pytest.raises(ValidationError, match="Unknown split"):
        TensorflowDatasetLoader(
            dataset_name="mlqa/en",
            split_name="wrong_split_name",
            load_max_docs=MAX_DOCS,
            sample_to_document_function=mlqaen_example_to_document,
        )
def test_load_fail_no_func() -> None:
    """Omitting sample_to_document_function is rejected during validation."""
    with pytest.raises(ValidationError, match="Please provide a function"):
        TensorflowDatasetLoader(
            dataset_name="mlqa/en",
            split_name="test",
            load_max_docs=MAX_DOCS,
        )
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/test_joplin.py | from langchain_community.document_loaders.joplin import JoplinLoader
def test_joplin_loader() -> None:
    """Load notes from a locally running Joplin instance and check shapes.

    NOTE(review): requires a reachable Joplin API with an access token
    configured in the environment — TODO confirm against JoplinLoader docs.
    """
    loader = JoplinLoader()
    docs = loader.load()
    assert isinstance(docs, list)
    assert isinstance(docs[0].page_content, str)
    assert isinstance(docs[0].metadata["source"], str)
    assert isinstance(docs[0].metadata["title"], str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/parsers/test_pdf_parsers.py | """Tests for the various PDF parsers."""
from pathlib import Path
from typing import Iterator
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import Blob
from langchain_community.document_loaders.parsers.pdf import (
PDFMinerParser,
PDFPlumberParser,
PyMuPDFParser,
PyPDFium2Parser,
PyPDFParser,
)
# PDFs to test parsers on.
# HELLO_PDF: single page starting with "Hello world!".
# LAYOUT_PARSER_PAPER_PDF: 16-page paper (see page-count assertions below).
# DUPLICATE_CHARS: PDF whose extracted text doubles characters unless deduped.
HELLO_PDF = Path(__file__).parent.parent.parent / "examples" / "hello.pdf"
LAYOUT_PARSER_PAPER_PDF = (
    Path(__file__).parent.parent.parent / "examples" / "layout-parser-paper.pdf"
)
DUPLICATE_CHARS = (
    Path(__file__).parent.parent.parent / "examples" / "duplicate-chars.pdf"
)
def _assert_with_parser(parser: BaseBlobParser, splits_by_page: bool = True) -> None:
    """Standard tests to verify that the given parser works.

    Runs the parser over a one-page PDF and a 16-page PDF and checks
    lazy parsing, document counts, content, and metadata.

    Args:
        parser (BaseBlobParser): The parser to test.
        splits_by_page (bool): Whether the parser splits by page or not by default.
    """
    blob = Blob.from_path(HELLO_PDF)
    doc_generator = parser.lazy_parse(blob)
    # lazy_parse must return a true iterator, not a materialized list.
    assert isinstance(doc_generator, Iterator)
    docs = list(doc_generator)
    assert len(docs) == 1
    page_content = docs[0].page_content
    assert isinstance(page_content, str)
    # The different parsers return different amount of whitespace, so using
    # startswith instead of equals.
    assert docs[0].page_content.startswith("Hello world!")
    blob = Blob.from_path(LAYOUT_PARSER_PAPER_PDF)
    doc_generator = parser.lazy_parse(blob)
    assert isinstance(doc_generator, Iterator)
    docs = list(doc_generator)
    # Page-splitting parsers emit one doc per page; others emit one doc total.
    if splits_by_page:
        assert len(docs) == 16
    else:
        assert len(docs) == 1
    # Test is imprecise since the parsers yield different parse information depending
    # on configuration. Each parser seems to yield a slightly different result
    # for this page!
    assert "LayoutParser" in docs[0].page_content
    metadata = docs[0].metadata
    assert metadata["source"] == str(LAYOUT_PARSER_PAPER_PDF)
    if splits_by_page:
        assert metadata["page"] == 0
def _assert_with_duplicate_parser(parser: BaseBlobParser, dedupe: bool = False) -> None:
    """PDFPlumber tests to verify that duplicate characters appear or not.

    Args:
        parser (BaseBlobParser): The parser to test.
        dedupe (bool): Expect doubled characters to have been removed, i.e.
            the parser was constructed with ``dedupe=True``.
    """
    blob = Blob.from_path(DUPLICATE_CHARS)
    doc_generator = parser.lazy_parse(blob)
    assert isinstance(doc_generator, Iterator)
    docs = list(doc_generator)
    if dedupe:
        # use dedupe avoid duplicate characters.
        assert "1000 Series" == docs[0].page_content.split("\n")[0]
    else:
        # duplicate characters will appear in doc if not dedupe
        assert "11000000 SSeerriieess" == docs[0].page_content.split("\n")[0]
def test_pymupdf_loader() -> None:
    """Test PyMuPDF parser against the standard PDF fixtures."""
    _assert_with_parser(PyMuPDFParser())
def test_pypdf_parser() -> None:
    """Test PyPDF parser against the standard PDF fixtures."""
    _assert_with_parser(PyPDFParser())
def test_pdfminer_parser() -> None:
    """Test PDFMiner parser, which emits a single doc for the whole PDF."""
    # Does not follow defaults to split by page.
    _assert_with_parser(PDFMinerParser(), splits_by_page=False)
def test_pypdfium2_parser() -> None:
    """Test PyPDFium2 parser."""
    # NOTE(review): this call uses the default splits_by_page=True, so the old
    # "does not follow defaults" comment (copied from PDFMiner) was stale.
    _assert_with_parser(PyPDFium2Parser())
def test_pdfplumber_parser() -> None:
    """Test PDFPlumber parser, including dedupe on/off for doubled chars."""
    _assert_with_parser(PDFPlumberParser())
    _assert_with_duplicate_parser(PDFPlumberParser())
    _assert_with_duplicate_parser(PDFPlumberParser(dedupe=True), dedupe=True)
def test_extract_images_text_from_pdf_pypdfparser() -> None:
    """Test extract image from pdf and recognize text with rapid ocr - PyPDFParser"""
    # extract_images=True runs OCR on embedded images in addition to text.
    _assert_with_parser(PyPDFParser(extract_images=True))
def test_extract_images_text_from_pdf_pdfminerparser() -> None:
    """Test extract image from pdf and recognize text with rapid ocr - PDFMinerParser"""
    # Uses PDFMiner's default (whole-document) splitting; see test_pdfminer_parser.
    _assert_with_parser(PDFMinerParser(extract_images=True))
def test_extract_images_text_from_pdf_pymupdfparser() -> None:
    """Test extract image from pdf and recognize text with rapid ocr - PyMuPDFParser"""
    # extract_images=True runs OCR on embedded images in addition to text.
    _assert_with_parser(PyMuPDFParser(extract_images=True))
def test_extract_images_text_from_pdf_pypdfium2parser() -> None:
    """Test extract image from pdf and recognize text with rapid ocr - PyPDFium2Parser"""  # noqa: E501
    # extract_images=True runs OCR on embedded images in addition to text.
    _assert_with_parser(PyPDFium2Parser(extract_images=True))
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/parsers/test_language.py | from pathlib import Path
import pytest
from langchain_community.document_loaders.concurrent import ConcurrentLoader
from langchain_community.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers import LanguageParser
def test_language_loader_for_python() -> None:
    """Test Python loader with parser enabled.

    Expects one "functions_classes" doc per extracted definition plus one
    "simplified_code" doc in which bodies are replaced by stub comments.
    """
    file_path = Path(__file__).parent.parent.parent / "examples"
    loader = GenericLoader.from_filesystem(
        file_path, glob="hello_world.py", parser=LanguageParser(parser_threshold=5)
    )
    docs = loader.load()
    assert len(docs) == 2
    metadata = docs[0].metadata
    assert metadata["source"] == str(file_path / "hello_world.py")
    assert metadata["content_type"] == "functions_classes"
    assert metadata["language"] == "python"
    metadata = docs[1].metadata
    assert metadata["source"] == str(file_path / "hello_world.py")
    assert metadata["content_type"] == "simplified_code"
    assert metadata["language"] == "python"
    assert (
        docs[0].page_content
        == """def main():
    print("Hello World!")  # noqa: T201
    return 0"""
    )
    assert (
        docs[1].page_content
        == """#!/usr/bin/env python3
import sys
# Code for: def main():
if __name__ == "__main__":
    sys.exit(main())"""
    )
def test_language_loader_for_python_with_parser_threshold() -> None:
    """Test Python loader with parser enabled and below threshold.

    A source file smaller than parser_threshold lines is returned whole.
    """
    file_path = Path(__file__).parent.parent.parent / "examples"
    loader = GenericLoader.from_filesystem(
        file_path,
        glob="hello_world.py",
        parser=LanguageParser(language="python", parser_threshold=1000),
    )
    docs = loader.load()
    assert len(docs) == 1
def esprima_installed() -> bool:
    """Return True when the optional ``esprima`` dependency is importable."""
    try:
        import esprima  # noqa: F401
    except Exception as e:
        # Any import failure means the JS tests must be skipped.
        print(f"esprima not installed, skipping test {e}")  # noqa: T201
        return False
    return True
@pytest.mark.skipif(not esprima_installed(), reason="requires esprima package")
def test_language_loader_for_javascript() -> None:
    """Test JavaScript loader with parser enabled.

    Expects one doc per class/function definition plus one simplified-code doc.
    """
    file_path = Path(__file__).parent.parent.parent / "examples"
    loader = GenericLoader.from_filesystem(
        file_path, glob="hello_world.js", parser=LanguageParser(parser_threshold=5)
    )
    docs = loader.load()
    assert len(docs) == 3
    metadata = docs[0].metadata
    assert metadata["source"] == str(file_path / "hello_world.js")
    assert metadata["content_type"] == "functions_classes"
    assert metadata["language"] == "js"
    metadata = docs[1].metadata
    assert metadata["source"] == str(file_path / "hello_world.js")
    assert metadata["content_type"] == "functions_classes"
    assert metadata["language"] == "js"
    metadata = docs[2].metadata
    assert metadata["source"] == str(file_path / "hello_world.js")
    assert metadata["content_type"] == "simplified_code"
    assert metadata["language"] == "js"
    assert (
        docs[0].page_content
        == """class HelloWorld {
  sayHello() {
    console.log("Hello World!");
  }
}"""
    )
    assert (
        docs[1].page_content
        == """function main() {
  const hello = new HelloWorld();
  hello.sayHello();
}"""
    )
    assert (
        docs[2].page_content
        == """// Code for: class HelloWorld {
// Code for: function main() {
main();"""
    )
def test_language_loader_for_javascript_with_parser_threshold() -> None:
    """Test JavaScript loader with parser enabled and below threshold.

    A source file smaller than parser_threshold lines is returned whole.
    """
    file_path = Path(__file__).parent.parent.parent / "examples"
    loader = GenericLoader.from_filesystem(
        file_path,
        glob="hello_world.js",
        parser=LanguageParser(language="js", parser_threshold=1000),
    )
    docs = loader.load()
    assert len(docs) == 1
def test_concurrent_language_loader_for_javascript_with_parser_threshold() -> None:
    """Test JavaScript ConcurrentLoader with parser enabled and below threshold."""
    file_path = Path(__file__).parent.parent.parent / "examples"
    loader = ConcurrentLoader.from_filesystem(
        file_path,
        glob="hello_world.js",
        # Below-threshold files are returned whole, as in the sequential test.
        parser=LanguageParser(language="js", parser_threshold=1000),
    )
    docs = loader.load()
    assert len(docs) == 1
def test_concurrent_language_loader_for_python_with_parser_threshold() -> None:
    """Test Python ConcurrentLoader with parser enabled and below threshold."""
    file_path = Path(__file__).parent.parent.parent / "examples"
    loader = ConcurrentLoader.from_filesystem(
        file_path,
        glob="hello_world.py",
        # Below-threshold files are returned whole, as in the sequential test.
        parser=LanguageParser(language="python", parser_threshold=1000),
    )
    docs = loader.load()
    assert len(docs) == 1
@pytest.mark.skipif(not esprima_installed(), reason="requires esprima package")
def test_concurrent_language_loader_for_javascript() -> None:
    """Test JavaScript ConcurrentLoader with parser enabled.

    Must split into the same 3 docs as the sequential GenericLoader test.
    """
    file_path = Path(__file__).parent.parent.parent / "examples"
    loader = ConcurrentLoader.from_filesystem(
        file_path, glob="hello_world.js", parser=LanguageParser(parser_threshold=5)
    )
    docs = loader.load()
    assert len(docs) == 3
def test_concurrent_language_loader_for_python() -> None:
    """Test Python ConcurrentLoader with parser enabled.

    Must split into the same 2 docs as the sequential GenericLoader test.
    """
    file_path = Path(__file__).parent.parent.parent / "examples"
    loader = ConcurrentLoader.from_filesystem(
        file_path, glob="hello_world.py", parser=LanguageParser(parser_threshold=5)
    )
    docs = loader.load()
    assert len(docs) == 2
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders | lc_public_repos/langchain/libs/community/tests/integration_tests/document_loaders/parsers/test_docai.py | """Tests for the Google Cloud DocAI parser."""
from unittest.mock import ANY, patch
import pytest
from langchain_community.document_loaders.parsers import DocAIParser
@pytest.mark.requires("google.cloud", "google.cloud.documentai")
def test_docai_parser_valid_processor_name() -> None:
    """A full processor resource path is accepted and stored on the parser."""
    processor_name = "projects/123456/locations/us-central1/processors/ab123dfg"
    with patch("google.cloud.documentai.DocumentProcessorServiceClient") as test_client:
        parser = DocAIParser(processor_name=processor_name, location="us")
        # The client must be constructed exactly once with options/info kwargs.
        test_client.assert_called_once_with(client_options=ANY, client_info=ANY)
        assert parser._processor_name == processor_name
@pytest.mark.requires("google.cloud", "google.cloud.documentai")
@pytest.mark.parametrize(
    "processor_name",
    # Rejected forms: a versioned path (":publish" suffix) and a bare id.
    ["projects/123456/locations/us-central1/processors/ab123dfg:publish", "ab123dfg"],
)
def test_docai_parser_invalid_processor_name(processor_name: str) -> None:
    """Malformed processor names must raise ValueError at construction."""
    with patch("google.cloud.documentai.DocumentProcessorServiceClient"):
        with pytest.raises(ValueError):
            _ = DocAIParser(processor_name=processor_name, location="us")
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/chat_message_histories/test_zep.py | from typing import TYPE_CHECKING
import pytest
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from pytest_mock import MockerFixture
from langchain_community.chat_message_histories import ZepChatMessageHistory
if TYPE_CHECKING:
from zep_python import ZepClient
@pytest.fixture
@pytest.mark.requires("zep_python")
def zep_chat(mocker: MockerFixture) -> ZepChatMessageHistory:
    """Build a ZepChatMessageHistory backed by a fully mocked ZepClient."""
    mock_zep_client: ZepClient = mocker.patch("zep_python.ZepClient", autospec=True)
    mock_zep_client.memory = mocker.patch(
        "zep_python.memory.client.MemoryClient", autospec=True
    )
    zep_chat: ZepChatMessageHistory = ZepChatMessageHistory(
        "test_session", "http://localhost:8000"
    )
    # Swap the real client for the mock so tests can inspect calls on it.
    zep_chat.zep_client = mock_zep_client
    return zep_chat
@pytest.mark.requires("zep_python")
def test_messages(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
    """Zep memory maps to LangChain messages: summary first, then turns."""
    from zep_python import Memory, Message, Summary

    mock_memory: Memory = Memory(
        summary=Summary(
            content="summary",
        ),
        messages=[
            Message(content="message", role="ai", metadata={"key": "value"}),
            Message(content="message2", role="human", metadata={"key2": "value2"}),
        ],
    )
    zep_chat.zep_client.memory.get_memory.return_value = mock_memory  # type: ignore
    result = zep_chat.messages
    # 1 summary + 2 conversation messages.
    assert len(result) == 3
    assert isinstance(result[0], SystemMessage)  # summary
    assert isinstance(result[1], AIMessage)
    assert isinstance(result[2], HumanMessage)
@pytest.mark.requires("zep_python")
def test_add_user_message(
    mocker: MockerFixture, zep_chat: ZepChatMessageHistory
) -> None:
    """Adding a user message delegates to ZepClient.memory.add_memory once."""
    zep_chat.add_user_message("test message")
    zep_chat.zep_client.memory.add_memory.assert_called_once()  # type: ignore
@pytest.mark.requires("zep_python")
def test_add_ai_message(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
    """Adding an AI message delegates to ZepClient.memory.add_memory once."""
    zep_chat.add_ai_message("test message")
    zep_chat.zep_client.memory.add_memory.assert_called_once()  # type: ignore
@pytest.mark.requires("zep_python")
def test_append(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
    """add_message with a raw BaseMessage also delegates to add_memory."""
    zep_chat.add_message(AIMessage(content="test message"))
    zep_chat.zep_client.memory.add_memory.assert_called_once()  # type: ignore
@pytest.mark.requires("zep_python")
def test_search(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
    """search forwards the session id and query to search_memory."""
    zep_chat.search("test query")
    zep_chat.zep_client.memory.search_memory.assert_called_once_with(  # type: ignore
        "test_session", mocker.ANY, limit=None
    )
@pytest.mark.requires("zep_python")
def test_clear(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
    """clear deletes the memory for this fixture's session id."""
    zep_chat.clear()
    zep_chat.zep_client.memory.delete_memory.assert_called_once_with(  # type: ignore
        "test_session"
    )
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/chat_message_histories/test_streamlit.py | """Unit tests for StreamlitChatMessageHistory functionality."""
import pytest
test_script = """
import json
import streamlit as st
from langchain.memory import ConversationBufferMemory
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
from langchain_core.messages import message_to_dict, BaseMessage
message_history = StreamlitChatMessageHistory()
memory = ConversationBufferMemory(chat_memory=message_history, return_messages=True)
# Add some messages
if st.checkbox("add initial messages", value=True):
memory.chat_memory.add_ai_message("This is me, the AI")
memory.chat_memory.add_user_message("This is me, the human")
else:
st.markdown("Skipped add")
# Clear messages if checked
if st.checkbox("clear messages"):
st.markdown("Cleared!")
memory.chat_memory.clear()
# Use message setter
if st.checkbox("Override messages"):
memory.chat_memory.messages = [
BaseMessage(content="A basic message", type="basic")
]
st.session_state["langchain_messages"].append(
BaseMessage(content="extra cool message", type="basic")
)
# Write the output to st.code as a json blob for inspection
messages = memory.chat_memory.messages
messages_json = json.dumps([message_to_dict(msg) for msg in messages])
st.text(messages_json)
"""
@pytest.mark.requires("streamlit")
def test_memory_with_message_store() -> None:
    """Exercise StreamlitChatMessageHistory inside a real AppTest session."""
    try:
        from streamlit.testing.v1 import AppTest
    except ModuleNotFoundError:
        pytest.skip("Incorrect version of Streamlit installed")

    at = AppTest.from_string(test_script).run(timeout=10)

    # Initial run should write two messages
    messages_json = at.get("text")[-1].value
    assert "This is me, the AI" in messages_json
    assert "This is me, the human" in messages_json

    # Uncheck the initial write, they should persist in session_state
    at.get("checkbox")[0].uncheck().run()
    assert at.get("markdown")[0].value == "Skipped add"
    messages_json = at.get("text")[-1].value
    assert "This is me, the AI" in messages_json
    assert "This is me, the human" in messages_json

    # Clear the message history
    at.get("checkbox")[1].check().run()
    assert at.get("markdown")[1].value == "Cleared!"
    messages_json = at.get("text")[-1].value
    assert messages_json == "[]"

    # Use message setter
    at.get("checkbox")[1].uncheck()
    at.get("checkbox")[2].check().run()
    messages_json = at.get("text")[-1].value
    assert "A basic message" in messages_json
    assert "extra cool message" in messages_json
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/chat_message_histories/test_tidb.py | import os
import pytest
from langchain_core.messages import AIMessage, HumanMessage
from langchain_community.chat_message_histories import TiDBChatMessageHistory
# Gate the TiDB integration tests on a connection string supplied via the
# environment; without it the tests are skipped by the marker below.
try:
    CONNECTION_STRING = os.getenv("TEST_TiDB_CHAT_URL", "")
    if CONNECTION_STRING == "":
        # BUG FIX: the message previously named "TEST_TiDB_URL", but the
        # variable actually read above is TEST_TiDB_CHAT_URL.
        raise OSError("TEST_TiDB_CHAT_URL environment variable is not set")
    tidb_available = True
except (OSError, ImportError):
    tidb_available = False
@pytest.mark.skipif(not tidb_available, reason="tidb is not available")
def test_add_messages() -> None:
    """Basic testing: adding messages to the TiDBChatMessageHistory.

    Histories are keyed by session id; clearing one must not affect another.
    """
    message_store = TiDBChatMessageHistory("23334", CONNECTION_STRING)
    message_store.clear()
    assert len(message_store.messages) == 0
    message_store.add_user_message("Hello! Language Chain!")
    message_store.add_ai_message("Hi Guys!")

    # create another message store to check if the messages are stored correctly
    message_store_another = TiDBChatMessageHistory("46666", CONNECTION_STRING)
    message_store_another.clear()
    assert len(message_store_another.messages) == 0
    message_store_another.add_user_message("Hello! Bot!")
    message_store_another.add_ai_message("Hi there!")
    message_store_another.add_user_message("How's this pr going?")

    # Now check if the messages are stored in the database correctly
    assert len(message_store.messages) == 2
    assert isinstance(message_store.messages[0], HumanMessage)
    assert isinstance(message_store.messages[1], AIMessage)
    assert message_store.messages[0].content == "Hello! Language Chain!"
    assert message_store.messages[1].content == "Hi Guys!"

    assert len(message_store_another.messages) == 3
    assert isinstance(message_store_another.messages[0], HumanMessage)
    assert isinstance(message_store_another.messages[1], AIMessage)
    assert isinstance(message_store_another.messages[2], HumanMessage)
    assert message_store_another.messages[0].content == "Hello! Bot!"
    assert message_store_another.messages[1].content == "Hi there!"
    assert message_store_another.messages[2].content == "How's this pr going?"

    # Now clear the first history
    message_store.clear()
    assert len(message_store.messages) == 0
    assert len(message_store_another.messages) == 3

    message_store_another.clear()
    assert len(message_store.messages) == 0
    assert len(message_store_another.messages) == 0
# FIX: this test was missing the availability gate its sibling has, so it ran
# (and failed) whenever no TiDB instance was configured. Also adds the return
# annotation, making the `type: ignore[no-untyped-def]` unnecessary.
@pytest.mark.skipif(not tidb_available, reason="tidb is not available")
def test_tidb_recent_chat_message() -> None:
    """Test the TiDBChatMessageHistory with earliest_time parameter.

    A history constructed with ``earliest_time`` must only see messages
    written after that timestamp.
    """
    import time
    from datetime import datetime

    # prepare some messages
    message_store = TiDBChatMessageHistory("2333", CONNECTION_STRING)
    message_store.clear()
    assert len(message_store.messages) == 0
    message_store.add_user_message("Hello! Language Chain!")
    message_store.add_ai_message("Hi Guys!")
    assert len(message_store.messages) == 2
    assert isinstance(message_store.messages[0], HumanMessage)
    assert isinstance(message_store.messages[1], AIMessage)
    assert message_store.messages[0].content == "Hello! Language Chain!"
    assert message_store.messages[1].content == "Hi Guys!"

    # now we add some recent messages to the database
    earliest_time = datetime.utcnow()
    # sleep so the new messages land strictly after `earliest_time`
    time.sleep(1)

    message_store.add_user_message("How's this pr going?")
    message_store.add_ai_message("It's almost done!")
    assert len(message_store.messages) == 4
    assert isinstance(message_store.messages[2], HumanMessage)
    assert isinstance(message_store.messages[3], AIMessage)
    assert message_store.messages[2].content == "How's this pr going?"
    assert message_store.messages[3].content == "It's almost done!"

    # now we create another message store with earliest_time parameter
    message_store_another = TiDBChatMessageHistory(
        "2333", CONNECTION_STRING, earliest_time=earliest_time
    )
    assert len(message_store_another.messages) == 2
    assert isinstance(message_store_another.messages[0], HumanMessage)
    assert isinstance(message_store_another.messages[1], AIMessage)
    assert message_store_another.messages[0].content == "How's this pr going?"
    assert message_store_another.messages[1].content == "It's almost done!"

    # now we clear the message store
    message_store.clear()
    assert len(message_store.messages) == 0
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/chat_message_histories/test_neo4j.py | import os
from langchain_core.messages import AIMessage, HumanMessage
from langchain_community.chat_message_histories import Neo4jChatMessageHistory
from langchain_community.graphs import Neo4jGraph
def test_add_messages() -> None:
    """Basic testing: adding messages to the Neo4jChatMessageHistory.

    Requires NEO4J_URI / NEO4J_USERNAME / NEO4J_PASSWORD in the environment.
    Histories are keyed by session id; clearing one must not affect another.
    """
    assert os.environ.get("NEO4J_URI") is not None
    assert os.environ.get("NEO4J_USERNAME") is not None
    assert os.environ.get("NEO4J_PASSWORD") is not None
    message_store = Neo4jChatMessageHistory("23334")
    message_store.clear()
    assert len(message_store.messages) == 0
    message_store.add_user_message("Hello! Language Chain!")
    message_store.add_ai_message("Hi Guys!")

    # create another message store to check if the messages are stored correctly
    message_store_another = Neo4jChatMessageHistory("46666")
    message_store_another.clear()
    assert len(message_store_another.messages) == 0
    message_store_another.add_user_message("Hello! Bot!")
    message_store_another.add_ai_message("Hi there!")
    message_store_another.add_user_message("How's this pr going?")

    # Now check if the messages are stored in the database correctly
    assert len(message_store.messages) == 2
    assert isinstance(message_store.messages[0], HumanMessage)
    assert isinstance(message_store.messages[1], AIMessage)
    assert message_store.messages[0].content == "Hello! Language Chain!"
    assert message_store.messages[1].content == "Hi Guys!"

    assert len(message_store_another.messages) == 3
    assert isinstance(message_store_another.messages[0], HumanMessage)
    assert isinstance(message_store_another.messages[1], AIMessage)
    assert isinstance(message_store_another.messages[2], HumanMessage)
    assert message_store_another.messages[0].content == "Hello! Bot!"
    assert message_store_another.messages[1].content == "Hi there!"
    assert message_store_another.messages[2].content == "How's this pr going?"

    # Now clear the first history
    message_store.clear()
    assert len(message_store.messages) == 0
    assert len(message_store_another.messages) == 3

    message_store_another.clear()
    assert len(message_store.messages) == 0
    assert len(message_store_another.messages) == 0
def test_add_messages_graph_object() -> None:
    """Basic testing: Passing driver through graph object.

    Constructs the Neo4jGraph first, then breaks the env credentials to prove
    the history reuses the graph's existing driver instead of reconnecting.
    """
    assert os.environ.get("NEO4J_URI") is not None
    assert os.environ.get("NEO4J_USERNAME") is not None
    assert os.environ.get("NEO4J_PASSWORD") is not None
    graph = Neo4jGraph()
    # rewrite env for testing
    os.environ["NEO4J_USERNAME"] = "foo"
    message_store = Neo4jChatMessageHistory("23334", graph=graph)
    message_store.clear()
    assert len(message_store.messages) == 0
    message_store.add_user_message("Hello! Language Chain!")
    message_store.add_ai_message("Hi Guys!")
    # Now check if the messages are stored in the database correctly
    assert len(message_store.messages) == 2
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/agent/test_powerbi_agent.py | import pytest
from langchain_core.utils import get_from_env
from langchain_community.agent_toolkits import PowerBIToolkit, create_pbi_agent
from langchain_community.chat_models import ChatOpenAI
from langchain_community.utilities.powerbi import PowerBIDataset
def azure_installed() -> bool:
    """Return True when the optional Azure SDK packages are importable."""
    try:
        from azure.core.credentials import TokenCredential  # noqa: F401
        from azure.identity import DefaultAzureCredential  # noqa: F401
    except Exception as e:
        # Any import failure means the PowerBI test must be skipped.
        print(f"azure not installed, skipping test {e}")  # noqa: T201
        return False
    return True
@pytest.mark.skipif(not azure_installed(), reason="requires azure package")
def test_daxquery() -> None:
    """Run the PBI agent against a live dataset and check the row count.

    Requires POWERBI_DATASET_ID / POWERBI_TABLE_NAME / POWERBI_NUMROWS env
    vars plus Azure default credentials and an OpenAI key.
    """
    from azure.identity import DefaultAzureCredential

    DATASET_ID = get_from_env("", "POWERBI_DATASET_ID")
    TABLE_NAME = get_from_env("", "POWERBI_TABLE_NAME")
    NUM_ROWS = get_from_env("", "POWERBI_NUMROWS")

    # Cheap model drives the agent loop; the stronger model powers the toolkit.
    fast_llm = ChatOpenAI(
        temperature=0.5, max_tokens=1000, model_name="gpt-3.5-turbo", verbose=True
    )  # type: ignore[call-arg]
    smart_llm = ChatOpenAI(
        temperature=0, max_tokens=100, model_name="gpt-4", verbose=True
    )  # type: ignore[call-arg]

    toolkit = PowerBIToolkit(
        powerbi=PowerBIDataset(
            dataset_id=DATASET_ID,
            table_names=[TABLE_NAME],
            credential=DefaultAzureCredential(),
        ),
        llm=smart_llm,
    )

    agent_executor = create_pbi_agent(llm=fast_llm, toolkit=toolkit, verbose=True)

    output = agent_executor.run(f"How many rows are in the table, {TABLE_NAME}")
    assert NUM_ROWS in output
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/agent/test_ainetwork_agent.py | import asyncio
import os
import time
import urllib.request
import uuid
from enum import Enum
from typing import Any
from urllib.error import HTTPError
import pytest
from langchain.agents import AgentType, initialize_agent
from langchain_community.agent_toolkits.ainetwork.toolkit import AINetworkToolkit
from langchain_community.chat_models import ChatOpenAI
from langchain_community.tools.ainetwork.utils import authenticate
class Match(Enum):
    """Wildcard tokens for structurally matching nested JSON-like values."""

    # Keep pytest from collecting this class as a test case.
    __test__ = False

    ListWildcard = 1
    StrWildcard = 2
    DictWildcard = 3
    IntWildcard = 4
    FloatWildcard = 5
    ObjectWildcard = 6

    @classmethod
    def match(cls, value: Any, template: Any) -> bool:
        """Return True if ``value`` structurally matches ``template``.

        ``template`` may contain wildcard members of this enum at any depth.
        Dicts and lists are compared recursively and must have equal sizes;
        any other template is compared by equality after a type check.
        """
        wildcard_predicates = {
            cls.ListWildcard: lambda v: isinstance(v, list),
            cls.StrWildcard: lambda v: isinstance(v, str),
            cls.DictWildcard: lambda v: isinstance(v, dict),
            cls.IntWildcard: lambda v: isinstance(v, int),
            cls.FloatWildcard: lambda v: isinstance(v, float),
            cls.ObjectWildcard: lambda v: True,
        }
        if isinstance(template, cls):
            return wildcard_predicates[template](value)
        # Exact type match is required before structural comparison.
        if type(value) is not type(template):
            return False
        if isinstance(value, dict):
            if len(value) != len(template):
                return False
            return all(
                key in template and cls.match(item, template[key])
                for key, item in value.items()
            )
        if isinstance(value, list):
            if len(value) != len(template):
                return False
            return all(cls.match(v, t) for v, t in zip(value, template))
        return value == template
@pytest.mark.requires("ain")
def test_ainetwork_toolkit() -> None:
    """End-to-end agent test against the AIN testnet.

    Creates a uniquely named app, then exercises owner/rule/value reads and
    writes plus an AIN transfer, verifying on-chain state with Match templates.
    Side effects are order-dependent; the steps below must run sequentially.
    """

    def get(path: str, type: str = "value", default: Any = None) -> Any:
        # Fetch value/rule/owner at `path` from the chain; `default` on None.
        ref = ain.db.ref(path)
        value = asyncio.run(
            {
                "value": ref.getValue,
                "rule": ref.getRule,
                "owner": ref.getOwner,
            }[type]()
        )
        return default if value is None else value

    def validate(path: str, template: Any, type: str = "value") -> bool:
        # True when on-chain data at `path` structurally matches `template`.
        value = get(path, type)
        return Match.match(value, template)

    if not os.environ.get("AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY", None):
        from ain.account import Account

        account = Account.create()
        os.environ["AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY"] = account.private_key
    interface = authenticate(network="testnet")
    toolkit = AINetworkToolkit(network="testnet", interface=interface)
    llm = ChatOpenAI(model="gpt-4", temperature=0)
    agent = initialize_agent(
        tools=toolkit.get_tools(),
        llm=llm,
        verbose=True,
        agent=AgentType.OPENAI_FUNCTIONS,
    )
    ain = interface
    self_address = ain.wallet.defaultAccount.address
    co_address = "0x6813Eb9362372EEF6200f3b1dbC3f819671cBA69"

    # Test creating an app
    # UUID embeds a millisecond timestamp in the high bits so names sort by time.
    UUID = uuid.UUID(
        int=(int(time.time() * 1000) << 64) | (uuid.uuid4().int & ((1 << 64) - 1))
    )
    app_name = f"_langchain_test__{str(UUID).replace('-', '_')}"
    agent.run(f"""Create app {app_name}""")
    validate(f"/manage_app/{app_name}/config", {"admin": {self_address: True}})
    validate(f"/apps/{app_name}/DB", None, "owner")

    # Test reading owner config
    agent.run(f"""Read owner config of /apps/{app_name}/DB .""")
    assert ...

    # Test granting owner config
    agent.run(
        f"""Grant owner authority to {co_address} for edit write rule permission of /apps/{app_name}/DB_co ."""  # noqa: E501
    )
    validate(
        f"/apps/{app_name}/DB_co",
        {
            ".owner": {
                "owners": {
                    co_address: {
                        "branch_owner": False,
                        "write_function": False,
                        "write_owner": False,
                        "write_rule": True,
                    }
                }
            }
        },
        "owner",
    )

    # Test reading owner config
    agent.run(f"""Read owner config of /apps/{app_name}/DB_co .""")
    assert ...

    # Test reading owner config
    agent.run(f"""Read owner config of /apps/{app_name}/DB .""")
    assert ...  # Check if owner {self_address} exists

    # Test reading a value
    agent.run(f"""Read value in /apps/{app_name}/DB""")
    assert ...  # empty

    # Test writing a value
    agent.run(f"""Write value {{1: 1904, 2: 43}} in /apps/{app_name}/DB""")
    validate(f"/apps/{app_name}/DB", {1: 1904, 2: 43})

    # Test reading a value
    agent.run(f"""Read value in /apps/{app_name}/DB""")
    assert ...  # check value

    # Test reading a rule
    agent.run(f"""Read write rule of app {app_name} .""")
    assert ...  # check rule that self_address exists

    # Test sending AIN
    self_balance = get(f"/accounts/{self_address}/balance", default=0)
    transaction_history = get(f"/transfer/{self_address}/{co_address}", default={})
    if self_balance < 1:
        # Top up from the faucet when the test account is short on funds.
        try:
            with urllib.request.urlopen(
                f"http://faucet.ainetwork.ai/api/test/{self_address}/"
            ) as response:
                try_test = response.getcode()
        except HTTPError as e:
            try_test = e.getcode()
    else:
        try_test = 200
    if try_test == 200:
        agent.run(f"""Send 1 AIN to {co_address}""")
        transaction_update = get(f"/transfer/{self_address}/{co_address}", default={})
        # A new transfer entry worth exactly 1 AIN must have appeared.
        assert any(
            transaction_update[key]["value"] == 1
            for key in transaction_update.keys() - transaction_history.keys()
        )
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/callbacks/test_wandb_tracer.py | """Integration tests for the langchain tracer module."""
import asyncio
import os
from aiohttp import ClientSession
from langchain_community.callbacks import wandb_tracing_enabled
from langchain_community.llms import OpenAI
# Multi-step agent prompts (search + calculator) reused by all tracing tests.
questions = [
    (
        "Who won the US Open men's final in 2019? "
        "What is his age raised to the 0.334 power?"
    ),
    (
        "Who is Olivia Wilde's boyfriend? "
        "What is his current age raised to the 0.23 power?"
    ),
    (
        "Who won the most recent formula 1 grand prix? "
        "What is their age raised to the 0.23 power?"
    ),
    (
        "Who won the US Open women's final in 2019? "
        "What is her age raised to the 0.34 power?"
    ),
    ("Who is Beyonce's husband? " "What is his age raised to the 0.19 power?"),
]
def test_tracing_sequential() -> None:
    """Run three agent queries back-to-back with W&B tracing enabled via env."""
    from langchain.agents import AgentType, initialize_agent, load_tools

    os.environ["LANGCHAIN_WANDB_TRACING"] = "true"
    os.environ["WANDB_PROJECT"] = "langchain-tracing"

    for q in questions[:3]:
        llm = OpenAI(temperature=0)
        tools = load_tools(
            ["llm-math", "serpapi"],
            llm=llm,
        )
        agent = initialize_agent(
            tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
        )
        agent.run(q)
def test_tracing_session_env_var() -> None:
    """A single traced run with only LANGCHAIN_WANDB_TRACING set."""
    from langchain.agents import AgentType, initialize_agent, load_tools

    os.environ["LANGCHAIN_WANDB_TRACING"] = "true"

    llm = OpenAI(temperature=0)
    tools = load_tools(
        ["llm-math", "serpapi"],
        llm=llm,
    )
    agent = initialize_agent(
        tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
    )
    agent.run(questions[0])
async def test_tracing_concurrent() -> None:
    """Trace three agent runs executed concurrently over one aiohttp session."""
    from langchain.agents import AgentType, initialize_agent, load_tools

    os.environ["LANGCHAIN_WANDB_TRACING"] = "true"
    aiosession = ClientSession()
    llm = OpenAI(temperature=0)
    async_tools = load_tools(
        ["llm-math", "serpapi"],
        llm=llm,
        aiosession=aiosession,
    )
    agent = initialize_agent(
        async_tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
    )
    tasks = [agent.arun(q) for q in questions[:3]]
    await asyncio.gather(*tasks)
    # The shared session must be closed explicitly once all runs finish.
    await aiosession.close()
def test_tracing_context_manager() -> None:
    """Trace only inside the wandb_tracing_enabled() context manager."""
    from langchain.agents import AgentType, initialize_agent, load_tools

    llm = OpenAI(temperature=0)
    agent = initialize_agent(
        load_tools(["llm-math", "serpapi"], llm=llm),
        llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
    )
    # Make sure env-var tracing is off so only the context manager traces.
    os.environ.pop("LANGCHAIN_WANDB_TRACING", None)
    with wandb_tracing_enabled():
        agent.run(questions[0])  # this should be traced
    agent.run(questions[0])  # this should not be traced
async def test_tracing_context_manager_async() -> None:
    """Async variant: only runs started inside wandb_tracing_enabled() are traced.

    A background run started before the context manager must not be traced;
    the runs launched inside it must be.
    """
    from langchain.agents import AgentType, initialize_agent, load_tools

    llm = OpenAI(temperature=0)
    async_tools = load_tools(
        ["llm-math", "serpapi"],
        llm=llm,
    )
    agent = initialize_agent(
        async_tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
    )
    # Bug fix: the original checked for "LANGCHAIN_WANDB_TRACING" but deleted
    # "LANGCHAIN_TRACING", which raised KeyError when only the W&B variable
    # was set and left env-var tracing enabled, defeating the test's point.
    os.environ.pop("LANGCHAIN_WANDB_TRACING", None)

    # start a background task
    task = asyncio.create_task(agent.arun(questions[0]))  # this should not be traced
    with wandb_tracing_enabled():
        tasks = [agent.arun(q) for q in questions[1:4]]  # these should be traced
        await asyncio.gather(*tasks)
    await task
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/callbacks/test_streamlit_callback.py | """Integration tests for the StreamlitCallbackHandler module."""
import pytest
# Import the internal StreamlitCallbackHandler from its module - and not from
# the `langchain_community.callbacks.streamlit` package - so that we don't end up using
# Streamlit's externally-provided callback handler.
from langchain_community.callbacks.streamlit.streamlit_callback_handler import (
StreamlitCallbackHandler,
)
from langchain_community.llms import OpenAI
@pytest.mark.requires("streamlit")
def test_streamlit_callback_agent() -> None:
    """Run an agent with StreamlitCallbackHandler attached to a container."""
    import streamlit as st
    from langchain.agents import AgentType, initialize_agent, load_tools

    handler = StreamlitCallbackHandler(st.container())
    llm = OpenAI(temperature=0)
    agent = initialize_agent(
        load_tools(["serpapi", "llm-math"], llm=llm),
        llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
    )
    agent.run(
        "Who is Olivia Wilde's boyfriend? "
        "What is his current age raised to the 0.23 power?",
        callbacks=[handler],
    )
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/callbacks/test_langchain_tracer.py | """Integration tests for the langchain tracer module."""
import asyncio
import os
from aiohttp import ClientSession
from langchain_core.callbacks.manager import atrace_as_chain_group, trace_as_chain_group
from langchain_core.prompts import PromptTemplate
from langchain_core.tracers.context import tracing_v2_enabled
from langchain_community.chat_models import ChatOpenAI
from langchain_community.llms import OpenAI
# Agent prompts shared by every tracing test in this module.  Each question
# needs both a web search (serpapi) and a calculator step (llm-math), so the
# resulting trace covers a multi-step, multi-tool agent run.
questions = [
    (
        "Who won the US Open men's final in 2019? "
        "What is his age raised to the 0.334 power?"
    ),
    (
        "Who is Olivia Wilde's boyfriend? "
        "What is his current age raised to the 0.23 power?"
    ),
    (
        "Who won the most recent formula 1 grand prix? "
        "What is their age raised to the 0.23 power?"
    ),
    (
        "Who won the US Open women's final in 2019? "
        "What is her age raised to the 0.34 power?"
    ),
    ("Who is Beyonce's husband? " "What is his age raised to the 0.19 power?"),
]
def test_tracing_sequential() -> None:
    """Run several agent questions sequentially with v1 tracing enabled."""
    from langchain.agents import AgentType, initialize_agent, load_tools

    os.environ["LANGCHAIN_TRACING"] = "true"
    for question in questions[:3]:
        llm = OpenAI(temperature=0)
        agent = initialize_agent(
            load_tools(["llm-math", "serpapi"], llm=llm),
            llm,
            agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
            verbose=True,
        )
        agent.run(question)
def test_tracing_session_env_var() -> None:
    """Trace one agent run into a named session set via LANGCHAIN_SESSION."""
    from langchain.agents import AgentType, initialize_agent, load_tools

    os.environ["LANGCHAIN_TRACING"] = "true"
    os.environ["LANGCHAIN_SESSION"] = "my_session"
    llm = OpenAI(temperature=0)
    agent = initialize_agent(
        load_tools(["llm-math", "serpapi"], llm=llm),
        llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
    )
    agent.run(questions[0])
    # Clean up so later tests don't inherit this session name.
    os.environ.pop("LANGCHAIN_SESSION", None)
async def test_tracing_concurrent() -> None:
    """Run several traced agent questions concurrently over one session."""
    from langchain.agents import AgentType, initialize_agent, load_tools

    os.environ["LANGCHAIN_TRACING"] = "true"
    session = ClientSession()
    llm = OpenAI(temperature=0)
    agent = initialize_agent(
        load_tools(["llm-math", "serpapi"], llm=llm, aiosession=session),
        llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
    )
    await asyncio.gather(*(agent.arun(q) for q in questions[:3]))
    await session.close()
async def test_tracing_concurrent_bw_compat_environ() -> None:
    """Same concurrent run, enabled via the legacy LANGCHAIN_HANDLER variable."""
    from langchain.agents import AgentType, initialize_agent, load_tools

    os.environ["LANGCHAIN_HANDLER"] = "langchain"
    # Make sure the newer env var isn't what turns tracing on.
    os.environ.pop("LANGCHAIN_TRACING", None)
    session = ClientSession()
    llm = OpenAI(temperature=0)
    agent = initialize_agent(
        load_tools(["llm-math", "serpapi"], llm=llm, aiosession=session),
        llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
    )
    await asyncio.gather(*(agent.arun(q) for q in questions[:3]))
    await session.close()
    os.environ.pop("LANGCHAIN_HANDLER", None)
async def test_tracing_v2_environment_variable() -> None:
    """Concurrent agent runs with v2 tracing enabled via env var."""
    from langchain.agents import AgentType, initialize_agent, load_tools

    os.environ["LANGCHAIN_TRACING_V2"] = "true"
    session = ClientSession()
    llm = OpenAI(temperature=0)
    agent = initialize_agent(
        load_tools(["llm-math", "serpapi"], llm=llm, aiosession=session),
        llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
    )
    await asyncio.gather(*(agent.arun(q) for q in questions[:3]))
    await session.close()
def test_tracing_v2_context_manager() -> None:
    """Trace only the run executed inside tracing_v2_enabled()."""
    from langchain.agents import AgentType, initialize_agent, load_tools

    llm = ChatOpenAI(temperature=0)
    agent = initialize_agent(
        load_tools(["llm-math", "serpapi"], llm=llm),
        llm,
        agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
    )
    # Env-var tracing must be off so only the context manager traces.
    os.environ.pop("LANGCHAIN_TRACING_V2", None)
    with tracing_v2_enabled():
        agent.run(questions[0])  # this should be traced
    agent.run(questions[0])  # this should not be traced
def test_tracing_v2_chain_with_tags() -> None:
    """Trace a ConstitutionalChain run carrying both chain- and call-level tags."""
    from langchain.chains.constitutional_ai.base import ConstitutionalChain
    from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
    from langchain.chains.llm import LLMChain

    llm = OpenAI(temperature=0)
    principle = ConstitutionalPrinciple(
        critique_request="Tell if this answer is good.",
        revision_request="Give a better answer.",
    )
    chain = ConstitutionalChain.from_llm(
        llm,
        chain=LLMChain.from_string(llm, "Q: {question} A:"),
        tags=["only-root"],
        constitutional_principles=[principle],
    )
    # Env-var tracing must be off so only the context manager traces.
    os.environ.pop("LANGCHAIN_TRACING_V2", None)
    with tracing_v2_enabled():
        chain.run("what is the meaning of life", tags=["a-tag"])
def test_tracing_v2_agent_with_metadata() -> None:
    """Run LLM- and chat-based agents with per-call tags and metadata."""
    from langchain.agents import AgentType, initialize_agent, load_tools

    os.environ["LANGCHAIN_TRACING_V2"] = "true"
    llm = OpenAI(temperature=0)
    chat = ChatOpenAI(temperature=0)
    tools = load_tools(["llm-math", "serpapi"], llm=llm)
    llm_agent = initialize_agent(
        tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
    )
    chat_agent = initialize_agent(
        tools, chat, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True
    )
    run_kwargs = {"tags": ["a-tag"], "metadata": {"a": "b", "c": "d"}}
    llm_agent.run(questions[0], **run_kwargs)
    chat_agent.run(questions[0], **run_kwargs)
async def test_tracing_v2_async_agent_with_metadata() -> None:
    """Async agent runs carrying both model-level and call-level metadata."""
    from langchain.agents import AgentType, initialize_agent, load_tools

    os.environ["LANGCHAIN_TRACING_V2"] = "true"
    model_metadata = {"f": "g", "h": "i"}
    llm = OpenAI(temperature=0, metadata=model_metadata)
    chat = ChatOpenAI(temperature=0, metadata=model_metadata)
    tools = load_tools(["llm-math", "serpapi"], llm=llm)
    llm_agent = initialize_agent(
        tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
    )
    chat_agent = initialize_agent(
        tools,
        chat,
        agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
    )
    run_kwargs = {"tags": ["a-tag"], "metadata": {"a": "b", "c": "d"}}
    await llm_agent.arun(questions[0], **run_kwargs)
    await chat_agent.arun(questions[0], **run_kwargs)
def test_trace_as_group() -> None:
    """Group several chain runs under shared trace groups."""
    from langchain.chains.llm import LLMChain

    chain = LLMChain(
        llm=OpenAI(temperature=0.9),
        prompt=PromptTemplate(
            input_variables=["product"],
            template="What is a good name for a company that makes {product}?",
        ),
    )
    with trace_as_chain_group("my_group", inputs={"input": "cars"}) as manager:
        chain.run(product="cars", callbacks=manager)
        chain.run(product="computers", callbacks=manager)
        last = chain.run(product="toys", callbacks=manager)
        manager.on_chain_end({"output": last})
    with trace_as_chain_group("my_group_2", inputs={"input": "toys"}) as manager:
        last = chain.run(product="toys", callbacks=manager)
        manager.on_chain_end({"output": last})
def test_trace_as_group_with_env_set() -> None:
    """Same grouped runs, but with v2 env-var tracing also enabled."""
    from langchain.chains.llm import LLMChain

    os.environ["LANGCHAIN_TRACING_V2"] = "true"
    chain = LLMChain(
        llm=OpenAI(temperature=0.9),
        prompt=PromptTemplate(
            input_variables=["product"],
            template="What is a good name for a company that makes {product}?",
        ),
    )
    with trace_as_chain_group(
        "my_group_env_set", inputs={"input": "cars"}
    ) as manager:
        chain.run(product="cars", callbacks=manager)
        chain.run(product="computers", callbacks=manager)
        last = chain.run(product="toys", callbacks=manager)
        manager.on_chain_end({"output": last})
    with trace_as_chain_group(
        "my_group_2_env_set", inputs={"input": "toys"}
    ) as manager:
        last = chain.run(product="toys", callbacks=manager)
        manager.on_chain_end({"output": last})
async def test_trace_as_group_async() -> None:
    """Async grouped runs: sequential in one group, concurrent in another."""
    from langchain.chains.llm import LLMChain

    chain = LLMChain(
        llm=OpenAI(temperature=0.9),
        prompt=PromptTemplate(
            input_variables=["product"],
            template="What is a good name for a company that makes {product}?",
        ),
    )
    async with atrace_as_chain_group("my_async_group") as manager:
        for product in ("cars", "computers", "toys"):
            await chain.arun(product=product, callbacks=manager)
    async with atrace_as_chain_group(
        "my_async_group_2", inputs={"input": "toys"}
    ) as manager:
        results = await asyncio.gather(
            chain.arun(product="toys", callbacks=manager),
            chain.arun(product="computers", callbacks=manager),
            chain.arun(product="cars", callbacks=manager),
        )
        await manager.on_chain_end({"output": results})
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/callbacks/test_openai_callback.py | """Integration tests for the langchain tracer module."""
import asyncio
from langchain_community.callbacks import get_openai_callback
from langchain_community.llms import OpenAI
async def test_openai_callback() -> None:
    """Token counting is scoped to the callback context, sync and async."""
    llm = OpenAI(temperature=0)
    with get_openai_callback() as cb:
        llm.invoke("What is the square root of 4?")
    baseline = cb.total_tokens
    assert baseline > 0

    # Two sync calls double the count.
    with get_openai_callback() as cb:
        llm.invoke("What is the square root of 4?")
        llm.invoke("What is the square root of 4?")
    assert cb.total_tokens == 2 * baseline

    # Concurrent async calls are all attributed to the active context.
    with get_openai_callback() as cb:
        await asyncio.gather(
            *(llm.agenerate(["What is the square root of 4?"]) for _ in range(3))
        )
    assert cb.total_tokens == 3 * baseline

    # A task started before the context is NOT counted inside it.
    task = asyncio.create_task(llm.agenerate(["What is the square root of 4?"]))
    with get_openai_callback() as cb:
        await llm.agenerate(["What is the square root of 4?"])
    await task
    assert cb.total_tokens == baseline
def test_openai_callback_batch_llm() -> None:
    """A batched generate counts the same tokens as separate invokes."""
    llm = OpenAI(temperature=0)
    with get_openai_callback() as cb:
        llm.generate(["What is the square root of 4?", "What is the square root of 4?"])
    assert cb.total_tokens > 0
    batch_total = cb.total_tokens

    with get_openai_callback() as cb:
        llm.invoke("What is the square root of 4?")
        llm.invoke("What is the square root of 4?")
    assert cb.total_tokens == batch_total
def test_openai_callback_agent() -> None:
    """Token and cost accounting across a full multi-tool agent run."""
    from langchain.agents import AgentType, initialize_agent, load_tools

    llm = OpenAI(temperature=0)
    agent = initialize_agent(
        load_tools(["serpapi", "llm-math"], llm=llm),
        llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
    )
    with get_openai_callback() as cb:
        agent.run(
            "Who is Olivia Wilde's boyfriend? "
            "What is his current age raised to the 0.23 power?"
        )
        print(f"Total Tokens: {cb.total_tokens}")  # noqa: T201
        print(f"Prompt Tokens: {cb.prompt_tokens}")  # noqa: T201
        print(f"Completion Tokens: {cb.completion_tokens}")  # noqa: T201
        print(f"Total Cost (USD): ${cb.total_cost}")  # noqa: T201
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.