index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/tools | lc_public_repos/langchain/libs/community/tests/integration_tests/tools/edenai/test_text_moderation.py | """Test EdenAi's text moderation Tool.
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and will return dummy results)
You'll then need to set EDENAI_API_KEY environment variable to your api key.
"""
from langchain_community.tools.edenai.text_moderation import EdenAiTextModerationTool
def test_edenai_call() -> None:
    """Smoke-test EdenAI's text moderation endpoint with a single query."""
    tool = EdenAiTextModerationTool(providers=["openai"], language="en")  # type: ignore[call-arg]
    result = tool.invoke("i hate you")
    # The tool must report the expected identity and feature routing.
    expected = ("edenai_explicit_content_detection_text", "text", "moderation")
    assert (tool.name, tool.feature, tool.subfeature) == expected
    # A live (or sandbox) call returns the moderation verdict as plain text.
    assert isinstance(result, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/tools | lc_public_repos/langchain/libs/community/tests/integration_tests/tools/edenai/test_image_objectdetection.py | """Test EdenAi's object detection Tool.
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and will return dummy results)
You'll then need to set EDENAI_API_KEY environment variable to your api key.
"""
from langchain_community.tools.edenai import EdenAiObjectDetectionTool
def test_edenai_call() -> None:
    """Smoke-test EdenAI's object detection endpoint on a sample image URL."""
    tool = EdenAiObjectDetectionTool(providers=["google"])  # type: ignore[call-arg]
    result = tool.invoke("https://static.javatpoint.com/images/objects.jpg")
    # Identity and feature routing must match the registered tool metadata.
    assert tool.name == "edenai_object_detection"
    assert (tool.feature, tool.subfeature) == ("image", "object_detection")
    # The detection summary comes back as plain text.
    assert isinstance(result, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_clickup.py | """Integration test for Clickup API Wrapper."""
import json
from datetime import datetime
import pytest
from langchain_community.utilities.clickup import ClickupAPIWrapper
@pytest.fixture
def clickup_wrapper() -> ClickupAPIWrapper:
    # Presumably reads credentials (access token, team id) from the
    # environment — TODO confirm against ClickupAPIWrapper's validators.
    return ClickupAPIWrapper()
def test_init(clickup_wrapper: ClickupAPIWrapper) -> None:
    """Smoke-test that the fixture builds a wrapper instance."""
    assert isinstance(clickup_wrapper, ClickupAPIWrapper)


def test_get_access_code_url() -> None:
    """The OAuth access-code URL helper should return a string."""
    assert isinstance(
        ClickupAPIWrapper.get_access_code_url("oauth_client_id", "oauth_client_secret"),
        str,
    )


def test_get_access_token() -> None:
    """Exchanging dummy OAuth credentials is expected to yield None."""
    output = ClickupAPIWrapper.get_access_token(
        "oauth_client_id", "oauth_client_secret", "code"
    )
    assert output is None
def test_folder_related(clickup_wrapper: ClickupAPIWrapper) -> None:
    """Create a folder and confirm the API echoes its name back."""
    # Timestamp in the name keeps repeated test runs from colliding.
    time_str = datetime.now().strftime("%d/%m/%Y-%H:%M:%S")
    task_name = f"Test Folder - {time_str}"

    # Create Folder
    create_response = json.loads(
        clickup_wrapper.run(mode="create_folder", query=json.dumps({"name": task_name}))
    )
    assert create_response["name"] == task_name


def test_list_related(clickup_wrapper: ClickupAPIWrapper) -> None:
    """Create a list and confirm the API echoes its name back."""
    time_str = datetime.now().strftime("%d/%m/%Y-%H:%M:%S")
    task_name = f"Test List - {time_str}"

    # Create List
    create_response = json.loads(
        clickup_wrapper.run(mode="create_list", query=json.dumps({"name": task_name}))
    )
    assert create_response["name"] == task_name
def test_task_related(clickup_wrapper: ClickupAPIWrapper) -> None:
    """Exercise the task lifecycle: create, fetch, update, re-fetch."""
    time_str = datetime.now().strftime("%d/%m/%Y-%H:%M:%S")
    task_name = f"Test Task - {time_str}"

    # Create task
    create_response = json.loads(
        clickup_wrapper.run(
            mode="create_task",
            query=json.dumps({"name": task_name, "description": "This is a Test"}),
        )
    )
    assert create_response["name"] == task_name

    # Get task
    task_id = create_response["id"]
    get_response = json.loads(
        clickup_wrapper.run(mode="get_task", query=json.dumps({"task_id": task_id}))
    )
    assert get_response["name"] == task_name

    # Update task
    new_name = f"{task_name} - New"
    clickup_wrapper.run(
        mode="update_task",
        query=json.dumps(
            {"task_id": task_id, "attribute_name": "name", "value": new_name}
        ),
    )
    # Fetch again to verify the rename actually persisted server-side.
    get_response_2 = json.loads(
        clickup_wrapper.run(mode="get_task", query=json.dumps({"task_id": task_id}))
    )
    assert get_response_2["name"] == new_name
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_infobip.py | from typing import Dict
import responses
from langchain_community.utilities.infobip import InfobipAPIWrapper
def test_send_sms() -> None:
    """Send one SMS against a mocked Infobip endpoint and check the message id."""
    infobip: InfobipAPIWrapper = InfobipAPIWrapper(
        infobip_api_key="test",
        infobip_base_url="https://api.infobip.com",
    )
    # Minimal successful payload as returned by Infobip's SMS API.
    json_response: Dict = {
        "messages": [
            {
                "messageId": "123",
                "status": {
                    "description": "Message sent to next instance",
                    "groupId": 1,
                    "groupName": "PENDING",
                    "id": 26,
                    "name": "PENDING_ACCEPTED",
                },
                "to": "41793026727",
            }
        ]
    }

    with responses.RequestsMock() as rsps:
        rsps.add(
            responses.POST,
            "https://api.infobip.com/sms/2/text/advanced",
            json=json_response,
            status=200,
        )
        response: str = infobip.run(
            body="test",
            to="41793026727",
            sender="41793026727",
            channel="sms",
        )
        # The wrapper surfaces the message id of the sent message.
        assert response == "123"


def test_send_email() -> None:
    """Send one e-mail against a mocked Infobip endpoint and check the message id."""
    infobip: InfobipAPIWrapper = InfobipAPIWrapper(
        infobip_api_key="test",
        infobip_base_url="https://api.infobip.com",
    )
    json_response: Dict = {
        "bulkId": "123",
        "messages": [
            {
                "to": "test@example.com",
                "messageId": "123",
                "status": {
                    "groupId": 1,
                    "groupName": "PENDING",
                    "id": 26,
                    "name": "PENDING_ACCEPTED",
                    "description": "Message accepted, pending for delivery.",
                },
            }
        ],
    }

    with responses.RequestsMock() as rsps:
        rsps.add(
            responses.POST,
            "https://api.infobip.com/email/3/send",
            json=json_response,
            status=200,
        )
        response: str = infobip.run(
            body="test",
            to="test@example.com",
            sender="test@example.com",
            subject="test",
            channel="email",
        )
        assert response == "123"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_duckduckdgo_search_api.py | import pytest
from langchain_community.tools.ddg_search.tool import (
DuckDuckGoSearchResults,
DuckDuckGoSearchRun,
)
def ddg_installed() -> bool:
    """Report whether the duckduckgo_search package is importable."""
    try:
        from duckduckgo_search import DDGS  # noqa: F401
    except Exception as e:
        # Broad catch on purpose: any import failure means "not usable".
        print(f"duckduckgo not installed, skipping test {e}")  # noqa: T201
        return False
    return True
@pytest.mark.skipif(not ddg_installed(), reason="requires duckduckgo-search package")
def test_ddg_search_tool() -> None:
    """A general web search should come back with a substantive snippet."""
    keywords = "Bella Ciao"
    tool = DuckDuckGoSearchRun()
    result = tool.invoke(keywords)
    print(result)  # noqa: T201
    # 20+ words is a loose proxy for "real results, not an error string".
    assert len(result.split()) > 20


@pytest.mark.skipif(not ddg_installed(), reason="requires duckduckgo-search package")
def test_ddg_search_news_tool() -> None:
    """The news-backed results variant should also return a substantive snippet."""
    keywords = "Tesla"
    tool = DuckDuckGoSearchResults(source="news")  # type: ignore[call-arg]
    result = tool.invoke(keywords)
    print(result)  # noqa: T201
    assert len(result.split()) > 20
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_github.py | """Integration test for Github Wrapper."""
import pytest
from langchain_community.utilities.github import GitHubAPIWrapper
# Make sure you have set the following env variables:
# GITHUB_REPOSITORY
# GITHUB_BRANCH
# GITHUB_APP_ID
# GITHUB_PRIVATE_KEY
@pytest.fixture
def api_client() -> GitHubAPIWrapper:
    # Credentials come from the env variables listed at the top of the file.
    return GitHubAPIWrapper()  # type: ignore[call-arg]


def test_get_open_issues(api_client: GitHubAPIWrapper) -> None:
    """Basic test to fetch issues"""
    issues = api_client.get_issues()
    # The configured repository is assumed to have at least one open issue.
    assert len(issues) != 0


def test_search_issues_and_prs(api_client: GitHubAPIWrapper) -> None:
    """Basic test to search issues and PRs"""
    results = api_client.search_issues_and_prs("is:pr is:merged")
    assert len(results) != 0
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_portkey.py | import json
from langchain_community.utilities import Portkey
def test_Config() -> None:
    """Config() must map its kwargs onto the x-portkey-* request headers."""
    headers = Portkey.Config(
        api_key="test_api_key",
        environment="test_environment",
        user="test_user",
        organisation="test_organisation",
        prompt="test_prompt",
        retry_count=3,
        trace_id="test_trace_id",
        cache="simple",
        cache_force_refresh="True",
        cache_age=3600,
    )
    # Scalar options become individual headers (values stringified).
    assert headers["x-portkey-api-key"] == "test_api_key"
    assert headers["x-portkey-trace-id"] == "test_trace_id"
    assert headers["x-portkey-retry-count"] == "3"
    assert headers["x-portkey-cache"] == "simple"
    assert headers["x-portkey-cache-force-refresh"] == "True"
    assert headers["Cache-Control"] == "max-age:3600"
    # Contextual fields are folded into one JSON-encoded metadata header,
    # each key prefixed with an underscore.
    metadata = json.loads(headers["x-portkey-metadata"])
    assert metadata["_environment"] == "test_environment"
    assert metadata["_user"] == "test_user"
    assert metadata["_organisation"] == "test_organisation"
    assert metadata["_prompt"] == "test_prompt"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_twilio.py | """Integration test for Twilio SMS."""
from langchain_community.utilities.twilio import TwilioAPIWrapper
def test_call() -> None:
    """Send one SMS through the Twilio wrapper and expect a truthy result."""
    wrapper = TwilioAPIWrapper()  # type: ignore[call-arg]
    result = wrapper.run("Message", "+16162904619")
    # Any truthy return signals the send went through.
    assert result
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_googlesearch_api.py | """Integration test for Google Search API Wrapper."""
from langchain_community.utilities.google_search import GoogleSearchAPIWrapper
def test_call() -> None:
    """Test that call gives the correct answer."""
    search = GoogleSearchAPIWrapper()  # type: ignore[call-arg]
    output = search.run("What was Obama's first name?")
    assert "Barack Hussein Obama II" in output


def test_no_result_call() -> None:
    """Test that call gives no result."""
    search = GoogleSearchAPIWrapper()  # type: ignore[call-arg]
    output = search.run(
        "NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL"
    )
    print(type(output))  # noqa: T201
    # A miss maps to a fixed sentinel string rather than an empty result.
    assert "No good Google Search Result was found" == output


def test_result_with_params_call() -> None:
    """Test that call gives the correct answer with extra params."""
    search = GoogleSearchAPIWrapper()  # type: ignore[call-arg]
    output = search.results(
        query="What was Obama's first name?",
        num_results=5,
        # Extra engine params are forwarded as-is (country restrict, SafeSearch).
        search_params={"cr": "us", "safe": "active"},
    )
    assert len(output)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_openweathermap.py | from langchain_community.utilities.openweathermap import OpenWeatherMapAPIWrapper
def test_openweathermap_api_wrapper() -> None:
    """The formatted weather report for London, GB must contain every field."""
    weather = OpenWeatherMapAPIWrapper()  # type: ignore[call-arg]
    report = weather.run("London,GB")
    assert report is not None
    # Location plus each labelled section of the textual report.
    expected_fragments = (
        "London",
        "GB",
        "Detailed status:",
        "Wind speed:",
        "direction:",
        "Humidity:",
        "Temperature:",
        "Current:",
        "High:",
        "Low:",
        "Feels like:",
        "Rain:",
        "Heat index:",
        "Cloud cover:",
    )
    for fragment in expected_fragments:
        assert fragment in report
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_arxiv.py | """Integration test for Arxiv API Wrapper."""
from typing import Any, List
import pytest
from langchain_core.documents import Document
from langchain_core.tools import BaseTool
from langchain_community.tools import ArxivQueryRun
from langchain_community.utilities import ArxivAPIWrapper
@pytest.fixture
def api_client() -> ArxivAPIWrapper:
    # Default configuration; arXiv's public API needs no credentials.
    return ArxivAPIWrapper()  # type: ignore[call-arg]
def test_run_success_paper_name(api_client: ArxivAPIWrapper) -> None:
    """Test a query of paper name that returns the correct answer"""
    output = api_client.run("Heat-bath random walks with Markov bases")
    # run() concatenates summaries of several related papers.
    assert "Probability distributions for Markov chains based quantum walks" in output
    assert (
        "Transformations of random walks on groups via Markov stopping times" in output
    )
    assert (
        "Recurrence of Multidimensional Persistent Random Walks. Fourier and Series "
        "Criteria" in output
    )


def test_run_success_arxiv_identifier(api_client: ArxivAPIWrapper) -> None:
    """Test a query of an arxiv identifier returns the correct answer"""
    output = api_client.run("1605.08386v1")
    assert "Heat-bath random walks with Markov bases" in output


def test_run_success_multiple_arxiv_identifiers(api_client: ArxivAPIWrapper) -> None:
    """Test a query of multiple arxiv identifiers that returns the correct answer"""
    # Space-separated identifiers are all resolved in one call.
    output = api_client.run("1605.08386v1 2212.00794v2 2308.07912")
    assert "Heat-bath random walks with Markov bases" in output
    assert "Scaling Language-Image Pre-training via Masking" in output
    assert (
        "Ultra-low mass PBHs in the early universe can explain the PTA signal" in output
    )


def test_run_returns_several_docs(api_client: ArxivAPIWrapper) -> None:
    """Test that returns several docs"""
    output = api_client.run("Caprice Stanley")
    assert "On Mixing Behavior of a Family of Random Walks" in output


def test_run_returns_no_result(api_client: ArxivAPIWrapper) -> None:
    """Test that gives no result."""
    output = api_client.run("1605.08386WWW")
    # A miss maps to a fixed sentinel string.
    assert "No good Arxiv Result was found" == output
def assert_docs(docs: List[Document]) -> None:
    """Every document must carry content plus exactly the core metadata keys."""
    expected_keys = {"Published", "Title", "Authors", "Summary"}
    for document in docs:
        assert document.page_content
        assert document.metadata
        assert set(document.metadata) == expected_keys
def test_load_success_paper_name(api_client: ArxivAPIWrapper) -> None:
    """Test a query of paper name that returns one document"""
    docs = api_client.load("Heat-bath random walks with Markov bases")
    # Presumably the default top_k/load_max yields 3 docs — TODO confirm.
    assert len(docs) == 3
    assert_docs(docs)


def test_load_success_arxiv_identifier(api_client: ArxivAPIWrapper) -> None:
    """Test a query of an arxiv identifier that returns one document"""
    docs = api_client.load("1605.08386v1")
    assert len(docs) == 1
    assert_docs(docs)


def test_load_success_multiple_arxiv_identifiers(api_client: ArxivAPIWrapper) -> None:
    """Test a query of arxiv identifiers that returns the correct answer"""
    docs = api_client.load("1605.08386v1 2212.00794v2 2308.07912")
    # One document per identifier.
    assert len(docs) == 3
    assert_docs(docs)


def test_load_returns_no_result(api_client: ArxivAPIWrapper) -> None:
    """Test that returns no docs"""
    docs = api_client.load("1605.08386WWW")
    assert len(docs) == 0


def test_load_returns_limited_docs() -> None:
    """Test that returns several docs"""
    expected_docs = 2
    api_client = ArxivAPIWrapper(load_max_docs=expected_docs)  # type: ignore[call-arg]
    docs = api_client.load("ChatGPT")
    assert len(docs) == expected_docs
    assert_docs(docs)


def test_load_returns_limited_doc_content_chars() -> None:
    """Test that returns limited doc_content_chars_max"""
    doc_content_chars_max = 100
    api_client = ArxivAPIWrapper(doc_content_chars_max=doc_content_chars_max)  # type: ignore[call-arg]
    docs = api_client.load("1605.08386")
    # Content is hard-truncated to exactly the configured budget.
    assert len(docs[0].page_content) == doc_content_chars_max


def test_load_returns_unlimited_doc_content_chars() -> None:
    """Test that returns unlimited doc_content_chars_max"""
    doc_content_chars_max = None
    api_client = ArxivAPIWrapper(doc_content_chars_max=doc_content_chars_max)  # type: ignore[call-arg]
    docs = api_client.load("1605.08386")
    # Approximate match: the full paper text may drift slightly over time.
    assert len(docs[0].page_content) == pytest.approx(54338, rel=1e-2)


def test_load_returns_full_set_of_metadata() -> None:
    """Test that returns several docs"""
    api_client = ArxivAPIWrapper(load_max_docs=1, load_all_available_meta=True)  # type: ignore[call-arg]
    docs = api_client.load("ChatGPT")
    assert len(docs) == 1
    for doc in docs:
        assert doc.page_content
        assert doc.metadata
        # Beyond the core keys, extra metadata must be present.
        assert set(doc.metadata).issuperset(
            {"Published", "Title", "Authors", "Summary"}
        )
        print(doc.metadata)  # noqa: T201
        assert len(set(doc.metadata)) > 4
def _load_arxiv_from_universal_entry(**kwargs: Any) -> BaseTool:
    """Resolve the "arxiv" tool through langchain's generic tool loader."""
    from langchain.agents.load_tools import load_tools

    loaded = load_tools(["arxiv"], **kwargs)
    assert len(loaded) == 1, "loaded more than 1 tool"
    return loaded[0]
def test_load_arxiv_from_universal_entry() -> None:
    """The generically-loaded tool should answer a simple author query."""
    arxiv_tool = _load_arxiv_from_universal_entry()
    output = arxiv_tool.invoke("Caprice Stanley")
    assert (
        "On Mixing Behavior of a Family of Random Walks" in output
    ), "failed to fetch a valid result"


def test_load_arxiv_from_universal_entry_with_params() -> None:
    """Parameters passed to load_tools must reach the wrapped API client."""
    params = {
        "top_k_results": 1,
        "load_max_docs": 10,
        "load_all_available_meta": True,
    }
    arxiv_tool = _load_arxiv_from_universal_entry(**params)
    assert isinstance(arxiv_tool, ArxivQueryRun)
    wp = arxiv_tool.api_wrapper
    assert wp.top_k_results == 1, "failed to assert top_k_results"
    assert wp.load_max_docs == 10, "failed to assert load_max_docs"
    assert (
        wp.load_all_available_meta is True
    ), "failed to assert load_all_available_meta"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_serpapi.py | """Integration test for SerpAPI."""
from langchain_community.utilities import SerpAPIWrapper
def test_call() -> None:
    """A simple factual query should yield the exact expected answer."""
    wrapper = SerpAPIWrapper()  # type: ignore[call-arg]
    answer = wrapper.run("What was Obama's first name?")
    assert answer == "Barack Hussein Obama II"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_powerbi_api.py | """Integration test for POWERBI API Wrapper."""
import pytest
from langchain_core.utils import get_from_env
from langchain_community.utilities.powerbi import PowerBIDataset
def azure_installed() -> bool:
    """Report whether the azure credential packages are importable."""
    try:
        from azure.core.credentials import TokenCredential  # noqa: F401
        from azure.identity import DefaultAzureCredential  # noqa: F401
    except Exception as e:
        # Broad catch on purpose: any import failure means "not usable".
        print(f"azure not installed, skipping test {e}")  # noqa: T201
        return False
    return True
@pytest.mark.skipif(not azure_installed(), reason="requires azure package")
def test_daxquery() -> None:
    """Run a COUNTROWS DAX query and compare against the expected row count."""
    from azure.identity import DefaultAzureCredential

    # Target dataset/table and expected size are read from the environment.
    DATASET_ID = get_from_env("", "POWERBI_DATASET_ID")
    TABLE_NAME = get_from_env("", "POWERBI_TABLE_NAME")
    NUM_ROWS = get_from_env("", "POWERBI_NUMROWS")

    powerbi = PowerBIDataset(
        dataset_id=DATASET_ID,
        table_names=[TABLE_NAME],
        credential=DefaultAzureCredential(),
    )

    output = powerbi.run(f'EVALUATE ROW("RowCount", COUNTROWS({TABLE_NAME}))')
    numrows = str(output["results"][0]["tables"][0]["rows"][0]["[RowCount]"])
    # String comparison: NUM_ROWS comes straight from the env as text.
    assert NUM_ROWS == numrows
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_outline.py | """Integration test for Outline API Wrapper."""
from typing import List
import pytest
import responses
from langchain_core.documents import Document
from langchain_community.utilities import OutlineAPIWrapper
OUTLINE_INSTANCE_TEST_URL = "https://app.getoutline.com"
OUTLINE_SUCCESS_RESPONSE = {
"data": [
{
"ranking": 0.3911583,
"context": "Testing Context",
"document": {
"id": "abb2bf15-a597-4255-8b19-b742e3d037bf",
"url": "/doc/quick-start-jGuGGGOTuL",
"title": "Test Title",
"text": "Testing Content",
"createdBy": {"name": "John Doe"},
"revision": 3,
"collectionId": "93f182a4-a591-4d47-83f0-752e7bb2065c",
"parentDocumentId": None,
},
},
],
"status": 200,
"ok": True,
}
OUTLINE_EMPTY_RESPONSE = {
"data": [],
"status": 200,
"ok": True,
}
OUTLINE_ERROR_RESPONSE = {
"ok": False,
"error": "authentication_required",
"status": 401,
"message": "Authentication error",
}
@pytest.fixture
def api_client() -> OutlineAPIWrapper:
    # A dummy key is fine: every test mocks the HTTP layer with `responses`.
    return OutlineAPIWrapper(
        outline_api_key="api_key", outline_instance_url=OUTLINE_INSTANCE_TEST_URL
    )
def assert_docs(docs: List[Document], all_meta: bool = False) -> None:
    """Check each document has content and the expected metadata shape."""
    required = {"title", "source"}
    for document in docs:
        assert document.page_content
        assert document.metadata
        meta_keys = set(document.metadata)
        assert meta_keys.issuperset(required)
        # With all_meta the doc carries extra keys; otherwise only the core two.
        if all_meta:
            assert len(meta_keys) > len(required)
        else:
            assert len(meta_keys) == len(required)
@responses.activate
def test_run_success(api_client: OutlineAPIWrapper) -> None:
    """A successful search returns docs carrying only the core metadata."""
    responses.add(
        responses.POST,
        api_client.outline_instance_url + api_client.outline_search_endpoint,  # type: ignore[operator]
        json=OUTLINE_SUCCESS_RESPONSE,
        status=200,
    )
    docs = api_client.run("Testing")
    assert_docs(docs, all_meta=False)


@responses.activate
def test_run_success_all_meta(api_client: OutlineAPIWrapper) -> None:
    """With load_all_available_meta the docs carry extra metadata keys."""
    api_client.load_all_available_meta = True
    responses.add(
        responses.POST,
        api_client.outline_instance_url + api_client.outline_search_endpoint,  # type: ignore[operator]
        json=OUTLINE_SUCCESS_RESPONSE,
        status=200,
    )
    docs = api_client.run("Testing")
    assert_docs(docs, all_meta=True)
@responses.activate
def test_run_no_result(api_client: OutlineAPIWrapper) -> None:
    """An empty result set from the API maps to an empty document list."""
    responses.add(
        responses.POST,
        api_client.outline_instance_url + api_client.outline_search_endpoint,  # type: ignore[operator]
        json=OUTLINE_EMPTY_RESPONSE,
        status=200,
    )
    docs = api_client.run("No Result Test")
    assert not docs
@responses.activate
def test_run_error(api_client: OutlineAPIWrapper) -> None:
    """A non-2xx API response must surface as an exception from run()."""
    responses.add(
        responses.POST,
        api_client.outline_instance_url + api_client.outline_search_endpoint,  # type: ignore[operator]
        json=OUTLINE_ERROR_RESPONSE,
        status=401,
    )
    # The original try/except silently passed when run() raised nothing;
    # pytest.raises fails the test unless the expected error is raised.
    with pytest.raises(Exception, match="Outline API returned an error:"):
        api_client.run("Testing")
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_golden_query_api.py | """Integration test for Golden API Wrapper."""
import json
from langchain_community.utilities.golden_query import GoldenQueryAPIWrapper
def test_call() -> None:
    """A Golden query should come back with a non-empty result list."""
    wrapper = GoldenQueryAPIWrapper()
    raw = wrapper.run("companies in nanotech")
    # run() returns a JSON string; decode it and check for results.
    payload = json.loads(raw)
    results = payload.get("results", [])
    assert len(results) > 0
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_merriam_webster_api.py | """Integration test for Merriam Webster API Wrapper."""
import pytest
from langchain_community.utilities.merriam_webster import MerriamWebsterAPIWrapper
@pytest.fixture
def api_client() -> MerriamWebsterAPIWrapper:
    # Presumably reads the dictionary API key from the environment — confirm.
    return MerriamWebsterAPIWrapper()
def test_call(api_client: MerriamWebsterAPIWrapper) -> None:
    """Test that call gives correct answer."""
    output = api_client.run("LLM")
    assert "large language model" in output


def test_call_no_result(api_client: MerriamWebsterAPIWrapper) -> None:
    """Test that non-existent words return proper result."""
    output = api_client.run("NO_RESULT_NO_RESULT_NO_RESULT")
    assert "No Merriam-Webster definition was found for query" in output


def test_call_alternatives(api_client: MerriamWebsterAPIWrapper) -> None:
    """
    Test that non-existent queries that are close to an
    existing definition return proper result.
    """
    output = api_client.run("It's raining cats and dogs")
    # No exact entry exists, but the API suggests close matches.
    assert "No Merriam-Webster definition was found for query" in output
    assert "You can try one of the following alternative queries" in output
    assert "raining cats and dogs" in output
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_google_trends.py | """Unit test for Google Trends API Wrapper."""
import os
from unittest.mock import patch
from langchain_community.utilities.google_trends import GoogleTrendsAPIWrapper
@patch("serpapi.SerpApiClient.get_json")
def test_unexpected_response(mocked_serpapiclient):  # type: ignore[no-untyped-def]
    """An error payload from SerpApi must be handled gracefully by run()."""
    os.environ["SERPAPI_API_KEY"] = "123abcd"
    # Realistic error response: status "Error" and a top-level "error" field.
    resp = {
        "search_metadata": {
            "id": "659f32ec36e6a9107b46b5b4",
            "status": "Error",
            "json_endpoint": "https://serpapi.com/searches/.../659f32ec36e6a9107b46b5b4.json",
            "created_at": "2024-01-11 00:14:36 UTC",
            "processed_at": "2024-01-11 00:14:36 UTC",
            "google_trends_url": "https://trends.google.com/trends/api/explore?tz=420&req=%7B%22comparisonItem%22%3A%5B%7B%22keyword%22%3A%22Lego+building+trends+2022%22%2C%22geo%22%3A%22%22%2C%22time%22%3A%22today+12-m%22%7D%5D%2C%22category%22%3A0%2C%22property%22%3A%22%22%2C%22userConfig%22%3A%22%7BuserType%3A+%5C%22USER_TYPE_LEGIT_USER%5C%22%7D%22%7D",
            "prettify_html_file": "https://serpapi.com/searches/.../659f32ec36e6a9107b46b5b4.prettify",
            "total_time_taken": 90.14,
        },
        "search_parameters": {
            "engine": "google_trends",
            "q": "Lego building trends 2022",
            "date": "today 12-m",
            "tz": "420",
            "data_type": "TIMESERIES",
        },
        "error": "We couldn't get valid ... Please try again later.",
    }
    mocked_serpapiclient.return_value = resp
    tool = GoogleTrendsAPIWrapper()
    # The original test asserted nothing — it only checked that run() did not
    # raise. Also pin that run() degrades to a string message on error.
    result = tool.run("does not matter")
    assert isinstance(result, str)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_jira_api.py | """Integration test for JIRA API Wrapper."""
from langchain_community.utilities.jira import JiraAPIWrapper
def test_search() -> None:
    """Test for Searching issues on JIRA"""
    jql = "project = TP"
    jira = JiraAPIWrapper()  # type: ignore[call-arg]
    output = jira.run("jql", jql)
    assert "issues" in output


def test_getprojects() -> None:
    """Test for getting projects on JIRA"""
    jira = JiraAPIWrapper()  # type: ignore[call-arg]
    output = jira.run("get_projects", "")
    assert "projects" in output


def test_create_ticket() -> None:
    """Test the Create Ticket Call that Creates a Issue/Ticket on JIRA."""
    issue_string = (
        '{"summary": "Test Summary", "description": "Test Description",'
        ' "issuetype": {"name": "Bug"}, "project": {"key": "TP"}}'
    )
    jira = JiraAPIWrapper()  # type: ignore[call-arg]
    output = jira.run("create_issue", issue_string)
    # A created issue is reported back with its id and key.
    assert "id" in output
    assert "key" in output


def test_create_confluence_page() -> None:
    """Test for creating a page on Confluence via the JIRA wrapper."""
    jira = JiraAPIWrapper()  # type: ignore[call-arg]
    create_page_dict = (
        '{"space": "ROC", "title":"This is the title",'
        '"body":"This is the body. You can use '
        '<strong>HTML tags</strong>!"}'
    )
    output = jira.run("create_page", create_page_dict)
    assert "type" in output
    assert "page" in output


def test_other() -> None:
    """Non-exhaustive test for accessing other JIRA API methods"""
    jira = JiraAPIWrapper()  # type: ignore[call-arg]
    # "other" mode forwards {"function", "kwargs"} to an arbitrary client call.
    issue_create_dict = """
    {
        "function":"issue_create",
        "kwargs": {
            "fields": {
                "summary": "Test Summary",
                "description": "Test Description",
                "issuetype": {"name": "Bug"},
                "project": {"key": "TP"}
            }
        }
    }
    """
    output = jira.run("other", issue_create_dict)
    assert "id" in output
    assert "key" in output
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_nasa.py | """Integration test for NASA API Wrapper."""
from langchain_community.utilities.nasa import NasaAPIWrapper
def test_media_search() -> None:
    """Test for NASA Image and Video Library media search"""
    nasa = NasaAPIWrapper()
    # NOTE(review): the stray "+" makes this invalid JSON — the wrapper appears
    # to tolerate it; confirm the intended query format.
    query = '{"q": "saturn", + "year_start": "2002", "year_end": "2010", "page": 2}'
    output = nasa.run("search_media", query)
    assert output is not None
    assert "collection" in output


def test_get_media_metadata_manifest() -> None:
    """Test for retrieving media metadata manifest from NASA Image and Video Library"""
    nasa = NasaAPIWrapper()
    output = nasa.run("get_media_metadata_manifest", "2022_0707_Recientemente")
    assert output is not None


def test_get_media_metadata_location() -> None:
    """Test for retrieving media metadata location from NASA Image and Video Library"""
    nasa = NasaAPIWrapper()
    output = nasa.run("get_media_metadata_location", "as11-40-5874")
    assert output is not None


def test_get_video_captions_location() -> None:
    """Test for retrieving video captions location from NASA Image and Video Library"""
    nasa = NasaAPIWrapper()
    output = nasa.run("get_video_captions_location", "172_ISS-Slosh.sr")
    assert output is not None
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_pubmed.py | """Integration test for PubMed API Wrapper."""
from typing import Any, List
import pytest
from langchain_core.documents import Document
from langchain_core.tools import BaseTool
from langchain_community.tools import PubmedQueryRun
from langchain_community.utilities import PubMedAPIWrapper
xmltodict = pytest.importorskip("xmltodict")
@pytest.fixture
def api_client() -> PubMedAPIWrapper:
    # Default configuration; the PubMed E-utilities need no credentials here.
    return PubMedAPIWrapper()  # type: ignore[call-arg]
def test_run_success(api_client: PubMedAPIWrapper) -> None:
    """Test that returns the correct answer"""
    search_string = (
        "Examining the Validity of ChatGPT in Identifying "
        "Relevant Nephrology Literature"
    )
    output = api_client.run(search_string)
    test_string = (
        "Examining the Validity of ChatGPT in Identifying "
        "Relevant Nephrology Literature: Findings and Implications"
    )
    assert test_string in output
    # Output is truncated to exactly the configured character budget.
    assert len(output) == api_client.doc_content_chars_max


def test_run_returns_no_result(api_client: PubMedAPIWrapper) -> None:
    """Test that gives no result."""
    output = api_client.run("1605.08386WWW")
    # A miss maps to a fixed sentinel string.
    assert "No good PubMed Result was found" == output
def test_retrieve_article_returns_book_abstract(api_client: PubMedAPIWrapper) -> None:
    """Test that returns the excerpt of a book."""
    # Two book records: one without section labels, one with them.
    output_nolabel = api_client.retrieve_article("25905357", "")
    output_withlabel = api_client.retrieve_article("29262144", "")
    test_string_nolabel = (
        "Osteoporosis is a multifactorial disorder associated with low bone mass and "
        "enhanced skeletal fragility. Although"
    )
    assert test_string_nolabel in output_nolabel["Summary"]
    assert (
        "Wallenberg syndrome was first described in 1808 by Gaspard Vieusseux. However,"
        in output_withlabel["Summary"]
    )


def test_retrieve_article_returns_article_abstract(
    api_client: PubMedAPIWrapper,
) -> None:
    """Test that returns the abstract of an article."""
    output_nolabel = api_client.retrieve_article("37666905", "")
    output_withlabel = api_client.retrieve_article("37666551", "")
    test_string_nolabel = (
        "This work aims to: (1) Provide maximal hand force data on six different "
        "grasp types for healthy subjects; (2) detect grasp types with maximal "
        "force significantly affected by hand osteoarthritis (HOA) in women; (3) "
        "look for predictors to detect HOA from the maximal forces using discriminant "
        "analyses."
    )
    assert test_string_nolabel in output_nolabel["Summary"]
    # Labelled abstracts keep their section headers (e.g. "OBJECTIVES:").
    test_string_withlabel = (
        "OBJECTIVES: To assess across seven hospitals from six different countries "
        "the extent to which the COVID-19 pandemic affected the volumes of orthopaedic "
        "hospital admissions and patient outcomes for non-COVID-19 patients admitted "
        "for orthopaedic care."
    )
    assert test_string_withlabel in output_withlabel["Summary"]


def test_retrieve_article_no_abstract_available(api_client: PubMedAPIWrapper) -> None:
    """Test that returns 'No abstract available'."""
    output = api_client.retrieve_article("10766884", "")
    assert "No abstract available" == output["Summary"]
def assert_docs(docs: List[Document]) -> None:
    """Each document must expose exactly the standard PubMed metadata keys."""
    expected_keys = {"Copyright Information", "uid", "Title", "Published"}
    for document in docs:
        assert document.metadata
        assert set(document.metadata) == expected_keys
def test_load_success(api_client: PubMedAPIWrapper) -> None:
"""Test that returns one document"""
docs = api_client.load_docs("chatgpt")
assert len(docs) == api_client.top_k_results == 3
assert_docs(docs)
def test_load_returns_no_result(api_client: PubMedAPIWrapper) -> None:
"""Test that returns no docs"""
docs = api_client.load_docs("1605.08386WWW")
assert len(docs) == 0
def test_load_returns_limited_docs() -> None:
"""Test that returns several docs"""
expected_docs = 2
api_client = PubMedAPIWrapper(top_k_results=expected_docs) # type: ignore[call-arg]
docs = api_client.load_docs("ChatGPT")
assert len(docs) == expected_docs
assert_docs(docs)
def test_load_returns_full_set_of_metadata() -> None:
"""Test that returns several docs"""
api_client = PubMedAPIWrapper(load_max_docs=1, load_all_available_meta=True) # type: ignore[call-arg]
docs = api_client.load_docs("ChatGPT")
assert len(docs) == 3
for doc in docs:
assert doc.metadata
assert set(doc.metadata).issuperset(
{"Copyright Information", "Published", "Title", "uid"}
)
def _load_pubmed_from_universal_entry(**kwargs: Any) -> BaseTool:
from langchain.agents.load_tools import load_tools
tools = load_tools(["pubmed"], **kwargs)
assert len(tools) == 1, "loaded more than 1 tool"
return tools[0]
def test_load_pupmed_from_universal_entry() -> None:
pubmed_tool = _load_pubmed_from_universal_entry()
search_string = (
"Examining the Validity of ChatGPT in Identifying "
"Relevant Nephrology Literature"
)
output = pubmed_tool.invoke(search_string)
test_string = (
"Examining the Validity of ChatGPT in Identifying "
"Relevant Nephrology Literature: Findings and Implications"
)
assert test_string in output
def test_load_pupmed_from_universal_entry_with_params() -> None:
params = {
"top_k_results": 1,
}
pubmed_tool = _load_pubmed_from_universal_entry(**params)
assert isinstance(pubmed_tool, PubmedQueryRun)
wp = pubmed_tool.api_wrapper
assert wp.top_k_results == 1, "failed to assert top_k_results"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_googleserper_api.py | """Integration test for Serper.dev's Google Search API Wrapper."""
from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper
def test_search_call() -> None:
    """A plain web search should surface Obama's full name."""
    result = GoogleSerperAPIWrapper().run("What was Obama's first name?")
    assert "Barack Hussein Obama II" in result


def test_news_call() -> None:
    """A news-type search should return stock-market related text."""
    lowered = GoogleSerperAPIWrapper(type="news").run(
        "What's new with stock market?"
    ).lower()
    assert "stock" in lowered or "market" in lowered


async def test_results() -> None:
    """Structured results should carry the answer box."""
    response = GoogleSerperAPIWrapper().results("What was Obama's first name?")
    assert "Barack Hussein Obama II" in response["answerBox"]["answer"]


async def test_async_call() -> None:
    """The async run should surface Obama's full name."""
    result = await GoogleSerperAPIWrapper().arun("What was Obama's first name?")
    assert "Barack Hussein Obama II" in result


async def test_async_results() -> None:
    """The async structured results should carry the answer box."""
    response = await GoogleSerperAPIWrapper().aresults("What was Obama's first name?")
    assert "Barack Hussein Obama II" in response["answerBox"]["answer"]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_stackexchange.py | """Integration test for Stack Exchange."""
from langchain_community.utilities import StackExchangeAPIWrapper
def test_call() -> None:
    """A real query should produce some non-trivial output."""
    wrapper = StackExchangeAPIWrapper()  # type: ignore[call-arg]
    result = wrapper.run("zsh: command not found: python")
    assert result != "hello"


def test_failure() -> None:
    """A gibberish query should report that nothing was found."""
    wrapper = StackExchangeAPIWrapper()  # type: ignore[call-arg]
    result = wrapper.run("sjefbsmnf")
    assert result == "No relevant results found for 'sjefbsmnf' on Stack Overflow"


def test_success() -> None:
    """A real query should echo the search terms in the results."""
    wrapper = StackExchangeAPIWrapper()  # type: ignore[call-arg]
    result = wrapper.run("zsh: command not found: python")
    assert "zsh: command not found: python" in result
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_wolfram_alpha_api.py | """Integration test for Wolfram Alpha API Wrapper."""
from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper
def test_call() -> None:
    """Wolfram Alpha should solve a simple linear equation."""
    wrapper = WolframAlphaAPIWrapper()  # type: ignore[call-arg]
    result = wrapper.run("what is 2x+18=x+5?")
    assert "x = -13" in result
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_dataforseo_api.py | """Integration test for Dataforseo API Wrapper."""
from langchain_community.utilities.dataforseo_api_search import DataForSeoAPIWrapper
def test_search_call() -> None:
    """A plain search should surface the value of pi."""
    result = DataForSeoAPIWrapper().run("pi value")
    assert "3.14159" in result


def test_news_call() -> None:
    """A news search should mention Apple in a title or snippet."""
    wrapper = DataForSeoAPIWrapper(
        params={"se_type": "news"}, json_result_fields=["title", "snippet"]
    )
    items = wrapper.results("iphone")
    assert any("Apple" in item["title"] or "Apple" in item["snippet"] for item in items)


def test_loc_call() -> None:
    """A Spain-localized search should return Spanish URLs first."""
    wrapper = DataForSeoAPIWrapper(
        params={"location_name": "Spain", "language_code": "es"}
    )
    items = wrapper.results("iphone")
    assert "/es/" in items[0]["url"]


def test_maps_call() -> None:
    """A maps search localized to Spain should only return Spanish places."""
    wrapper = DataForSeoAPIWrapper(
        params={"location_name": "Spain", "language_code": "es", "se_type": "maps"}
    )
    items = wrapper.results("coffee")
    assert all(item["address_info"]["country_code"] == "ES" for item in items)


def test_events_call() -> None:
    """An events search localized to Spain should include Madrid events."""
    wrapper = DataForSeoAPIWrapper(
        params={"location_name": "Spain", "language_code": "es", "se_type": "events"}
    )
    items = wrapper.results("concerts")
    assert any(
        "Madrid" in ((item["location_info"] or dict())["address"] or "")
        for item in items
    )


async def test_async_call() -> None:
    """The async run should surface the value of pi."""
    result = await DataForSeoAPIWrapper().arun("pi value")
    assert "3.14159" in result


async def test_async_results() -> None:
    """The async structured results should carry the answer-box text."""
    wrapper = DataForSeoAPIWrapper(json_result_types=["answer_box"])
    items = await wrapper.aresults("New York timezone")
    assert "Eastern Daylight Time" in items[0]["text"]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_dataherald_api.py | """Integration test for Dataherald API Wrapper."""
from langchain_community.utilities.dataherald import DataheraldAPIWrapper
def test_call() -> None:
    """Dataherald should translate the question into the expected SQL."""
    wrapper = DataheraldAPIWrapper(db_connection_id="65fb766367dd22c99ce1a12d")  # type: ignore[call-arg]
    result = wrapper.run("How many employees are in the company?")
    assert "Answer: SELECT \n    COUNT(*) FROM \n    employees" in result
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_searchapi.py | """Integration tests for SearchApi"""
from langchain_community.utilities.searchapi import SearchApiAPIWrapper
def test_call() -> None:
    """A plain search should name Vilnius as the capital."""
    result = SearchApiAPIWrapper().run("What is the capital of Lithuania?")
    assert "Vilnius" in result


def test_results() -> None:
    """Structured results should mention Vilnius in every major section."""
    response = SearchApiAPIWrapper().results("What is the capital of Lithuania?")
    assert "Vilnius" in response["answer_box"]["answer"]
    assert "Vilnius" in response["answer_box"]["snippet"]
    assert "Vilnius" in response["knowledge_graph"]["description"]
    assert "Vilnius" in response["organic_results"][0]["snippet"]


def test_results_with_custom_params() -> None:
    """Custom localization params should be honoured by the API."""
    response = SearchApiAPIWrapper().results(
        "cafeteria",
        hl="es",
        gl="es",
        google_domain="google.es",
        location="Madrid, Spain",
    )
    assert "Madrid" in response["search_information"]["detected_location"]


def test_scholar_call() -> None:
    """The google_scholar engine should return scholarly text."""
    result = SearchApiAPIWrapper(engine="google_scholar").run("large language models")
    assert "state of large language models and their applications" in result


def test_jobs_call() -> None:
    """The google_jobs engine should return job-listing text."""
    result = SearchApiAPIWrapper(engine="google_jobs").run("AI")
    assert "years of experience" in result


async def test_async_call() -> None:
    """The async run should surface Obama's full name."""
    result = await SearchApiAPIWrapper().arun("What is Obama's full name?")
    assert "Barack Hussein Obama II" in result


async def test_async_results() -> None:
    """The async structured results should carry the knowledge graph."""
    response = await SearchApiAPIWrapper().aresults("What is Obama's full name?")
    assert "Barack Hussein Obama II" in response["knowledge_graph"]["description"]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_reddit_search_api.py | import pytest
from langchain_community.utilities.reddit_search import RedditSearchAPIWrapper
@pytest.fixture
def api_client() -> RedditSearchAPIWrapper:
    """Provide a fresh Reddit search wrapper for each test."""
    return RedditSearchAPIWrapper()  # type: ignore[call-arg]


def assert_results_exists(results: list) -> None:
    """Check every returned post carries the full set of expected fields."""
    if len(results) > 0:
        expected_fields = (
            "post_title",
            "post_author",
            "post_subreddit",
            "post_text",
            "post_url",
            "post_score",
            "post_category",
            "post_id",
        )
        for result in results:
            for field in expected_fields:
                assert field in result
    else:
        assert results == []


@pytest.mark.requires("praw")
def test_run_empty_query(api_client: RedditSearchAPIWrapper) -> None:
    """An empty query should report that no posts were found."""
    output = api_client.run(
        query="", sort="relevance", time_filter="all", subreddit="all", limit=5
    )
    assert output == "Searching r/all did not find any posts:"


@pytest.mark.requires("praw")
def test_run_query(api_client: RedditSearchAPIWrapper) -> None:
    """A real query against r/funny should mention the search term."""
    output = api_client.run(
        query="university",
        sort="relevance",
        time_filter="all",
        subreddit="funny",
        limit=5,
    )
    assert "University" in output


@pytest.mark.requires("praw")
def test_results_exists(api_client: RedditSearchAPIWrapper) -> None:
    """Structured results should follow the expected post schema."""
    posts = api_client.results(
        query="What is the best programming language?",
        sort="relevance",
        time_filter="all",
        subreddit="all",
        limit=10,
    )
    assert_results_exists(posts)


@pytest.mark.requires("praw")
def test_results_empty_query(api_client: RedditSearchAPIWrapper) -> None:
    """An empty query should yield an empty result list."""
    posts = api_client.results(
        query="", sort="relevance", time_filter="all", subreddit="all", limit=10
    )
    assert posts == []
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_bing_search.py | """Integration test for Bing Search API Wrapper."""
from langchain_community.utilities.bing_search import BingSearchAPIWrapper
def test_call() -> None:
    """A plain search should surface Obama's full name."""
    wrapper = BingSearchAPIWrapper()  # type: ignore[call-arg]
    assert "Barack Hussein Obama" in wrapper.run("Obama's first name")


def test_results() -> None:
    """Structured results should surface Obama's full name."""
    wrapper = BingSearchAPIWrapper()  # type: ignore[call-arg]
    entries = wrapper.results("Obama's first name", num_results=5)
    combined = "\n".join(
        f"{entry['title']}: {entry['snippet']}" for entry in entries
    )
    assert "Barack Hussein Obama" in combined
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_passio_nutrition_ai.py | """Integration test for Bing Search API Wrapper."""
from langchain_core.utils import get_from_env
from langchain_community.utilities.passio_nutrition_ai import (
ManagedPassioLifeAuth,
NutritionAIAPI,
)
def test_call() -> None:
    """Nutrition AI should recognize a named dish."""
    api_key = get_from_env("", "NUTRITIONAI_SUBSCRIPTION_KEY")
    wrapper = NutritionAIAPI(
        nutritionai_subscription_key=api_key, auth_=ManagedPassioLifeAuth(api_key)
    )
    response = wrapper.run("Chicken tikka masala")
    assert response is not None
    assert response["results"][0]["displayName"] == "Chicken tikka masala"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_steam_api.py | import ast
from langchain_community.utilities.steam import SteamWebAPIWrapper
def test_get_game_details() -> None:
    """Game-detail lookups should include all core fields."""
    steam = SteamWebAPIWrapper()  # type: ignore[call-arg]
    details = steam.run("get_game_details", "Terraria")
    for field in ("id", "link", "detailed description", "supported languages", "price"):
        assert field in details


def test_get_recommended_games() -> None:
    """Recommendations should yield exactly five games."""
    steam = SteamWebAPIWrapper()  # type: ignore[call-arg]
    raw = steam.run("get_recommended_games", "76561198362745711")
    recommended = ast.literal_eval(raw)
    assert len(recommended) == 5
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_alpha_vantage.py | """Integration test for Alpha Vantage API Wrapper."""
import pytest
from langchain_community.utilities.alpha_vantage import AlphaVantageAPIWrapper
@pytest.fixture
def api_wrapper() -> AlphaVantageAPIWrapper:
    """Build a wrapper; requires ALPHAVANTAGE_API_KEY in the environment."""
    return AlphaVantageAPIWrapper()


def test_search_symbols(api_wrapper: AlphaVantageAPIWrapper) -> None:
    """Symbol search should return a dict payload."""
    payload = api_wrapper.search_symbols("AAPL")
    assert payload is not None
    assert isinstance(payload, dict)


def test_market_news_sentiment(api_wrapper: AlphaVantageAPIWrapper) -> None:
    """News-sentiment lookup should return a dict payload."""
    payload = api_wrapper._get_market_news_sentiment("AAPL")
    assert payload is not None
    assert isinstance(payload, dict)


def test_time_series_daily(api_wrapper: AlphaVantageAPIWrapper) -> None:
    """Daily time-series lookup should return a dict payload."""
    payload = api_wrapper._get_time_series_daily("AAPL")
    assert payload is not None
    assert isinstance(payload, dict)


def test_quote_endpoint(api_wrapper: AlphaVantageAPIWrapper) -> None:
    """Quote-endpoint lookup should return a dict payload."""
    payload = api_wrapper._get_quote_endpoint("AAPL")
    assert payload is not None
    assert isinstance(payload, dict)


def test_time_series_weekly(api_wrapper: AlphaVantageAPIWrapper) -> None:
    """Weekly time-series lookup should return a dict payload."""
    payload = api_wrapper._get_time_series_weekly("AAPL")
    assert payload is not None
    assert isinstance(payload, dict)


def test_top_gainers_losers(api_wrapper: AlphaVantageAPIWrapper) -> None:
    """Top gainers/losers lookup should return a dict payload."""
    payload = api_wrapper._get_top_gainers_losers()
    assert payload is not None
    assert isinstance(payload, dict)


def test_exchange_rate(api_wrapper: AlphaVantageAPIWrapper) -> None:
    """Exchange-rate lookup should return a dict payload."""
    payload = api_wrapper._get_exchange_rate("USD", "EUR")
    assert payload is not None
    assert isinstance(payload, dict)


def test_run_method(api_wrapper: AlphaVantageAPIWrapper) -> None:
    """The default run entrypoint should return a dict payload."""
    payload = api_wrapper.run("USD", "EUR")
    assert payload is not None
    assert isinstance(payload, dict)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_wikipedia_api.py | """Integration test for Wikipedia API Wrapper."""
from typing import List
import pytest
from langchain_core.documents import Document
from langchain_community.utilities import WikipediaAPIWrapper
@pytest.fixture
def api_client() -> WikipediaAPIWrapper:
    """Provide a Wikipedia API wrapper with default settings."""
    return WikipediaAPIWrapper()  # type: ignore[call-arg]


def test_run_success(api_client: WikipediaAPIWrapper) -> None:
    """A known page should mention its author."""
    result = api_client.run("HUNTER X HUNTER")
    assert "Yoshihiro Togashi" in result


def test_run_no_result(api_client: WikipediaAPIWrapper) -> None:
    """A nonsense query should return the no-result message."""
    result = api_client.run(
        "NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL"
    )
    assert result == "No good Wikipedia Search Result was found"


def assert_docs(docs: List[Document], all_meta: bool = False) -> None:
    """Check content and metadata; extra metadata only allowed with all_meta."""
    core_meta = {"title", "summary", "source"}
    for document in docs:
        assert document.page_content
        assert document.metadata
        keys = set(document.metadata)
        assert keys.issuperset(core_meta)
        if all_meta:
            assert len(keys) > len(core_meta)
        else:
            assert len(keys) == len(core_meta)


def test_load_success(api_client: WikipediaAPIWrapper) -> None:
    """Default load should return between 2 and 3 documents."""
    documents = api_client.load("HUNTER X HUNTER")
    assert 1 < len(documents) <= 3
    assert_docs(documents, all_meta=False)


def test_load_success_all_meta(api_client: WikipediaAPIWrapper) -> None:
    """Loading with all metadata should add extra metadata keys."""
    api_client.load_all_available_meta = True
    documents = api_client.load("HUNTER X HUNTER")
    assert 1 < len(documents) <= 3
    assert_docs(documents, all_meta=True)


def test_load_more_docs_success(api_client: WikipediaAPIWrapper) -> None:
    """A larger top_k_results should load more documents, up to the cap."""
    top_k_results = 20
    api_client = WikipediaAPIWrapper(top_k_results=top_k_results)  # type: ignore[call-arg]
    documents = api_client.load("HUNTER X HUNTER")
    assert 10 < len(documents) <= top_k_results
    assert_docs(documents, all_meta=False)


def test_load_no_result(api_client: WikipediaAPIWrapper) -> None:
    """A nonsense query should load nothing."""
    documents = api_client.load(
        "NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL"
    )
    assert not documents
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_polygon.py | """Integration test for Polygon API Wrapper."""
from langchain_community.utilities.polygon import PolygonAPIWrapper
def test_get_last_quote() -> None:
    """Fetching the last quote for AAPL should return a non-None payload."""
    result = PolygonAPIWrapper().run("get_last_quote", "AAPL")
    assert result is not None
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/utilities/test_tensorflow_datasets.py | """Integration tests for the TensorFlow Dataset client."""
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from langchain_core.documents import Document
from pydantic import ValidationError
from langchain_community.utilities.tensorflow_datasets import TensorflowDatasets
if TYPE_CHECKING:
import tensorflow as tf
def decode_to_str(item: tf.Tensor) -> str:
    """Decode a scalar string tensor into a Python str (UTF-8)."""
    return item.numpy().decode("utf-8")


def mlqaen_example_to_document(example: dict) -> Document:
    """Convert one mlqa/en sample into a Document with QA metadata."""
    metadata = {
        "id": decode_to_str(example["id"]),
        "title": decode_to_str(example["title"]),
        "question": decode_to_str(example["question"]),
        "answer": decode_to_str(example["answers"]["text"][0]),
    }
    return Document(
        page_content=decode_to_str(example["context"]),
        metadata=metadata,
    )


# Cap on the number of documents fetched in these tests.
MAX_DOCS = 10


@pytest.fixture
def tfds_client() -> TensorflowDatasets:
    """Provide a client configured for the mlqa/en test split."""
    return TensorflowDatasets(  # type: ignore[call-arg]
        dataset_name="mlqa/en",
        split_name="test",
        load_max_docs=MAX_DOCS,
        sample_to_document_function=mlqaen_example_to_document,
    )


def test_load_success(tfds_client: TensorflowDatasets) -> None:
    """Loading should return MAX_DOCS well-formed documents."""
    documents = tfds_client.load()
    assert isinstance(documents, list)
    assert len(documents) == MAX_DOCS
    first = documents[0]
    assert isinstance(first, Document)
    assert len(first.page_content) > 0
    assert isinstance(first.page_content, str)
    assert isinstance(first.metadata, dict)


def test_load_fail_wrong_dataset_name() -> None:
    """An unknown dataset name should fail validation."""
    with pytest.raises(ValidationError) as exc_info:
        TensorflowDatasets(  # type: ignore[call-arg]
            dataset_name="wrong_dataset_name",
            split_name="test",
            load_max_docs=MAX_DOCS,
            sample_to_document_function=mlqaen_example_to_document,
        )
    assert "the dataset name is spelled correctly" in str(exc_info.value)


def test_load_fail_wrong_split_name() -> None:
    """An unknown split name should fail validation."""
    with pytest.raises(ValidationError) as exc_info:
        TensorflowDatasets(  # type: ignore[call-arg]
            dataset_name="mlqa/en",
            split_name="wrong_split_name",
            load_max_docs=MAX_DOCS,
            sample_to_document_function=mlqaen_example_to_document,
        )
    assert "Unknown split" in str(exc_info.value)


def test_load_fail_no_func() -> None:
    """Omitting the sample-conversion function should fail validation."""
    with pytest.raises(ValidationError) as exc_info:
        TensorflowDatasets(  # type: ignore[call-arg]
            dataset_name="mlqa/en",
            split_name="test",
            load_max_docs=MAX_DOCS,
        )
    assert "Please provide a function" in str(exc_info.value)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_sparkllm.py | """Test SparkLLM Text Embedding."""
from langchain_community.embeddings.sparkllm import SparkLLMTextEmbeddings
def test_baichuan_embedding_documents() -> None:
    """Document embedding should return one 2560-dim vector."""
    corpus = [
        "iFLYTEK is a well-known intelligent speech and artificial intelligence "
        "publicly listed company in the Asia-Pacific Region. Since its establishment,"
        "the company is devoted to cornerstone technological research "
        "in speech and languages, natural language understanding, machine learning,"
        "machine reasoning, adaptive learning, "
        "and has maintained the world-leading position in those "
        "domains. The company actively promotes the development of A.I. "
        "products and their sector-based "
        "applications, with visions of enabling machines to listen and speak, "
        "understand and think, "
        "creating a better world with artificial intelligence."
    ]
    embedder = SparkLLMTextEmbeddings()  # type: ignore[call-arg]
    vectors = embedder.embed_documents(corpus)
    assert len(vectors) == 1  # type: ignore[arg-type]
    assert len(vectors[0]) == 2560  # type: ignore[index]


def test_baichuan_embedding_query() -> None:
    """Query embedding should return a single 2560-dim vector."""
    query = (
        "iFLYTEK Open Platform was launched in 2010 by iFLYTEK as China’s "
        "first Artificial Intelligence open platform for Mobile Internet "
        "and intelligent hardware developers"
    )
    embedder = SparkLLMTextEmbeddings()  # type: ignore[call-arg]
    vector = embedder.embed_query(query)
    assert len(vector) == 2560  # type: ignore[arg-type]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_vertexai.py | """Test Vertex AI API wrapper.
In order to run this test, you need to install VertexAI SDK
pip install google-cloud-aiplatform>=1.35.0
Your end-user credentials would be used to make the calls (make sure you've run
`gcloud auth login` first).
"""
import pytest
from langchain_community.embeddings import VertexAIEmbeddings
def test_embedding_documents() -> None:
    """One document should embed to a 768-dim vector from the default model."""
    model = VertexAIEmbeddings()
    vectors = model.embed_documents(["foo bar"])
    assert len(vectors) == 1
    assert len(vectors[0]) == 768
    assert model.model_name == model.client._model_id
    assert model.model_name == "textembedding-gecko@001"


def test_embedding_query() -> None:
    """A query should embed to a 768-dim vector."""
    model = VertexAIEmbeddings()
    vector = model.embed_query("foo bar")
    assert len(vector) == 768


def test_large_batches() -> None:
    """Batch size should depend on the serving region."""
    corpus = ["foo bar"] * 251
    model_uscentral1 = VertexAIEmbeddings(location="us-central1")
    model_asianortheast1 = VertexAIEmbeddings(location="asia-northeast1")
    model_uscentral1.embed_documents(corpus)
    model_asianortheast1.embed_documents(corpus)
    assert model_uscentral1.instance["batch_size"] >= 250
    assert model_asianortheast1.instance["batch_size"] < 50


def test_paginated_texts() -> None:
    """Eight documents should embed to eight 768-dim vectors."""
    corpus = [
        "foo bar",
        "foo baz",
        "bar foo",
        "baz foo",
        "bar bar",
        "foo foo",
        "baz baz",
        "baz bar",
    ]
    model = VertexAIEmbeddings()
    vectors = model.embed_documents(corpus)
    assert len(vectors) == 8
    assert len(vectors[0]) == 768
    assert model.model_name == model.client._model_id


def test_warning(caplog: pytest.LogCaptureFixture) -> None:
    """Constructing without a model name should emit the deprecation warning."""
    _ = VertexAIEmbeddings()
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == "WARNING"
    expected_message = (
        "Model_name will become a required arg for VertexAIEmbeddings starting from "
        "Feb-01-2024. Currently the default is set to textembedding-gecko@001"
    )
    assert record.message == expected_message
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_volcano.py | """Test Bytedance Volcano Embedding."""
from langchain_community.embeddings import VolcanoEmbeddings
def test_embedding_documents() -> None:
    """Two documents should embed to two 1024-dim vectors."""
    embedder = VolcanoEmbeddings()  # type: ignore[call-arg]
    vectors = embedder.embed_documents(["foo", "bar"])
    assert len(vectors) == 2
    assert len(vectors[0]) == 1024


def test_embedding_query() -> None:
    """A query should embed to a 1024-dim vector."""
    embedder = VolcanoEmbeddings()  # type: ignore[call-arg]
    vector = embedder.embed_query("foo bar")
    assert len(vector) == 1024
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_baichuan.py | """Test Baichuan Text Embedding."""
from langchain_community.embeddings.baichuan import BaichuanTextEmbeddings
def test_baichuan_embedding_documents() -> None:
    """Two documents should embed to two 1024-dim vectors."""
    corpus = ["今天天气不错", "今天阳光灿烂"]
    embedder = BaichuanTextEmbeddings()  # type: ignore[call-arg]
    vectors = embedder.embed_documents(corpus)
    assert len(vectors) == 2  # type: ignore[arg-type]
    assert len(vectors[0]) == 1024  # type: ignore[index]


def test_baichuan_embedding_query() -> None:
    """A query should embed to a 1024-dim vector."""
    embedder = BaichuanTextEmbeddings()  # type: ignore[call-arg]
    vector = embedder.embed_query("所有的小学生都会学过只因兔同笼问题。")
    assert len(vector) == 1024  # type: ignore[arg-type]


def test_baichuan_embeddings_multi_documents() -> None:
    """A batch larger than one chunk should keep one vector per text."""
    doc_amount = 35
    corpus = ["午餐吃了螺蛳粉"] * doc_amount
    embedder = BaichuanTextEmbeddings()  # type: ignore[call-arg]
    vectors = embedder.embed_documents(corpus)
    assert len(vectors) == doc_amount  # type: ignore[arg-type]
    assert len(vectors[0]) == 1024  # type: ignore[index]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_naver.py | """Test Naver embeddings."""
from langchain_community.embeddings import ClovaXEmbeddings
def test_embedding_documents() -> None:
    """Sync document embedding should give one non-empty vector."""
    vectors = ClovaXEmbeddings().embed_documents(["foo bar"])
    assert len(vectors) == 1
    assert len(vectors[0]) > 0


async def test_aembedding_documents() -> None:
    """Async document embedding should give one non-empty vector."""
    vectors = await ClovaXEmbeddings().aembed_documents(["foo bar"])
    assert len(vectors) == 1
    assert len(vectors[0]) > 0


def test_embedding_query() -> None:
    """Sync query embedding should give a non-empty vector."""
    vector = ClovaXEmbeddings().embed_query("foo bar")
    assert len(vector) > 0


async def test_aembedding_query() -> None:
    """Async query embedding should give a non-empty vector."""
    vector = await ClovaXEmbeddings().aembed_query("foo bar")
    assert len(vector) > 0
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_zhipuai.py | """Test ZhipuAI Text Embedding."""
from langchain_community.embeddings.zhipuai import ZhipuAIEmbeddings
def test_zhipuai_embedding_documents() -> None:
    """Two documents should embed to two 1024-dim vectors."""
    corpus = ["This is a test query1.", "This is a test query2."]
    embedder = ZhipuAIEmbeddings()  # type: ignore[call-arg]
    vectors = embedder.embed_documents(corpus)
    assert len(vectors) == 2  # type: ignore[arg-type]
    assert len(vectors[0]) == 1024  # type: ignore[index]


def test_zhipuai_embedding_query() -> None:
    """A query should embed to a 1024-dim vector by default."""
    embedder = ZhipuAIEmbeddings()  # type: ignore[call-arg]
    vector = embedder.embed_query("This is a test query.")
    assert len(vector) == 1024  # type: ignore[arg-type]


def test_zhipuai_embedding_dimensions() -> None:
    """An explicit dimensions argument should control the vector size."""
    embedder = ZhipuAIEmbeddings(
        model="embedding-3",
        dimensions=2048,
    )  # type: ignore[call-arg]
    vector = embedder.embed_query("This is a test query.")
    assert len(vector) == 2048  # type: ignore[arg-type]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_voyageai.py | """Test voyage embeddings."""
from langchain_community.embeddings.voyageai import VoyageEmbeddings
# VOYAGE_API_KEY must be set in the environment; MODEL is used by most tests below.
MODEL = "voyage-2"


def test_voyagi_embedding_documents() -> None:
    """One document should embed to a 1024-dim vector."""
    embedder = VoyageEmbeddings(model=MODEL)  # type: ignore[call-arg]
    vectors = embedder.embed_documents(["foo bar"])
    assert len(vectors) == 1
    assert len(vectors[0]) == 1024


def test_voyagi_with_default_model() -> None:
    """Without a model, defaults (voyage-01, batch size 7) should apply."""
    embedder = VoyageEmbeddings()  # type: ignore[call-arg]
    assert embedder.model == "voyage-01"
    assert embedder.batch_size == 7
    corpus = [f"foo bar {i}" for i in range(72)]
    vectors = embedder.embed_documents(corpus)
    assert len(vectors) == 72
    assert len(vectors[0]) == 1024


def test_voyage_embedding_documents_multiple() -> None:
    """Batched embedding should keep one 1024-dim vector per document."""
    embedder = VoyageEmbeddings(model=MODEL, batch_size=2)
    assert embedder.model == MODEL
    vectors = embedder.embed_documents(["foo bar", "bar foo", "foo"])
    assert len(vectors) == 3
    for vector in vectors:
        assert len(vector) == 1024


def test_voyage_embedding_query() -> None:
    """A query should embed to a 1024-dim vector."""
    embedder = VoyageEmbeddings(model=MODEL)  # type: ignore[call-arg]
    vector = embedder.embed_query("foo bar")
    assert len(vector) == 1024
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_openai.py | """Test openai embeddings."""
import numpy as np
import pytest
from langchain_community.embeddings.openai import OpenAIEmbeddings
@pytest.mark.scheduled
def test_openai_embedding_documents() -> None:
"""Test openai embeddings."""
documents = ["foo bar"]
embedding = OpenAIEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 1536
@pytest.mark.scheduled
def test_openai_embedding_documents_multiple() -> None:
"""Test openai embeddings."""
documents = ["foo bar", "bar foo", "foo"]
embedding = OpenAIEmbeddings(chunk_size=2)
embedding.embedding_ctx_length = 8191
output = embedding.embed_documents(documents)
assert len(output) == 3
assert len(output[0]) == 1536
assert len(output[1]) == 1536
assert len(output[2]) == 1536
@pytest.mark.scheduled
async def test_openai_embedding_documents_async_multiple() -> None:
    """Asynchronously embed three documents and verify sizes."""
    texts = ["foo bar", "bar foo", "foo"]
    embedder = OpenAIEmbeddings(chunk_size=2)
    # Pin the context length explicitly rather than relying on the default.
    embedder.embedding_ctx_length = 8191
    vectors = await embedder.aembed_documents(texts)
    assert len(vectors) == 3
    for vec in vectors:
        assert len(vec) == 1536
@pytest.mark.scheduled
def test_openai_embedding_query() -> None:
    """Embed a single query string synchronously."""
    query = "foo bar"
    embedder = OpenAIEmbeddings()
    vector = embedder.embed_query(query)
    assert len(vector) == 1536
@pytest.mark.scheduled
async def test_openai_embedding_async_query() -> None:
    """Embed a single query string asynchronously."""
    query = "foo bar"
    embedder = OpenAIEmbeddings()
    vector = await embedder.aembed_query(query)
    assert len(vector) == 1536
@pytest.mark.skip(reason="Unblock scheduled testing. TODO: fix.")
@pytest.mark.scheduled
def test_openai_embedding_with_empty_string() -> None:
    """An empty string should embed to the same vector as the raw API call."""
    import openai

    texts = ["", "abc"]
    embedder = OpenAIEmbeddings()
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 2
    assert len(vectors[0]) == 1536
    # Reference result from a direct (legacy v0 SDK) API call for "".
    reference = openai.Embedding.create(input="", model="text-embedding-ada-002")[  # type: ignore[attr-defined]
        "data"
    ][0]["embedding"]
    assert np.allclose(vectors[0], reference)
    assert len(vectors[1]) == 1536
@pytest.mark.scheduled
def test_embed_documents_normalized() -> None:
    """Document embeddings should come back unit-normalized."""
    vectors = OpenAIEmbeddings().embed_documents(["foo walked to the market"])
    assert np.isclose(np.linalg.norm(vectors[0]), 1.0)
@pytest.mark.scheduled
def test_embed_query_normalized() -> None:
    """Query embeddings should come back unit-normalized."""
    vector = OpenAIEmbeddings().embed_query("foo walked to the market")
    assert np.isclose(np.linalg.norm(vector), 1.0)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_edenai.py | """Test edenai embeddings."""
from langchain_community.embeddings.edenai import EdenAiEmbeddings
def test_edenai_embedding_documents() -> None:
    """Embed two documents through EdenAI's openai provider."""
    texts = ["foo bar", "test text"]
    embedder = EdenAiEmbeddings(provider="openai")  # type: ignore[call-arg]
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 2
    for vec in vectors:
        assert len(vec) == 1536
def test_edenai_embedding_query() -> None:
    """Embed a query through EdenAI's google provider."""
    query = "foo bar"
    embedder = EdenAiEmbeddings(provider="google")  # type: ignore[call-arg]
    vector = embedder.embed_query(query)
    assert len(vector) == 768
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_octoai_embeddings.py | """Test octoai embeddings."""
from langchain_community.embeddings.octoai_embeddings import (
OctoAIEmbeddings,
)
def test_octoai_embedding_documents() -> None:
    """Embed a single document via OctoAI and check the vector size."""
    texts = ["foo bar"]
    embedder = OctoAIEmbeddings()
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 1
    assert len(vectors[0]) == 1024
def test_octoai_embedding_query() -> None:
    """Embed a query via OctoAI and check the vector size."""
    query = "foo bar"
    embedder = OctoAIEmbeddings()
    vector = embedder.embed_query(query)
    assert len(vector) == 1024
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_mosaicml.py | """Test mosaicml embeddings."""
from langchain_community.embeddings.mosaicml import MosaicMLInstructorEmbeddings
def test_mosaicml_embedding_documents() -> None:
    """Embed a single document via MosaicML instructor embeddings."""
    texts = ["foo bar"]
    embedder = MosaicMLInstructorEmbeddings()
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 1
    assert len(vectors[0]) == 768
def test_mosaicml_embedding_documents_multiple() -> None:
    """Embed three documents and verify all vectors are 768-dimensional."""
    texts = ["foo bar", "bar foo", "foo"]
    embedder = MosaicMLInstructorEmbeddings()
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 3
    for vec in vectors:
        assert len(vec) == 768
def test_mosaicml_embedding_query() -> None:
    """Embed a query string via MosaicML instructor embeddings."""
    query = "foo bar"
    embedder = MosaicMLInstructorEmbeddings()
    vector = embedder.embed_query(query)
    assert len(vector) == 768
def test_mosaicml_embedding_endpoint() -> None:
    """MosaicML embeddings should work against an explicit endpoint URL."""
    texts = ["foo bar"]
    endpoint = "https://models.hosted-on.mosaicml.hosting/instructor-xl/v1/predict"
    embedder = MosaicMLInstructorEmbeddings(endpoint_url=endpoint)
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 1
    assert len(vectors[0]) == 768
def test_mosaicml_embedding_query_instruction() -> None:
    """A custom query instruction should still yield a 768-dim vector."""
    query = "foo bar"
    embedder = MosaicMLInstructorEmbeddings(query_instruction="Embed this query:")
    vector = embedder.embed_query(query)
    assert len(vector) == 768
def test_mosaicml_embedding_document_instruction() -> None:
    """A custom document (embed) instruction should still yield 768 dims."""
    texts = ["foo bar"]
    embedder = MosaicMLInstructorEmbeddings(embed_instruction="Embed this document:")
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 1
    assert len(vectors[0]) == 768
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_cloudflare_workersai.py | """Test Cloudflare Workers AI embeddings."""
import responses
from langchain_community.embeddings.cloudflare_workersai import (
CloudflareWorkersAIEmbeddings,
)
@responses.activate
def test_cloudflare_workers_ai_embedding_documents() -> None:
    """Embed three documents against a mocked Workers AI endpoint."""
    texts = ["foo bar", "foo bar", "foo bar"]
    mock_payload = {
        "result": {
            "shape": [3, 768],
            "data": [[0.0] * 768, [0.0] * 768, [0.0] * 768],
        },
        "success": "true",
        "errors": [],
        "messages": [],
    }
    responses.add(
        responses.POST,
        "https://api.cloudflare.com/client/v4/accounts/123/ai/run/@cf/baai/bge-base-en-v1.5",
        json=mock_payload,
    )
    embedder = CloudflareWorkersAIEmbeddings(account_id="123", api_token="abc")
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 3
    assert len(vectors[0]) == 768
@responses.activate
def test_cloudflare_workers_ai_embedding_query() -> None:
    """Embed a query against a mocked Workers AI endpoint."""
    mock_payload = {
        "result": {"shape": [1, 768], "data": [[0.0] * 768]},
        "success": "true",
        "errors": [],
        "messages": [],
    }
    responses.add(
        responses.POST,
        "https://api.cloudflare.com/client/v4/accounts/123/ai/run/@cf/baai/bge-base-en-v1.5",
        json=mock_payload,
    )
    query = "foo bar"
    embedder = CloudflareWorkersAIEmbeddings(account_id="123", api_token="abc")
    vector = embedder.embed_query(query)
    assert len(vector) == 768
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_sentence_transformer.py | # flake8: noqa
"""Test sentence_transformer embeddings."""
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_community.embeddings.sentence_transformer import (
SentenceTransformerEmbeddings,
)
def test_sentence_transformer_embedding_documents() -> None:
    """Embed a single document with the default sentence-transformer model."""
    embedder = SentenceTransformerEmbeddings()
    vectors = embedder.embed_documents(["foo bar"])
    assert len(vectors) == 1
    assert len(vectors[0]) == 384
def test_sentence_transformer_embedding_query() -> None:
    """Embed a query with the default sentence-transformer model."""
    embedder = SentenceTransformerEmbeddings()
    vector = embedder.embed_query("what the foo is a bar?")
    assert len(vector) == 384
def test_sentence_transformer_db_query() -> None:
    """Similarity search over an in-memory store should rank the related text first."""
    embedder = SentenceTransformerEmbeddings()
    texts = [
        "we will foo your bar until you can't foo any more",
        "the quick brown fox jumped over the lazy dog",
    ]
    query = "what the foo is a bar?"
    query_vector = embedder.embed_query(query)
    assert len(query_vector) == 384
    store = InMemoryVectorStore(embedding=embedder)
    store.add_texts(texts)
    hits = store.similarity_search_by_vector(query_vector, k=2)
    # The foo/bar sentence should be the nearest neighbour of a foo/bar query.
    assert hits[0].page_content == "we will foo your bar until you can't foo any more"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_tensorflow_hub.py | """Test TensorflowHub embeddings."""
from langchain_community.embeddings import TensorflowHubEmbeddings
def test_tensorflowhub_embedding_documents() -> None:
    """Embed a single document via TensorFlow Hub."""
    texts = ["foo bar"]
    embedder = TensorflowHubEmbeddings()
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 1
    assert len(vectors[0]) == 512
def test_tensorflowhub_embedding_query() -> None:
    """Embed a query via TensorFlow Hub."""
    query = "foo bar"
    embedder = TensorflowHubEmbeddings()
    vector = embedder.embed_query(query)
    assert len(vector) == 512
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_laser.py | """Test LASER embeddings."""
import pytest
from langchain_community.embeddings.laser import LaserEmbeddings
@pytest.mark.filterwarnings("ignore::UserWarning:")
@pytest.mark.parametrize("lang", [None, "lus_Latn", "english"])
def test_laser_embedding_documents(lang: str) -> None:
    """Embed two documents with LASER for several language settings.

    The LASER implementation emits a UserWarning, which is ignored here.
    NOTE(review): ``lang`` is annotated ``str`` but is also parametrized
    with ``None`` — the annotation is looser in practice.
    """
    texts = ["hello", "world"]
    embedder = LaserEmbeddings(lang=lang)  # type: ignore[call-arg]
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 2  # type: ignore[arg-type]
    assert len(vectors[0]) == 1024  # type: ignore[index]
@pytest.mark.filterwarnings("ignore::UserWarning:")
@pytest.mark.parametrize("lang", [None, "lus_Latn", "english"])
def test_laser_embedding_query(lang: str) -> None:
    """Embed a query with LASER for several language settings.

    The LASER implementation emits a UserWarning, which is ignored here.
    """
    text = "hello world"
    embedder = LaserEmbeddings(lang=lang)  # type: ignore[call-arg]
    vector = embedder.embed_query(text)
    assert len(vector) == 1024
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_johnsnowlabs.py | """Test johnsnowlabs embeddings."""
from langchain_community.embeddings.johnsnowlabs import JohnSnowLabsEmbeddings
def test_johnsnowlabs_embed_document() -> None:
    """Embed two documents with JohnSnowLabs and check dimensionality."""
    texts = ["foo bar", "bar foo"]
    embedder = JohnSnowLabsEmbeddings()
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 2
    assert len(vectors[0]) == 128
def test_johnsnowlabs_embed_query() -> None:
    """Embed a query with JohnSnowLabs and check dimensionality."""
    query = "foo bar"
    embedder = JohnSnowLabsEmbeddings()
    vector = embedder.embed_query(query)
    assert len(vector) == 128
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_huggingface.py | """Test huggingface embeddings."""
from langchain_community.embeddings.huggingface import (
HuggingFaceEmbeddings,
HuggingFaceInstructEmbeddings,
)
def test_huggingface_embedding_documents() -> None:
    """Embed a single document with the default HuggingFace model."""
    texts = ["foo bar"]
    embedder = HuggingFaceEmbeddings()
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 1
    assert len(vectors[0]) == 768
def test_huggingface_embedding_query() -> None:
    """Embed a query, passing custom encode kwargs through to the model."""
    query = "foo bar"
    embedder = HuggingFaceEmbeddings(encode_kwargs={"batch_size": 16})
    vector = embedder.embed_query(query)
    assert len(vector) == 768
def test_huggingface_instructor_embedding_documents() -> None:
    """Embed a single document with the instructor-base model."""
    texts = ["foo bar"]
    embedder = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-base")
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 1
    assert len(vectors[0]) == 768
def test_huggingface_instructor_embedding_query() -> None:
    """Embed a query with the instructor-base model."""
    query = "foo bar"
    embedder = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-base")
    vector = embedder.embed_query(query)
    assert len(vector) == 768
def test_huggingface_instructor_embedding_normalize() -> None:
    """Normalized instructor embeddings should have (squared) unit L2 norm."""
    query = "foo bar"
    embedder = HuggingFaceInstructEmbeddings(
        model_name="hkunlp/instructor-base",
        encode_kwargs={"normalize_embeddings": True},
    )
    vector = embedder.embed_query(query)
    assert len(vector) == 768
    # With normalization on, the sum of squared components must be ~1.
    tolerance = 1e-5
    squared_norm = sum(component**2 for component in vector)
    assert abs(1 - squared_norm) <= tolerance
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_ipex_llm.py | """Test IPEX LLM"""
import os
import pytest
from langchain_community.embeddings import IpexLLMBgeEmbeddings
# Comma-separated list of model ids to test; when unset, every test is skipped.
model_ids_to_test = os.getenv("TEST_IPEXLLM_BGE_EMBEDDING_MODEL_IDS") or ""
# The skip marker must capture the raw string BEFORE it is re-bound below.
skip_if_no_model_ids = pytest.mark.skipif(
    not model_ids_to_test,
    reason="TEST_IPEXLLM_BGE_EMBEDDING_MODEL_IDS environment variable not set.",
)
# Re-bound from str to list[str] for use with @pytest.mark.parametrize.
model_ids_to_test = [model_id.strip() for model_id in model_ids_to_test.split(",")]  # type: ignore
# Target device for the embedding model; defaults to CPU.
device = os.getenv("TEST_IPEXLLM_BGE_EMBEDDING_MODEL_DEVICE") or "cpu"
# Shared inputs: one long sentence (note the line-continuation backslash,
# which keeps this a single string) and one short query.
sentence = "IPEX-LLM is a PyTorch library for running LLM on Intel CPU and GPU (e.g., \
local PC with iGPU, discrete GPU such as Arc, Flex and Max) with very low latency."
query = "What is IPEX-LLM?"
@skip_if_no_model_ids
@pytest.mark.parametrize(
    "model_id",
    model_ids_to_test,
)
def test_embed_documents(model_id: str) -> None:
    """IpexLLMBgeEmbeddings.embed_documents returns one vector per input."""
    embedder = IpexLLMBgeEmbeddings(
        model_name=model_id,
        model_kwargs={"device": device},
        encode_kwargs={"normalize_embeddings": True},
    )
    vectors = embedder.embed_documents([sentence, query])
    assert len(vectors) == 2
@skip_if_no_model_ids
@pytest.mark.parametrize(
    "model_id",
    model_ids_to_test,
)
def test_embed_query(model_id: str) -> None:
    """IpexLLMBgeEmbeddings.embed_query returns a list for a single query."""
    embedder = IpexLLMBgeEmbeddings(
        model_name=model_id,
        model_kwargs={"device": device},
        encode_kwargs={"normalize_embeddings": True},
    )
    vector = embedder.embed_query(query)
    assert isinstance(vector, list)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_dashscope.py | """Test dashscope embeddings."""
import numpy as np
from langchain_community.embeddings.dashscope import DashScopeEmbeddings
def test_dashscope_embedding_documents() -> None:
    """Embed a single document with DashScope text-embedding-v1."""
    texts = ["foo bar"]
    embedder = DashScopeEmbeddings(model="text-embedding-v1")  # type: ignore[call-arg]
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 1
    assert len(vectors[0]) == 1536
def test_dashscope_embedding_documents_multiple() -> None:
    """Embed 28 documents, enough to span more than one request batch."""
    texts = ["foo bar", "bar foo", "foo"] + [f"foo{i}" for i in range(25)]
    embedder = DashScopeEmbeddings(model="text-embedding-v1")  # type: ignore[call-arg]
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 28
    for vec in vectors[:3]:
        assert len(vec) == 1536
def test_dashscope_embedding_query() -> None:
    """Embed a query with DashScope text-embedding-v1."""
    query = "foo bar"
    embedder = DashScopeEmbeddings(model="text-embedding-v1")  # type: ignore[call-arg]
    vector = embedder.embed_query(query)
    assert len(vector) == 1536
def test_dashscope_embedding_with_empty_string() -> None:
    """An empty string should embed to the same vector as the raw API call."""
    import dashscope

    texts = ["", "abc"]
    embedder = DashScopeEmbeddings(model="text-embedding-v1")  # type: ignore[call-arg]
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 2
    assert len(vectors[0]) == 1536
    # Reference result from a direct DashScope API call for "".
    reference = dashscope.TextEmbedding.call(
        input="", model="text-embedding-v1", text_type="document"
    ).output["embeddings"][0]["embedding"]
    assert np.allclose(vectors[0], reference)
    assert len(vectors[1]) == 1536
if __name__ == "__main__":
    # Allow running this module directly without pytest.
    for test_fn in (
        test_dashscope_embedding_documents,
        test_dashscope_embedding_documents_multiple,
        test_dashscope_embedding_query,
        test_dashscope_embedding_with_empty_string,
    ):
        test_fn()
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_elasticsearch.py | """Test elasticsearch_embeddings embeddings."""
import pytest
from langchain_community.embeddings.elasticsearch import ElasticsearchEmbeddings
@pytest.fixture
def model_id() -> str:
    """Deployed Elasticsearch model id used by the tests below."""
    # Replace with your actual model_id
    return "your_model_id"
def test_elasticsearch_embedding_documents(model_id: str) -> None:
    """Embed three documents via Elasticsearch credentials-based client."""
    texts = ["foo bar", "bar foo", "foo"]
    embedder = ElasticsearchEmbeddings.from_credentials(model_id)
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 3
    # 768 is the expected embedding size for the configured model.
    for vec in vectors:
        assert len(vec) == 768
def test_elasticsearch_embedding_query(model_id: str) -> None:
    """Embed a query via Elasticsearch credentials-based client."""
    query = "foo bar"
    embedder = ElasticsearchEmbeddings.from_credentials(model_id)
    vector = embedder.embed_query(query)
    # 768 is the expected embedding size for the configured model.
    assert len(vector) == 768
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_ernie.py | import pytest
from langchain_community.embeddings.ernie import ErnieEmbeddings
def test_embedding_documents_1() -> None:
    """Embed a single document with Ernie embeddings."""
    texts = ["foo bar"]
    embedder = ErnieEmbeddings()
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 1
    assert len(vectors[0]) == 384
def test_embedding_documents_2() -> None:
    """Embed two documents with Ernie embeddings."""
    texts = ["foo", "bar"]
    embedder = ErnieEmbeddings()
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 2
    for vec in vectors:
        assert len(vec) == 384
def test_embedding_query() -> None:
    """Embed a single query with Ernie embeddings."""
    text = "foo"
    embedder = ErnieEmbeddings()
    vector = embedder.embed_query(text)
    assert len(vector) == 384
def test_max_chunks() -> None:
    """Embedding 20 documents with the default chunk size succeeds."""
    texts = [f"text-{i}" for i in range(20)]
    embedder = ErnieEmbeddings()
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 20
def test_too_many_chunks() -> None:
    """Embedding with chunk_size=20 is expected to raise ValueError."""
    texts = [f"text-{i}" for i in range(20)]
    embedder = ErnieEmbeddings(chunk_size=20)
    with pytest.raises(ValueError):
        embedder.embed_documents(texts)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_self_hosted.py | """Test self-hosted embeddings."""
from typing import Any
from langchain_community.embeddings import (
SelfHostedEmbeddings,
SelfHostedHuggingFaceEmbeddings,
SelfHostedHuggingFaceInstructEmbeddings,
)
def get_remote_instance() -> Any:
    """Provision the remote runhouse GPU cluster used by these tests."""
    import runhouse as rh

    cluster = rh.cluster(name="rh-a10x", instance_type="A100:1", use_spot=False)
    cluster.install_packages(["pip:./"])
    return cluster
def test_self_hosted_huggingface_embedding_documents() -> None:
    """Embed a single document on self-hosted HuggingFace embeddings."""
    texts = ["foo bar"]
    hardware = get_remote_instance()
    embedder = SelfHostedHuggingFaceEmbeddings(hardware=hardware)
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 1
    assert len(vectors[0]) == 768
def test_self_hosted_huggingface_embedding_query() -> None:
    """Embed a query on self-hosted HuggingFace embeddings."""
    query = "foo bar"
    hardware = get_remote_instance()
    embedder = SelfHostedHuggingFaceEmbeddings(hardware=hardware)
    vector = embedder.embed_query(query)
    assert len(vector) == 768
def test_self_hosted_huggingface_instructor_embedding_documents() -> None:
    """Embed a single document on self-hosted instructor embeddings."""
    texts = ["foo bar"]
    hardware = get_remote_instance()
    embedder = SelfHostedHuggingFaceInstructEmbeddings(hardware=hardware)
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 1
    assert len(vectors[0]) == 768
def test_self_hosted_huggingface_instructor_embedding_query() -> None:
    """Embed a query on self-hosted instructor embeddings."""
    query = "foo bar"
    hardware = get_remote_instance()
    embedder = SelfHostedHuggingFaceInstructEmbeddings(hardware=hardware)
    vector = embedder.embed_query(query)
    assert len(vector) == 768
def get_pipeline() -> Any:
    """Build a feature-extraction pipeline around facebook/bart-base."""
    from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

    model_id = "facebook/bart-base"
    return pipeline(
        "feature-extraction",
        model=AutoModelForCausalLM.from_pretrained(model_id),
        tokenizer=AutoTokenizer.from_pretrained(model_id),
    )
def inference_fn(pipeline: Any, prompt: str) -> Any:
    """Run *pipeline* on *prompt*, keeping only the last hidden state.

    Accepts either a single prompt or a list of prompts, mirroring the
    embed_query/embed_documents call patterns.
    """
    if not isinstance(prompt, list):
        return pipeline(prompt)[0][-1]
    return [result[0][-1] for result in pipeline(prompt)]
def test_self_hosted_embedding_documents() -> None:
    """Embed two documents through a custom self-hosted pipeline."""
    texts = ["foo bar"] * 2
    hardware = get_remote_instance()
    embedder = SelfHostedEmbeddings(  # type: ignore[call-arg]
        model_load_fn=get_pipeline, hardware=hardware, inference_fn=inference_fn
    )
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 2
    assert len(vectors[0]) == 50265
def test_self_hosted_embedding_query() -> None:
    """Embed a query through a custom self-hosted pipeline."""
    query = "foo bar"
    hardware = get_remote_instance()
    embedder = SelfHostedEmbeddings(  # type: ignore[call-arg]
        model_load_fn=get_pipeline, hardware=hardware, inference_fn=inference_fn
    )
    vector = embedder.embed_query(query)
    assert len(vector) == 50265
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_fastembed.py | """Test FastEmbed embeddings."""
import pytest
from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
@pytest.mark.parametrize(
    "model_name", ["sentence-transformers/all-MiniLM-L6-v2", "BAAI/bge-small-en-v1.5"]
)
@pytest.mark.parametrize("max_length", [50, 512])
@pytest.mark.parametrize("doc_embed_type", ["default", "passage"])
@pytest.mark.parametrize("threads", [0, 10])
@pytest.mark.parametrize("batch_size", [1, 10])
def test_fastembed_embedding_documents(
    model_name: str, max_length: int, doc_embed_type: str, threads: int, batch_size: int
) -> None:
    """Embed two documents across the full FastEmbed parameter grid."""
    texts = ["foo bar", "bar foo"]
    embedder = FastEmbedEmbeddings(  # type: ignore[call-arg]
        model_name=model_name,
        max_length=max_length,
        doc_embed_type=doc_embed_type,  # type: ignore[arg-type]
        threads=threads,
        batch_size=batch_size,
    )
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 2
    assert len(vectors[0]) == 384
@pytest.mark.parametrize(
    "model_name", ["sentence-transformers/all-MiniLM-L6-v2", "BAAI/bge-small-en-v1.5"]
)
@pytest.mark.parametrize("max_length", [50, 512])
@pytest.mark.parametrize("batch_size", [1, 10])
def test_fastembed_embedding_query(
    model_name: str, max_length: int, batch_size: int
) -> None:
    """Embed a query across the FastEmbed parameter grid."""
    text = "foo bar"
    embedder = FastEmbedEmbeddings(  # type: ignore[call-arg]
        model_name=model_name, max_length=max_length, batch_size=batch_size
    )
    vector = embedder.embed_query(text)
    assert len(vector) == 384
@pytest.mark.parametrize(
    "model_name", ["sentence-transformers/all-MiniLM-L6-v2", "BAAI/bge-small-en-v1.5"]
)
@pytest.mark.parametrize("max_length", [50, 512])
@pytest.mark.parametrize("doc_embed_type", ["default", "passage"])
@pytest.mark.parametrize("threads", [0, 10])
async def test_fastembed_async_embedding_documents(
    model_name: str, max_length: int, doc_embed_type: str, threads: int
) -> None:
    """Asynchronously embed two documents across the parameter grid."""
    texts = ["foo bar", "bar foo"]
    embedder = FastEmbedEmbeddings(  # type: ignore[call-arg]
        model_name=model_name,
        max_length=max_length,
        doc_embed_type=doc_embed_type,  # type: ignore[arg-type]
        threads=threads,
    )
    vectors = await embedder.aembed_documents(texts)
    assert len(vectors) == 2
    assert len(vectors[0]) == 384
@pytest.mark.parametrize(
    "model_name", ["sentence-transformers/all-MiniLM-L6-v2", "BAAI/bge-small-en-v1.5"]
)
@pytest.mark.parametrize("max_length", [50, 512])
async def test_fastembed_async_embedding_query(
    model_name: str, max_length: int
) -> None:
    """Asynchronously embed a query across the parameter grid."""
    text = "foo bar"
    embedder = FastEmbedEmbeddings(model_name=model_name, max_length=max_length)  # type: ignore[call-arg]
    vector = await embedder.aembed_query(text)
    assert len(vector) == 384
def test_fastembed_embedding_query_with_default_params() -> None:
    """Embedding a query with all-default model params yields 384 dims."""
    text = "foo bar"
    embedder = FastEmbedEmbeddings()  # type: ignore[call-arg]
    vector = embedder.embed_query(text)
    assert len(vector) == 384
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_huggingface_hub.py | """Test HuggingFaceHub embeddings."""
import pytest
from langchain_community.embeddings import HuggingFaceHubEmbeddings
def test_huggingfacehub_embedding_documents() -> None:
    """Embed a single document via the HuggingFace Hub."""
    texts = ["foo bar"]
    embedder = HuggingFaceHubEmbeddings()  # type: ignore[call-arg]
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 1
    assert len(vectors[0]) == 768
async def test_huggingfacehub_embedding_async_documents() -> None:
    """Asynchronously embed a single document via the HuggingFace Hub."""
    texts = ["foo bar"]
    embedder = HuggingFaceHubEmbeddings()  # type: ignore[call-arg]
    vectors = await embedder.aembed_documents(texts)
    assert len(vectors) == 1
    assert len(vectors[0]) == 768
def test_huggingfacehub_embedding_query() -> None:
    """Embed a query via the HuggingFace Hub."""
    text = "foo bar"
    embedder = HuggingFaceHubEmbeddings()  # type: ignore[call-arg]
    vector = embedder.embed_query(text)
    assert len(vector) == 768
async def test_huggingfacehub_embedding_async_query() -> None:
    """Asynchronously embed a query via the HuggingFace Hub."""
    text = "foo bar"
    embedder = HuggingFaceHubEmbeddings()  # type: ignore[call-arg]
    vector = await embedder.aembed_query(text)
    assert len(vector) == 768
def test_huggingfacehub_embedding_invalid_repo() -> None:
    """Repo ids outside sentence-transformers should be rejected."""
    # Only sentence-transformers models are currently supported.
    with pytest.raises(ValueError):
        HuggingFaceHubEmbeddings(repo_id="allenai/specter")  # type: ignore[call-arg]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_llamacpp.py | # flake8: noqa
"""Test llamacpp embeddings."""
import os
from urllib.request import urlretrieve
from langchain_community.embeddings.llamacpp import LlamaCppEmbeddings
def get_model() -> str:
    """Download model.

    From https://huggingface.co/Sosaka/Alpaca-native-4bit-ggml/,
    convert to new ggml format and return model path.
    """
    model_url = "https://huggingface.co/Sosaka/Alpaca-native-4bit-ggml/resolve/main/ggml-alpaca-7b-q4.bin"
    tokenizer_url = "https://huggingface.co/decapoda-research/llama-7b-hf/resolve/main/tokenizer.model"
    conversion_script = "https://github.com/ggerganov/llama.cpp/raw/master/convert-unversioned-ggml-to-ggml.py"

    local_filename = model_url.split("/")[-1]
    # Fetch each artifact only if it is not already present on disk,
    # in the same order as before: script, tokenizer, then model weights.
    for url, destination in (
        (conversion_script, "convert-unversioned-ggml-to-ggml.py"),
        (tokenizer_url, "tokenizer.model"),
        (model_url, local_filename),
    ):
        if not os.path.exists(destination):
            urlretrieve(url, destination)
    os.system("python convert-unversioned-ggml-to-ggml.py . tokenizer.model")
    return local_filename
def test_llamacpp_embedding_documents() -> None:
    """Embed a single document with a locally converted llama.cpp model."""
    texts = ["foo bar"]
    embedder = LlamaCppEmbeddings(model_path=get_model())  # type: ignore[call-arg]
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 1
    assert len(vectors[0]) == 512
def test_llamacpp_embedding_query() -> None:
    """Embed a query with a locally converted llama.cpp model."""
    text = "foo bar"
    embedder = LlamaCppEmbeddings(model_path=get_model())  # type: ignore[call-arg]
    vector = embedder.embed_query(text)
    assert len(vector) == 512
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_premai.py | """Test PremAIEmbeddings from PremAI API wrapper.
Note: This test must be run with the PREMAI_API_KEY environment variable set to a valid
API key and a valid project_id. This needs to setup a project in PremAI's platform.
You can check it out here: https://app.premai.io
"""
import pytest
from langchain_community.embeddings.premai import PremAIEmbeddings
@pytest.fixture
def embedder() -> PremAIEmbeddings:
    """Shared PremAI embeddings client pointed at test project 8."""
    client = PremAIEmbeddings(project_id=8, model="text-embedding-3-small")  # type: ignore[call-arg]
    return client
def test_prem_embedding_documents(embedder: PremAIEmbeddings) -> None:
    """Embed a single document with PremAI."""
    texts = ["foo bar"]
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 1
    assert len(vectors[0]) == 1536
def test_prem_embedding_documents_multiple(embedder: PremAIEmbeddings) -> None:
    """Embed three documents with PremAI and verify every vector's size."""
    texts = ["foo bar", "bar foo", "foo"]
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 3
    for vec in vectors:
        assert len(vec) == 1536
def test_prem_embedding_query(embedder: PremAIEmbeddings) -> None:
    """Embed a single query with PremAI."""
    text = "foo bar"
    vector = embedder.embed_query(text)
    assert len(vector) == 1536
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_modelscope_hub.py | """Test modelscope embeddings."""
from langchain_community.embeddings.modelscope_hub import ModelScopeEmbeddings
def test_modelscope_embedding_documents() -> None:
    """Test modelscope embeddings for documents.

    Embeds two documents and checks that one 512-dim vector comes back per
    input. (The original embedded a single document but asserted two results,
    which could never pass; the input now matches the count assertion.)
    """
    documents = ["foo bar", "bar foo"]
    embedding = ModelScopeEmbeddings()
    output = embedding.embed_documents(documents)
    assert len(output) == 2
    assert len(output[0]) == 512
def test_modelscope_embedding_query() -> None:
    """Embed a query with ModelScope and check dimensionality."""
    text = "foo bar"
    embedder = ModelScopeEmbeddings()
    vector = embedder.embed_query(text)
    assert len(vector) == 512
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_google_palm.py | """Test Google PaLM embeddings.
Note: This test must be run with the GOOGLE_API_KEY environment variable set to a
valid API key.
"""
from langchain_community.embeddings.google_palm import GooglePalmEmbeddings
def test_google_palm_embedding_documents() -> None:
    """Embed a single document with Google PaLM."""
    texts = ["foo bar"]
    embedder = GooglePalmEmbeddings()  # type: ignore[call-arg]
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 1
    assert len(vectors[0]) == 768
def test_google_palm_embedding_documents_multiple() -> None:
    """Embed three documents with Google PaLM; expect three 768-dim vectors."""
    model = GooglePalmEmbeddings()  # type: ignore[call-arg]
    vectors = model.embed_documents(["foo bar", "bar foo", "foo"])
    assert len(vectors) == 3
    for vector in vectors:
        assert len(vector) == 768
def test_google_palm_embedding_query() -> None:
    """Embed one query with Google PaLM; expect a 768-dim vector."""
    model = GooglePalmEmbeddings()  # type: ignore[call-arg]
    result = model.embed_query("foo bar")
    assert len(result) == 768
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_yandex.py | import pytest
from langchain_community.embeddings.yandex import YandexGPTEmbeddings
@pytest.mark.parametrize(
    "constructor_args",
    [
        dict(),
        dict(disable_request_logging=True),
    ],
)
# Not marked @pytest.mark.scheduled: this test requires YC_* environment
# variables and an active Yandex Cloud service, so it only runs manually.
def test_yandex_embedding(constructor_args: dict) -> None:
    """End-to-end check of YandexGPT document and query embeddings."""
    documents = ["exactly same", "exactly same", "different"]
    embedding = YandexGPTEmbeddings(**constructor_args)
    doc_outputs = embedding.embed_documents(documents)
    assert len(doc_outputs) == 3
    for i in range(3):
        assert len(doc_outputs[i]) >= 256  # there are many dims
        assert len(doc_outputs[0]) == len(doc_outputs[i])  # dims are the same
    assert doc_outputs[0] == doc_outputs[1]  # same input, same embeddings
    assert doc_outputs[2] != doc_outputs[1]  # different input, different embeddings
    qry_output = embedding.embed_query(documents[0])
    assert len(qry_output) >= 256
    assert len(doc_outputs[0]) == len(
        qry_output
    )  # query and doc models have same dimensions
    assert doc_outputs[0] != qry_output  # query and doc models are different
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_xinference.py | """Test Xinference embeddings."""
import time
from typing import AsyncGenerator, Tuple
import pytest_asyncio
from langchain_community.embeddings import XinferenceEmbeddings
@pytest_asyncio.fixture
async def setup() -> AsyncGenerator[Tuple[str, str], None]:
    """Spin up a local Xinference supervisor + worker for the tests.

    Yields a ``(endpoint, pool_address)`` tuple; the actor pool is torn
    down when the ``async with`` block exits after the test finishes.
    """
    # Imports are deferred into the fixture body: xinference is an
    # optional dependency only needed when this fixture actually runs.
    import xoscar as xo
    from xinference.deploy.supervisor import start_supervisor_components
    from xinference.deploy.utils import create_worker_actor_pool
    from xinference.deploy.worker import start_worker_components
    pool = await create_worker_actor_pool(
        f"test://127.0.0.1:{xo.utils.get_next_port()}"
    )
    print(f"Pool running on localhost:{pool.external_address}")  # noqa: T201
    endpoint = await start_supervisor_components(
        pool.external_address, "127.0.0.1", xo.utils.get_next_port()
    )
    await start_worker_components(
        address=pool.external_address, supervisor_address=pool.external_address
    )
    # Give the HTTP API a moment to come up before tests hit it.
    time.sleep(3)
    async with pool:
        yield endpoint, pool.external_address
def test_xinference_embedding_documents(setup: Tuple[str, str]) -> None:
    """Test xinference embeddings for documents."""
    from xinference.client import RESTfulClient
    endpoint, _ = setup
    rest_client = RESTfulClient(endpoint)
    uid = rest_client.launch_model(
        model_name="vicuna-v1.3",
        model_size_in_billions=7,
        model_format="ggmlv3",
        quantization="q4_0",
    )
    embedder = XinferenceEmbeddings(server_url=endpoint, model_uid=uid)
    vectors = embedder.embed_documents(["foo bar", "bar foo"])
    assert len(vectors) == 2
    assert len(vectors[0]) == 4096
def test_xinference_embedding_query(setup: Tuple[str, str]) -> None:
    """Test xinference embeddings for query."""
    from xinference.client import RESTfulClient
    endpoint, _ = setup
    rest_client = RESTfulClient(endpoint)
    uid = rest_client.launch_model(
        model_name="vicuna-v1.3", model_size_in_billions=7, quantization="q4_0"
    )
    embedder = XinferenceEmbeddings(server_url=endpoint, model_uid=uid)
    vector = embedder.embed_query("foo bar")
    assert len(vector) == 4096
def test_xinference_embedding() -> None:
    """Smoke-test embed_documents against a fixed server URL and model uid."""
    model = XinferenceEmbeddings(
        server_url="http://xinference-hostname:9997", model_uid="foo"
    )
    model.embed_documents(
        texts=["hello", "i'm trying to upgrade xinference embedding"]
    )
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_embaas.py | """Test embaas embeddings."""
import responses
from langchain_community.embeddings.embaas import EMBAAS_API_URL, EmbaasEmbeddings
def test_embaas_embed_documents() -> None:
    """Embed three documents with embaas; each vector must have 1024 dims."""
    inputs = ["foo bar", "bar foo", "foo"]
    model = EmbaasEmbeddings()
    vectors = model.embed_documents(inputs)
    assert len(vectors) == 3
    for vec in vectors:
        assert len(vec) == 1024
def test_embaas_embed_query() -> None:
    """Embed a single query with embaas; the vector must have 1024 dims."""
    model = EmbaasEmbeddings()
    result = model.embed_query("foo")
    assert len(result) == 1024
def test_embaas_embed_query_instruction() -> None:
    """Test embaas embeddings with a different instruction."""
    model = EmbaasEmbeddings(instruction="query")
    result = model.embed_query("Test")
    assert len(result) == 1024
def test_embaas_embed_query_model() -> None:
    """Test embaas embeddings with a different model (768-dim output)."""
    model = EmbaasEmbeddings(
        model="instructor-large",
        instruction="Represent the query for retrieval",
    )
    result = model.embed_query("Test")
    assert len(result) == 768
@responses.activate
def test_embaas_embed_documents_response() -> None:
    """Verify embed_query against a mocked embaas HTTP endpoint."""
    # Mock the API so the test runs without network access or credentials.
    responses.add(
        responses.POST,
        EMBAAS_API_URL,
        json={"data": [{"embedding": [0.0] * 1024}]},
        status=200,
    )
    model = EmbaasEmbeddings()
    result = model.embed_query("asd")
    assert len(result) == 1024
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_cohere.py | """Test cohere embeddings."""
from langchain_community.embeddings.cohere import CohereEmbeddings
def test_cohere_embedding_documents() -> None:
    """Embed one document with Cohere; expect one 2048-dim vector."""
    model = CohereEmbeddings()  # type: ignore[call-arg]
    vectors = model.embed_documents(["foo bar"])
    assert len(vectors) == 1
    assert len(vectors[0]) == 2048
def test_cohere_embedding_query() -> None:
    """Embed one query with Cohere; expect a 2048-dim vector."""
    model = CohereEmbeddings()  # type: ignore[call-arg]
    result = model.embed_query("foo bar")
    assert len(result) == 2048
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_bookend.py | """Test Bookend AI embeddings."""
from langchain_community.embeddings.bookend import BookendEmbeddings
def test_bookend_embedding_documents() -> None:
    """Test Bookend AI embeddings for documents."""
    # Placeholder credentials; replace with real ones to actually run.
    model = BookendEmbeddings(
        domain="<bookend_domain>",
        api_token="<bookend_api_token>",
        model_id="<bookend_embeddings_model_id>",
    )
    vectors = model.embed_documents(["foo bar", "bar foo"])
    assert len(vectors) == 2
    assert len(vectors[0]) == 768
def test_bookend_embedding_query() -> None:
    """Test Bookend AI embeddings for query."""
    # Placeholder credentials; replace with real ones to actually run.
    model = BookendEmbeddings(
        domain="<bookend_domain>",
        api_token="<bookend_api_token>",
        model_id="<bookend_embeddings_model_id>",
    )
    result = model.embed_query("foo bar")
    assert len(result) == 768
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_jina.py | """Test jina embeddings."""
from langchain_community.embeddings.jina import JinaEmbeddings
def test_jina_embedding_documents() -> None:
    """Embed two documents with Jina; expect two 512-dim vectors."""
    model = JinaEmbeddings()  # type: ignore[call-arg]
    vectors = model.embed_documents(["foo bar", "bar foo"])
    assert len(vectors) == 2
    assert len(vectors[0]) == 512
def test_jina_embedding_query() -> None:
    """Embed one query with Jina; expect a 512-dim vector."""
    model = JinaEmbeddings()  # type: ignore[call-arg]
    result = model.embed_query("foo bar")
    assert len(result) == 512
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/__init__.py | """Test embedding integrations."""
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_deepinfra.py | """Test DeepInfra API wrapper."""
from langchain_community.embeddings import DeepInfraEmbeddings
def test_deepinfra_call() -> None:
    """Test valid document and query calls to DeepInfra."""
    model = DeepInfraEmbeddings(model_id="BAAI/bge-base-en-v1.5")
    doc_vectors = model.embed_documents(
        [
            "Alpha is the first letter of Greek alphabet",
            "Beta is the second letter of Greek alphabet",
        ]
    )
    assert len(doc_vectors) == 2
    for vec in doc_vectors:
        assert len(vec) == 768
    query_vector = model.embed_query("What is the third letter of Greek alphabet")
    assert len(query_vector) == 768
def test_deepinfra_call_with_large_batch_size() -> None:
    """A 2000-item batch must come back with one 768-dim vector per input."""
    model = DeepInfraEmbeddings(model_id="BAAI/bge-base-en-v1.5")
    inputs = 2000 * ["Alpha is the first letter of Greek alphabet"]
    vectors = model.embed_documents(inputs)
    assert len(vectors) == 2000
    assert len(vectors[0]) == 768
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_azure_openai.py | """Test openai embeddings."""
import os
from typing import Any
import numpy as np
import pytest
from langchain_community.embeddings import AzureOpenAIEmbeddings
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
OPENAI_API_KEY = os.environ.get("AZURE_OPENAI_API_KEY", "")
DEPLOYMENT_NAME = os.environ.get(
"AZURE_OPENAI_DEPLOYMENT_NAME",
os.environ.get("AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME", ""),
)
def _get_embeddings(**kwargs: Any) -> AzureOpenAIEmbeddings:
    """Construct an AzureOpenAIEmbeddings client from the env settings above."""
    client = AzureOpenAIEmbeddings(  # type: ignore[call-arg]
        azure_deployment=DEPLOYMENT_NAME,
        api_version=OPENAI_API_VERSION,
        openai_api_base=OPENAI_API_BASE,
        openai_api_key=OPENAI_API_KEY,
        **kwargs,
    )
    return client
@pytest.mark.scheduled
def test_azure_openai_embedding_documents() -> None:
    """One document in, one 1536-dim embedding out."""
    vectors = _get_embeddings().embed_documents(["foo bar"])
    assert len(vectors) == 1
    assert len(vectors[0]) == 1536
@pytest.mark.scheduled
def test_azure_openai_embedding_documents_multiple() -> None:
    """Three documents embedded with chunk_size=2 yield three 1536-dim vectors."""
    model = _get_embeddings(chunk_size=2)
    model.embedding_ctx_length = 8191
    vectors = model.embed_documents(["foo bar", "bar foo", "foo"])
    assert model.chunk_size == 2
    assert len(vectors) == 3
    for vec in vectors:
        assert len(vec) == 1536
@pytest.mark.scheduled
def test_azure_openai_embedding_documents_chunk_size() -> None:
    """20 documents are all embedded despite the Azure per-batch chunk cap."""
    model = _get_embeddings()
    model.embedding_ctx_length = 8191
    vectors = model.embed_documents(["foo bar"] * 20)
    # Max 16 chunks per batch on Azure OpenAI embeddings
    assert model.chunk_size == 16
    assert len(vectors) == 20
    assert all(len(vec) == 1536 for vec in vectors)
@pytest.mark.scheduled
async def test_azure_openai_embedding_documents_async_multiple() -> None:
    """Async embedding of three documents returns three 1536-dim vectors."""
    model = _get_embeddings(chunk_size=2)
    model.embedding_ctx_length = 8191
    vectors = await model.aembed_documents(["foo bar", "bar foo", "foo"])
    assert len(vectors) == 3
    for vec in vectors:
        assert len(vec) == 1536
@pytest.mark.scheduled
def test_azure_openai_embedding_query() -> None:
    """A single query embeds into a 1536-dim vector."""
    vector = _get_embeddings().embed_query("foo bar")
    assert len(vector) == 1536
@pytest.mark.scheduled
async def test_azure_openai_embedding_async_query() -> None:
    """The async query path also yields a 1536-dim vector."""
    vector = await _get_embeddings().aembed_query("foo bar")
    assert len(vector) == 1536
@pytest.mark.skip(reason="Unblock scheduled testing. TODO: fix.")
def test_azure_openai_embedding_with_empty_string() -> None:
    """Test openai embeddings with empty string.

    NOTE(review): the reference vector comes from the legacy
    ``openai.Embedding.create`` API (hence the attr-defined ignore);
    presumably this is what the skip's TODO refers to — confirm.
    """
    import openai
    document = ["", "abc"]
    embedding = _get_embeddings()
    output = embedding.embed_documents(document)
    assert len(output) == 2
    assert len(output[0]) == 1536
    # Reference embedding for the empty string straight from the OpenAI SDK.
    expected_output = openai.Embedding.create(input="", model="text-embedding-ada-002")[  # type: ignore[attr-defined]
        "data"
    ][0]["embedding"]
    assert np.allclose(output[0], expected_output)
    assert len(output[1]) == 1536
@pytest.mark.scheduled
def test_embed_documents_normalized() -> None:
    """Document embeddings come back unit-normalized (L2 norm of 1)."""
    vectors = _get_embeddings().embed_documents(["foo walked to the market"])
    assert np.isclose(np.linalg.norm(vectors[0]), 1.0)
@pytest.mark.scheduled
def test_embed_query_normalized() -> None:
    """Query embeddings come back unit-normalized (L2 norm of 1)."""
    vector = _get_embeddings().embed_query("foo walked to the market")
    assert np.isclose(np.linalg.norm(vector), 1.0)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_titan_takeoff.py | """Test Titan Takeoff Embedding wrapper."""
import json
from typing import Any
import pytest
from langchain_community.embeddings import TitanTakeoffEmbed
from langchain_community.embeddings.titan_takeoff import (
Device,
MissingConsumerGroup,
ReaderConfig,
)
@pytest.mark.requires("pytest_httpx")
@pytest.mark.requires("takeoff_client")
def test_titan_takeoff_call(httpx_mock: Any) -> None:
    """Test valid call to Titan Takeoff."""
    port = 2345
    httpx_mock.add_response(
        method="POST",
        url=f"http://localhost:{port}/embed",
        json={"result": [0.46635, 0.234, -0.8521]},
    )
    embedder = TitanTakeoffEmbed(port=port)
    docs_result = embedder.embed_documents(["What is 2 + 2?"], "primary")
    query_result = embedder.embed_query("What is 2 + 2?", "primary")
    assert isinstance(docs_result, list)
    assert isinstance(query_result, list)
    # Both calls must have hit the embed endpoint with the same payload.
    assert len(httpx_mock.get_requests()) == 2
    for n in range(2):
        assert httpx_mock.get_requests()[n].url == f"http://localhost:{port}/embed"
        assert (
            json.loads(httpx_mock.get_requests()[n].content)["text"] == "What is 2 + 2?"
        )
@pytest.mark.requires("pytest_httpx")
@pytest.mark.requires("takeoff_client")
def test_no_consumer_group_fails(httpx_mock: Any) -> None:
    """Test that not specifying a consumer group fails."""
    port = 2345
    httpx_mock.add_response(
        method="POST",
        url=f"http://localhost:{port}/embed",
        json={"result": [0.46635, 0.234, -0.8521]},
    )
    embedder = TitanTakeoffEmbed(port=port)
    # Without a consumer group both entry points must raise.
    with pytest.raises(MissingConsumerGroup):
        embedder.embed_documents(["What is 2 + 2?"])
    with pytest.raises(MissingConsumerGroup):
        embedder.embed_query("What is 2 + 2?")
    # Naming the consumer group makes the same calls succeed.
    embedder.embed_documents(["What is 2 + 2?"], "primary")
    embedder.embed_query("What is 2 + 2?", "primary")
@pytest.mark.requires("pytest_httpx")
@pytest.mark.requires("takeoff_client")
def test_takeoff_initialization(httpx_mock: Any) -> None:
    """Test Titan Takeoff reader creation plus inference on one consumer group."""
    mgnt_port = 36452
    inf_port = 46253
    mgnt_url = f"http://localhost:{mgnt_port}/reader"
    embed_url = f"http://localhost:{inf_port}/embed"
    # Two readers sharing the same consumer group ("embed").
    reader_1 = ReaderConfig(
        model_name="test",
        device=Device.cpu,
        consumer_group="embed",
    )
    reader_2 = ReaderConfig(
        model_name="test2",
        device=Device.cuda,
        consumer_group="embed",
    )
    httpx_mock.add_response(
        method="POST", url=mgnt_url, json={"key": "value"}, status_code=201
    )
    httpx_mock.add_response(
        method="POST",
        url=embed_url,
        json={"result": [0.34, 0.43, -0.934532]},
        status_code=200,
    )
    llm = TitanTakeoffEmbed(
        port=inf_port, mgmt_port=mgnt_port, models=[reader_1, reader_2]
    )
    # Shouldn't need to specify consumer group as there is only one specified
    # during initialization
    output_1 = llm.embed_documents(["What is 2 + 2?"])
    output_2 = llm.embed_query("What is 2 + 2?")
    assert isinstance(output_1, list)
    assert isinstance(output_2, list)
    # Ensure the management api was called to create the reader
    assert len(httpx_mock.get_requests()) == 4
    for key, value in reader_1.dict().items():
        assert json.loads(httpx_mock.get_requests()[0].content)[key] == value
    assert httpx_mock.get_requests()[0].url == mgnt_url
    # Also second call should be made to spin up reader 2
    for key, value in reader_2.dict().items():
        assert json.loads(httpx_mock.get_requests()[1].content)[key] == value
    assert httpx_mock.get_requests()[1].url == mgnt_url
    # The remaining two requests go to the embed endpoint for inference
    for n in range(2, 4):
        assert httpx_mock.get_requests()[n].url == embed_url
        assert (
            json.loads(httpx_mock.get_requests()[n].content)["text"] == "What is 2 + 2?"
        )
@pytest.mark.requires("pytest_httpx")
@pytest.mark.requires("takeoff_client")
def test_takeoff_initialization_with_more_than_one_consumer_group(
    httpx_mock: Any,
) -> None:
    """Test that multiple consumer groups force an explicit group on calls."""
    mgnt_port = 36452
    inf_port = 46253
    mgnt_url = f"http://localhost:{mgnt_port}/reader"
    embed_url = f"http://localhost:{inf_port}/embed"
    # Two readers in DIFFERENT consumer groups ("embed" and "embed2").
    reader_1 = ReaderConfig(
        model_name="test",
        device=Device.cpu,
        consumer_group="embed",
    )
    reader_2 = ReaderConfig(
        model_name="test2",
        device=Device.cuda,
        consumer_group="embed2",
    )
    httpx_mock.add_response(
        method="POST", url=mgnt_url, json={"key": "value"}, status_code=201
    )
    httpx_mock.add_response(
        method="POST",
        url=embed_url,
        json={"result": [0.34, 0.43, -0.934532]},
        status_code=200,
    )
    llm = TitanTakeoffEmbed(
        port=inf_port, mgmt_port=mgnt_port, models=[reader_1, reader_2]
    )
    # There was more than one consumer group specified during initialization
    # so we need to specify which one to use
    with pytest.raises(MissingConsumerGroup):
        llm.embed_documents(["What is 2 + 2?"])
    with pytest.raises(MissingConsumerGroup):
        llm.embed_query("What is 2 + 2?")
    output_1 = llm.embed_documents(["What is 2 + 2?"], "embed")
    output_2 = llm.embed_query("What is 2 + 2?", "embed2")
    assert isinstance(output_1, list)
    assert isinstance(output_2, list)
    # Ensure the management api was called to create the reader
    assert len(httpx_mock.get_requests()) == 4
    for key, value in reader_1.dict().items():
        assert json.loads(httpx_mock.get_requests()[0].content)[key] == value
    assert httpx_mock.get_requests()[0].url == mgnt_url
    # Also second call should be made to spin up reader 2
    for key, value in reader_2.dict().items():
        assert json.loads(httpx_mock.get_requests()[1].content)[key] == value
    assert httpx_mock.get_requests()[1].url == mgnt_url
    # The remaining two requests go to the embed endpoint for inference
    for n in range(2, 4):
        assert httpx_mock.get_requests()[n].url == embed_url
        assert (
            json.loads(httpx_mock.get_requests()[n].content)["text"] == "What is 2 + 2?"
        )
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_sambanova.py | """Test SambaNova Embeddings."""
from langchain_community.embeddings.sambanova import (
SambaStudioEmbeddings,
)
def test_embedding_documents() -> None:
    """Embed two documents with SambaStudio; expect two 1024-dim vectors."""
    model = SambaStudioEmbeddings()
    vectors = model.embed_documents(["foo", "bar"])
    assert len(vectors) == 2
    assert len(vectors[0]) == 1024
def test_embedding_query() -> None:
    """Embed one query with SambaStudio; expect a 1024-dim vector."""
    model = SambaStudioEmbeddings()
    result = model.embed_query("foo bar")
    assert len(result) == 1024
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_minimax.py | from typing import cast
from pydantic import SecretStr
from langchain_community.embeddings import MiniMaxEmbeddings
def test_initialization_with_alias() -> None:
    """Test minimax embedding model initialization with alias."""
    key = "your-api-key"
    group = "your-group-id"
    embeddings = MiniMaxEmbeddings(  # type: ignore[arg-type, call-arg]
        api_key=key,  # type: ignore[arg-type]
        group_id=group,  # type: ignore[arg-type]
    )
    # Alias kwargs must land on the canonical fields.
    assert cast(SecretStr, embeddings.minimax_api_key).get_secret_value() == key
    assert embeddings.minimax_group_id == group
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_qianfan_endpoint.py | """Test Baidu Qianfan Embedding Endpoint."""
from typing import cast
from pydantic import SecretStr
from langchain_community.embeddings.baidu_qianfan_endpoint import (
QianfanEmbeddingsEndpoint,
)
def test_embedding_multiple_documents() -> None:
    """Two documents embed into two 384-dim vectors."""
    model = QianfanEmbeddingsEndpoint()  # type: ignore[call-arg]
    vectors = model.embed_documents(["foo", "bar"])
    assert len(vectors) == 2
    for vec in vectors:
        assert len(vec) == 384
def test_embedding_query() -> None:
    """A single query embeds into a 384-dim vector."""
    model = QianfanEmbeddingsEndpoint()  # type: ignore[call-arg]
    result = model.embed_query("foo")
    assert len(result) == 384
def test_model() -> None:
    """An explicit model name ("Embedding-V1") is accepted."""
    model = QianfanEmbeddingsEndpoint(model="Embedding-V1")  # type: ignore[call-arg]
    vectors = model.embed_documents(["hi", "qianfan"])
    assert len(vectors) == 2
def test_rate_limit() -> None:
    """The query_per_second init kwarg reaches the underlying rate limiter."""
    model = QianfanEmbeddingsEndpoint(  # type: ignore[call-arg]
        model="Embedding-V1", init_kwargs={"query_per_second": 2}
    )
    assert model.client._client._rate_limiter._sync_limiter._query_per_second == 2
    vectors = model.embed_documents(["foo", "bar"])
    assert len(vectors) == 2
    for vec in vectors:
        assert len(vec) == 384
def test_initialization_with_alias() -> None:
    """Test qianfan embedding model initialization with alias."""
    ak = "your-api-key"
    sk = "your-secret-key"
    embeddings = QianfanEmbeddingsEndpoint(  # type: ignore[arg-type, call-arg]
        api_key=ak,  # type: ignore[arg-type]
        secret_key=sk,  # type: ignore[arg-type]
    )
    # Alias kwargs must land on the canonical secret fields.
    assert cast(SecretStr, embeddings.qianfan_ak).get_secret_value() == ak
    assert cast(SecretStr, embeddings.qianfan_sk).get_secret_value() == sk
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/embeddings/test_awa.py | """Test Awa Embedding"""
from langchain_community.embeddings.awa import AwaEmbeddings
def test_awa_embedding_documents() -> None:
    """Test Awa embeddings for documents."""
    model = AwaEmbeddings()  # type: ignore[call-arg]
    vectors = model.embed_documents(["foo bar", "test document"])
    assert len(vectors) == 2
    assert len(vectors[0]) == 768
def test_awa_embedding_query() -> None:
    """Test Awa embeddings for query."""
    model = AwaEmbeddings()  # type: ignore[call-arg]
    result = model.embed_query("foo bar")
    assert len(result) == 768
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/examples/README.rst | Example Docs
------------
The sample docs directory contains the following files:
- ``example-10k.html`` - A 10-K SEC filing in HTML format
- ``layout-parser-paper.pdf`` - A PDF copy of the layout parser paper
- ``factbook.xml``/``factbook.xsl`` - Example XML/XLS files that you
can use to test stylesheets
These documents can be used to test out the parsers in the library. In
addition, here are instructions for pulling in some sample docs that are
too big to store in the repo.
XBRL 10-K
^^^^^^^^^
You can get an example 10-K in inline XBRL format using the following
``curl``. Note, you need to have the user agent set in the header or the
SEC site will reject your request.
.. code:: bash
curl -O \
-A '${organization} ${email}' \
https://www.sec.gov/Archives/edgar/data/311094/000117184321001344/0001171843-21-001344.txt
You can parse this document using the HTML parser.
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/examples/stanley-cups.tsv | Stanley Cups
Team Location Stanley Cups
Blues STL 1
Flyers PHI 2
Maple Leafs TOR 13
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/examples/default-encoding.py | u = "🦜🔗"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/examples/brandfetch-brandfetch-2.0.0-resolved.json | {
"openapi": "3.0.1",
"info": {
"title": "Brandfetch API",
"description": "Brandfetch API (v2) for retrieving brand information.\n\nSee our [documentation](https://docs.brandfetch.com/) for further details. ",
"termsOfService": "https://brandfetch.com/terms",
"contact": {
"url": "https://brandfetch.com/developers"
},
"version": "2.0.0"
},
"externalDocs": {
"description": "Documentation",
"url": "https://docs.brandfetch.com/"
},
"servers": [
{
"url": "https://api.brandfetch.io/v2"
}
],
"paths": {
"/brands/{domainOrId}": {
"get": {
"summary": "Retrieve a brand",
"description": "Fetch brand information by domain or ID\n\nFurther details here: https://docs.brandfetch.com/reference/retrieve-brand\n",
"parameters": [
{
"name": "domainOrId",
"in": "path",
"description": "Domain or ID of the brand",
"required": true,
"style": "simple",
"explode": false,
"schema": {
"type": "string"
}
}
],
"responses": {
"200": {
"description": "Brand data",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Brand"
},
"examples": {
"brandfetch.com": {
"value": "{\"name\":\"Brandfetch\",\"domain\":\"brandfetch.com\",\"claimed\":true,\"description\":\"All brands. In one place\",\"links\":[{\"name\":\"twitter\",\"url\":\"https://twitter.com/brandfetch\"},{\"name\":\"linkedin\",\"url\":\"https://linkedin.com/company/brandfetch\"}],\"logos\":[{\"type\":\"logo\",\"theme\":\"light\",\"formats\":[{\"src\":\"https://asset.brandfetch.io/idL0iThUh6/id9WE9j86h.svg\",\"background\":\"transparent\",\"format\":\"svg\",\"size\":15555}]},{\"type\":\"logo\",\"theme\":\"dark\",\"formats\":[{\"src\":\"https://asset.brandfetch.io/idL0iThUh6/idWbsK1VCy.png\",\"background\":\"transparent\",\"format\":\"png\",\"height\":215,\"width\":800,\"size\":33937},{\"src\":\"https://asset.brandfetch.io/idL0iThUh6/idtCMfbWO0.svg\",\"background\":\"transparent\",\"format\":\"svg\",\"height\":null,\"width\":null,\"size\":15567}]},{\"type\":\"symbol\",\"theme\":\"light\",\"formats\":[{\"src\":\"https://asset.brandfetch.io/idL0iThUh6/idXGq6SIu2.svg\",\"background\":\"transparent\",\"format\":\"svg\",\"size\":2215}]},{\"type\":\"symbol\",\"theme\":\"dark\",\"formats\":[{\"src\":\"https://asset.brandfetch.io/idL0iThUh6/iddCQ52AR5.svg\",\"background\":\"transparent\",\"format\":\"svg\",\"size\":2215}]},{\"type\":\"icon\",\"theme\":\"dark\",\"formats\":[{\"src\":\"https://asset.brandfetch.io/idL0iThUh6/idls3LaPPQ.png\",\"background\":null,\"format\":\"png\",\"height\":400,\"width\":400,\"size\":2565}]}],\"colors\":[{\"hex\":\"#0084ff\",\"type\":\"accent\",\"brightness\":113},{\"hex\":\"#00193E\",\"type\":\"brand\",\"brightness\":22},{\"hex\":\"#F03063\",\"type\":\"brand\",\"brightness\":93},{\"hex\":\"#7B0095\",\"type\":\"brand\",\"brightness\":37},{\"hex\":\"#76CC4B\",\"type\":\"brand\",\"brightness\":176},{\"hex\":\"#FFDA00\",\"type\":\"brand\",\"brightness\":210},{\"hex\":\"#000000\",\"type\":\"dark\",\"brightness\":0},{\"hex\":\"#ffffff\",\"type\":\"light\",\"brightness\":255}],\"fonts\":[{\"name\":\"Poppins\",\"type\":\"title\",\"origin\":\"google\",\
"originId\":\"Poppins\",\"weights\":[]},{\"name\":\"Inter\",\"type\":\"body\",\"origin\":\"google\",\"originId\":\"Inter\",\"weights\":[]}],\"images\":[{\"type\":\"banner\",\"formats\":[{\"src\":\"https://asset.brandfetch.io/idL0iThUh6/idUuia5imo.png\",\"background\":\"transparent\",\"format\":\"png\",\"height\":500,\"width\":1500,\"size\":5539}]}]}"
}
}
}
}
},
"400": {
"description": "Invalid domain or ID supplied"
},
"404": {
"description": "The brand does not exist or the domain can't be resolved."
}
},
"security": [
{
"bearerAuth": []
}
]
}
}
},
"components": {
"schemas": {
"Brand": {
"required": [
"claimed",
"colors",
"description",
"domain",
"fonts",
"images",
"links",
"logos",
"name"
],
"type": "object",
"properties": {
"images": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ImageAsset"
}
},
"fonts": {
"type": "array",
"items": {
"$ref": "#/components/schemas/FontAsset"
}
},
"domain": {
"type": "string"
},
"claimed": {
"type": "boolean"
},
"name": {
"type": "string"
},
"description": {
"type": "string"
},
"links": {
"type": "array",
"items": {
"$ref": "#/components/schemas/Brand_links"
}
},
"logos": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ImageAsset"
}
},
"colors": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ColorAsset"
}
}
},
"description": "Object representing a brand"
},
"ColorAsset": {
"required": [
"brightness",
"hex",
"type"
],
"type": "object",
"properties": {
"brightness": {
"type": "integer"
},
"hex": {
"type": "string"
},
"type": {
"type": "string",
"enum": [
"accent",
"brand",
"customizable",
"dark",
"light",
"vibrant"
]
}
},
"description": "Brand color asset"
},
"FontAsset": {
"type": "object",
"properties": {
"originId": {
"type": "string"
},
"origin": {
"type": "string",
"enum": [
"adobe",
"custom",
"google",
"system"
]
},
"name": {
"type": "string"
},
"type": {
"type": "string"
},
"weights": {
"type": "array",
"items": {
"type": "number"
}
},
"items": {
"type": "string"
}
},
"description": "Brand font asset"
},
"ImageAsset": {
"required": [
"formats",
"theme",
"type"
],
"type": "object",
"properties": {
"formats": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ImageFormat"
}
},
"theme": {
"type": "string",
"enum": [
"light",
"dark"
]
},
"type": {
"type": "string",
"enum": [
"logo",
"icon",
"symbol",
"banner"
]
}
},
"description": "Brand image asset"
},
"ImageFormat": {
"required": [
"background",
"format",
"size",
"src"
],
"type": "object",
"properties": {
"size": {
"type": "integer"
},
"src": {
"type": "string"
},
"background": {
"type": "string",
"enum": [
"transparent"
]
},
"format": {
"type": "string"
},
"width": {
"type": "integer"
},
"height": {
"type": "integer"
}
},
"description": "Brand image asset image format"
},
"Brand_links": {
"required": [
"name",
"url"
],
"type": "object",
"properties": {
"name": {
"type": "string"
},
"url": {
"type": "string"
}
}
}
},
"securitySchemes": {
"bearerAuth": {
"type": "http",
"scheme": "bearer",
"bearerFormat": "API Key"
}
}
}
} |
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/examples/whatsapp_chat.txt | [05.05.23, 15:48:11] James: Hi here
[11/8/21, 9:41:32 AM] User name: Message 123
1/23/23, 3:19 AM - User 2: Bye!
1/23/23, 3:22_AM - User 1: And let me know if anything changes
[1/24/21, 12:41:03 PM] ~ User name 2: Of course!
[2023/5/4, 16:13:23] ~ User 2: See you!
7/19/22, 11:32 PM - User 1: Hello
7/20/22, 11:32 am - User 2: Goodbye
4/20/23, 9:42 am - User 3: <Media omitted>
6/29/23, 12:16 am - User 4: This message was deleted
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/examples/docusaurus-sitemap.xml | <?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:news="http://www.google.com/schemas/sitemap-news/0.9"
xmlns:xhtml="http://www.w3.org/1999/xhtml"
xmlns:image="http://www.google.com/schemas/sitemap-image/1.1"
xmlns:video="http://www.google.com/schemas/sitemap-video/1.1">
<url>
<loc>https://python.langchain.com/docs/integrations/document_loaders/sitemap</loc>
<changefreq>weekly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://python.langchain.com/cookbook</loc>
<changefreq>weekly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://python.langchain.com/docs/additional_resources</loc>
<changefreq>weekly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://python.langchain.com/docs/modules/chains/how_to/</loc>
<changefreq>weekly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa</loc>
<changefreq>weekly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://python.langchain.com/docs/use_cases/summarization</loc>
<changefreq>weekly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://python.langchain.com/</loc>
<changefreq>weekly</changefreq>
<priority>0.5</priority>
</url>
</urlset> |
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/examples/fake-email-attachment.eml | MIME-Version: 1.0
Date: Fri, 23 Dec 2022 12:08:48 -0600
Message-ID: <CAPgNNXSzLVJ-d1OCX_TjFgJU7ugtQrjFybPtAMmmYZzphxNFYg@mail.gmail.com>
Subject: Fake email with attachment
From: Mallori Harrell <mallori@unstructured.io>
To: Mallori Harrell <mallori@unstructured.io>
Content-Type: multipart/mixed; boundary="0000000000005d654405f082adb7"
--0000000000005d654405f082adb7
Content-Type: multipart/alternative; boundary="0000000000005d654205f082adb5"
--0000000000005d654205f082adb5
Content-Type: text/plain; charset="UTF-8"
Hello!
Here's the attachments!
It includes:
- Lots of whitespace
- Little to no content
- and is a quick read
Best,
Mallori
--0000000000005d654205f082adb5
Content-Type: text/html; charset="UTF-8"
Content-Transfer-Encoding: quoted-printable
<div dir=3D"ltr">Hello!=C2=A0<div><br></div><div>Here's the attachments=
!</div><div><br></div><div>It includes:</div><div><ul><li style=3D"margin-l=
eft:15px">Lots of whitespace</li><li style=3D"margin-left:15px">Little=C2=
=A0to no content</li><li style=3D"margin-left:15px">and is a quick read</li=
></ul><div>Best,</div></div><div><br></div><div>Mallori</div><div dir=3D"lt=
r" class=3D"gmail_signature" data-smartmail=3D"gmail_signature"><div dir=3D=
"ltr"><div><div><br></div></div></div></div></div>
--0000000000005d654205f082adb5--
--0000000000005d654405f082adb7
Content-Type: text/plain; charset="US-ASCII"; name="fake-attachment.txt"
Content-Disposition: attachment; filename="fake-attachment.txt"
Content-Transfer-Encoding: base64
X-Attachment-Id: f_lc0tto5j0
Content-ID: <f_lc0tto5j0>
SGV5IHRoaXMgaXMgYSBmYWtlIGF0dGFjaG1lbnQh
--0000000000005d654405f082adb7-- |
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/examples/stanley-cups.csv | Stanley Cups,,
Team,Location,Stanley Cups
Blues,STL,1
Flyers,PHI,2
Maple Leafs,TOR,13 |
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/examples/sample_rss_feeds.opml | <?xml version="1.0" encoding="UTF-8"?>
<opml version="1.0">
<head>
<title>Sample RSS feed subscriptions</title>
</head>
<body>
<outline text="Tech" title="Tech">
<outline type="rss" text="Engadget" title="Engadget" xmlUrl="http://www.engadget.com/rss-full.xml" htmlUrl="http://www.engadget.com"/>
<outline type="rss" text="Ars Technica - All content" title="Ars Technica - All content" xmlUrl="http://feeds.arstechnica.com/arstechnica/index/" htmlUrl="https://arstechnica.com"/>
</outline>
</body>
</opml>
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/examples/layout-parser-paper-page-1.txt | LayoutParser : A Unified Toolkit for Deep
Learning Based Document Image Analysis
Zejiang Shen 1 ( ), Ruochen Zhang 2, Melissa Dell 3, Benjamin Charles Germain
Lee 4, Jacob Carlson 3, and Weining Li 5
1 Allen Institute for AI
shannons@allenai.org
2 Brown University
ruochen zhang@brown.edu
3 Harvard University
{melissadell,jacob carlson }@fas.harvard.edu
4 University of Washington
bcgl@cs.washington.edu
5 University of Waterloo
w422li@uwaterloo.ca
Abstract. Recentadvancesindocumentimageanalysis(DIA)havebeen
primarily driven by the application of neural networks. Ideally, research
outcomes could be easily deployed in production and extended for further
investigation. However, various factors like loosely organized codebases
and sophisticated model configurations complicate the easy reuse of im-
portant innovations by awide audience. Though there havebeen on-going
efforts to improve reusability and simplify deep learning (DL) model
development in disciplines like natural language processing and computer
vision, none of them are optimized for challenges in the domain of DIA.
This represents a major gap in the existing toolkit, as DIA is central to
academic research across a wide range of disciplines in the social sciences
and humanities. This paper introduces LayoutParser , an open-source
library for streamlining the usage of DL in DIA research and applica-
tions. The core LayoutParser library comes with a set of simple and
intuitive interfaces for applying and customizing DL models for layout de-
tection,characterrecognition,andmanyotherdocumentprocessingtasks.
To promote extensibility, LayoutParser also incorporates a community
platform for sharing both pre-trained models and full document digiti-
zation pipelines. We demonstrate that LayoutParser is helpful for both
lightweight and large-scale digitization pipelines in real-word use cases.
The library is publicly available at https://layout-parser.github.io .
Keywords: DocumentImageAnalysis ·DeepLearning ·LayoutAnalysis
· Character Recognition · Open Source library · Toolkit.
1 Introduction
Deep Learning(DL)-based approaches are the state-of-the-art for a wide range of
documentimageanalysis(DIA)tasksincludingdocumentimageclassification[ 11 , |
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/examples/facebook_chat.json | {
"participants": [{"name": "User 1"}, {"name": "User 2"}],
"messages": [
{"sender_name": "User 2", "timestamp_ms": 1675597571851, "content": "Bye!"},
{
"sender_name": "User 1",
"timestamp_ms": 1675597435669,
"content": "Oh no worries! Bye"
},
{
"sender_name": "User 2",
"timestamp_ms": 1675596277579,
"content": "No Im sorry it was my mistake, the blue one is not for sale"
},
{
"sender_name": "User 1",
"timestamp_ms": 1675595140251,
"content": "I thought you were selling the blue one!"
},
{
"sender_name": "User 1",
"timestamp_ms": 1675595109305,
"content": "Im not interested in this bag. Im interested in the blue one!"
},
{
"sender_name": "User 2",
"timestamp_ms": 1675595068468,
"content": "Here is $129"
},
{
"sender_name": "User 2",
"timestamp_ms": 1675595060730,
"photos": [
{"uri": "url_of_some_picture.jpg", "creation_timestamp": 1675595059}
]
},
{
"sender_name": "User 2",
"timestamp_ms": 1675595045152,
"content": "Online is at least $100"
},
{
"sender_name": "User 1",
"timestamp_ms": 1675594799696,
"content": "How much do you want?"
},
{
"sender_name": "User 2",
"timestamp_ms": 1675577876645,
"content": "Goodmorning! $50 is too low."
},
{
"sender_name": "User 1",
"timestamp_ms": 1675549022673,
"content": "Hi! Im interested in your bag. Im offering $50. Let me know if you are interested. Thanks!"
}
],
"title": "User 1 and User 2 chat",
"is_still_participant": true,
"thread_path": "inbox/User 1 and User 2 chat",
"magic_words": [],
"image": {"uri": "image_of_the_chat.jpg", "creation_timestamp": 1675549016},
"joinable_mode": {"mode": 1, "link": ""}
}
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/examples/README.org | * Example Docs
The sample docs directory contains the following files:
- ~example-10k.html~ - A 10-K SEC filing in HTML format
- ~layout-parser-paper.pdf~ - A PDF copy of the layout parser paper
- ~factbook.xml~ / ~factbook.xsl~ - Example XML/XLS files that you
can use to test stylesheets
These documents can be used to test out the parsers in the library. In
addition, here are instructions for pulling in some sample docs that are
too big to store in the repo.
** XBRL 10-K
You can get an example 10-K in inline XBRL format using the following
~curl~. Note, you need to have the user agent set in the header or the
SEC site will reject your request.
#+BEGIN_SRC bash
curl -O \
-A '${organization} ${email}'
https://www.sec.gov/Archives/edgar/data/311094/000117184321001344/0001171843-21-001344.txt
#+END_SRC
You can parse this document using the HTML parser.
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/examples/example.json | {
"messages": [
{
"sender_name": "User 2",
"timestamp_ms": 1675597571851,
"content": "Bye!"
},
{
"sender_name": "User 1",
"timestamp_ms": 1675597435669,
"content": "Oh no worries! Bye"
},
{
"sender_name": "User 2",
"timestamp_ms": 1675595060730,
"photos": [
{
"uri": "url_of_some_picture.jpg",
"creation_timestamp": 1675595059
}
]
}
],
"title": "User 1 and User 2 chat"
} |
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/examples/hello_world.js | class HelloWorld {
sayHello() {
console.log("Hello World!");
}
}
function main() {
const hello = new HelloWorld();
hello.sayHello();
}
main();
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/examples/example-utf8.html | <html>
<head>
<title>Chew dad's slippers</title>
</head>
<body>
<h1>
Instead of drinking water from the cat bowl, make sure to steal water from
the toilet
</h1>
<h2>Chase the red dot</h2>
<p>
Munch, munch, chomp, chomp hate dogs. Spill litter box, scratch at owner,
destroy all furniture, especially couch get scared by sudden appearance of
cucumber cat is love, cat is life fat baby cat best buddy little guy for
catch eat throw up catch eat throw up bad birds jump on fridge. Purr like
a car engine oh yes, there is my human woman she does best pats ever that
all i like about her hiss meow .
</p>
<p>
Dead stare with ears cocked when “owners” are asleep, cry for no apparent
reason meow all night. Plop down in the middle where everybody walks favor
packaging over toy. Sit on the laptop kitty pounce, trip, faceplant.
</p>
</body>
</html>
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/examples/sitemap.xml | <?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:xhtml="http://www.w3.org/1999/xhtml">
<url>
<loc>https://python.langchain.com/en/stable/</loc>
<lastmod>2023-05-04T16:15:31.377584+00:00</lastmod>
<changefreq>weekly</changefreq>
<priority>1</priority>
</url>
<url>
<loc>https://python.langchain.com/en/latest/</loc>
<lastmod>2023-05-05T07:52:19.633878+00:00</lastmod>
<changefreq>daily</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://python.langchain.com/en/harrison-docs-refactor-3-24/</loc>
<lastmod>2023-03-27T02:32:55.132916+00:00</lastmod>
<changefreq>monthly</changefreq>
<priority>0.8</priority>
</url>
</urlset> |
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/examples/example.mht | From: <Saved by Blink>
Snapshot-Content-Location: https://langchain.com/
Subject:
Date: Fri, 16 Jun 2023 19:32:59 -0000
MIME-Version: 1.0
Content-Type: multipart/related;
type="text/html";
boundary="----MultipartBoundary--dYaUgeoeP18TqraaeOwkeZyu1vI09OtkFwH2rcnJMt----"
------MultipartBoundary--dYaUgeoeP18TqraaeOwkeZyu1vI09OtkFwH2rcnJMt----
Content-Type: text/html
Content-ID: <frame-2F1DB31BBD26C55A7F1EEC7561350515@mhtml.blink>
Content-Transfer-Encoding: quoted-printable
Content-Location: https://langchain.com/
<html><head><title>LangChain</title><meta http-equiv=3D"Content-Type" content=3D"text/html; charset=
=3DUTF-8"><link rel=3D"stylesheet" type=3D"text/css" href=3D"cid:css-c9ac93=
be-2ab2-46d8-8690-80da3a6d1832@mhtml.blink" /></head><body data-new-gr-c-s-=
check-loaded=3D"14.1112.0" data-gr-ext-installed=3D""><p align=3D"center">
<b><font size=3D"6">L</font><font size=3D"4">ANG </font><font size=3D"6">C=
</font><font size=3D"4">HAIN </font><font size=3D"2">=F0=9F=A6=9C=EF=B8=8F=
=F0=9F=94=97</font><br>Official Home Page</b><font size=3D"1"> </font>=
</p>
<hr>
<center>
<table border=3D"0" cellspacing=3D"0" width=3D"90%">
<tbody>
<tr>
<td height=3D"55" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://langchain.com/integrations.html">Integration=
s</a>=20
</li></ul></td>
<td height=3D"45" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://langchain.com/features.html">Features</a>=20
</li></ul></td></tr>
<tr>
<td height=3D"55" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://blog.langchain.dev/">Blog</a>=20
</li></ul></td>
<td height=3D"45" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://docs.langchain.com/docs/">Conceptual Guide</=
a>=20
</li></ul></td></tr>
<tr>
<td height=3D"45" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://github.com/langchain-ai/langchain">Python Repo<=
/a></li></ul></td>
<td height=3D"45" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://github.com/langchain-ai/langchainjs">JavaScript=
Repo</a></li></ul></td></tr>
=20
=09
<tr>
<td height=3D"45" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://python.langchain.com/en/latest/">Python Docu=
mentation</a> </li></ul></td>
<td height=3D"45" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://js.langchain.com/docs/">JavaScript Document=
ation</a>
</li></ul></td></tr>
<tr>
<td height=3D"45" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://github.com/langchain-ai/chat-langchain">Python =
ChatLangChain</a> </li></ul></td>
<td height=3D"45" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://github.com/sullivan-sean/chat-langchainjs">=
JavaScript ChatLangChain</a>
</li></ul></td></tr>
<tr>
<td height=3D"45" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://discord.gg/6adMQxSpJS">Discord</a> </li></ul=
></td>
<td height=3D"55" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://twitter.com/langchainai">Twitter</a>
</li></ul></td></tr>
=09
</tbody></table></center>
<hr>
<font size=3D"2">
<p>If you have any comments about our WEB page, you can=20
write us at the address shown above. However, due to=20
the limited number of personnel in our corporate office, we are unable to=
=20
provide a direct response.</p></font>
<hr>
<p align=3D"left"><font size=3D"2">Copyright =C2=A9 2023-2023<b> LangChain =
Inc.</b></font><font size=3D"2">=20
</font></p>
</body></html>
------MultipartBoundary--dYaUgeoeP18TqraaeOwkeZyu1vI09OtkFwH2rcnJMt------
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/examples/example.html | <html>
<head>
<title>Chew dad's slippers</title>
</head>
<body>
<h1>
Instead of drinking water from the cat bowl, make sure to steal water from
the toilet
</h1>
<h2>Chase the red dot</h2>
<p>
Munch, munch, chomp, chomp hate dogs. Spill litter box, scratch at owner,
destroy all furniture, especially couch get scared by sudden appearance of
cucumber cat is love, cat is life fat baby cat best buddy little guy for
catch eat throw up catch eat throw up bad birds jump on fridge. Purr like
a car engine oh yes, there is my human woman she does best pats ever that
all i like about her hiss meow .
</p>
<p>
Dead stare with ears cocked when owners are asleep, cry for no apparent
reason meow all night. Plop down in the middle where everybody walks favor
packaging over toy. Sit on the laptop kitty pounce, trip, faceplant.
</p>
</body>
</html>
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/examples/factbook.xml | <?xml version="1.0" encoding="UTF-8"?>
<factbook>
<country>
<name>United States</name>
<capital>Washington, DC</capital>
<leader>Joe Biden</leader>
<sport>Baseball</sport>
</country>
<country>
<name>Canada</name>
<capital>Ottawa</capital>
<leader>Justin Trudeau</leader>
<sport>Hockey</sport>
</country>
<country>
<name>France</name>
<capital>Paris</capital>
<leader>Emmanuel Macron</leader>
<sport>Soccer</sport>
</country>
<country>
<name>Trinidad & Tobado</name>
<capital>Port of Spain</capital>
<leader>Keith Rowley</leader>
<sport>Track & Field</sport>
</country>
</factbook>
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/examples/hello_world.py | #!/usr/bin/env python3
import sys
def main() -> int:
print("Hello World!") # noqa: T201
return 0
if __name__ == "__main__":
sys.exit(main())
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/test_zep.py | from __future__ import annotations
import copy
from typing import TYPE_CHECKING, List
import pytest
from langchain_core.documents import Document
from pytest_mock import MockerFixture
from langchain_community.retrievers import ZepRetriever
if TYPE_CHECKING:
from zep_python import MemorySearchResult, ZepClient
@pytest.fixture
def search_results() -> List[MemorySearchResult]:
from zep_python import MemorySearchResult, Message
search_result = [
{
"message": {
"uuid": "66830914-19f5-490b-8677-1ba06bcd556b",
"created_at": "2023-05-18T20:40:42.743773Z",
"role": "user",
"content": "I'm looking to plan a trip to Iceland. Can you help me?",
"token_count": 17,
},
"summary": None,
"dist": 0.8734284910450115,
},
{
"message": {
"uuid": "015e618c-ba9d-45b6-95c3-77a8e611570b",
"created_at": "2023-05-18T20:40:42.743773Z",
"role": "user",
"content": "How much does a trip to Iceland typically cost?",
"token_count": 12,
},
"summary": None,
"dist": 0.8554048017463456,
},
]
return [
MemorySearchResult(
message=Message.parse_obj(result["message"]),
summary=result["summary"],
dist=result["dist"],
)
for result in search_result
]
@pytest.fixture
@pytest.mark.requires("zep_python")
def zep_retriever(
mocker: MockerFixture, search_results: List[MemorySearchResult]
) -> ZepRetriever:
mock_zep_client: ZepClient = mocker.patch("zep_python.ZepClient", autospec=True)
mock_zep_client.memory = mocker.patch(
"zep_python.memory.client.MemoryClient", autospec=True
)
mock_zep_client.memory.search_memory.return_value = copy.deepcopy( # type: ignore
search_results
)
mock_zep_client.memory.asearch_memory.return_value = copy.deepcopy( # type: ignore
search_results
)
zep = ZepRetriever(session_id="123", url="http://localhost:8000") # type: ignore[call-arg]
zep.zep_client = mock_zep_client
return zep
@pytest.mark.requires("zep_python")
def test_zep_retriever_invoke(
zep_retriever: ZepRetriever, search_results: List[MemorySearchResult]
) -> None:
documents: List[Document] = zep_retriever.invoke("My trip to Iceland")
_test_documents(documents, search_results)
@pytest.mark.requires("zep_python")
async def test_zep_retriever_ainvoke(
zep_retriever: ZepRetriever, search_results: List[MemorySearchResult]
) -> None:
documents: List[Document] = await zep_retriever.ainvoke("My trip to Iceland")
_test_documents(documents, search_results)
def _test_documents(
documents: List[Document], search_results: List[MemorySearchResult]
) -> None:
assert len(documents) == 2
for i, document in enumerate(documents):
assert document.page_content == search_results[i].message.get( # type: ignore
"content"
)
assert document.metadata.get("uuid") == search_results[i].message.get( # type: ignore
"uuid"
)
assert document.metadata.get("role") == search_results[i].message.get( # type: ignore
"role"
)
assert document.metadata.get("score") == search_results[i].dist
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/test_google_docai_warehoure_retriever.py | """Test Google Cloud Document AI Warehouse retriever."""
import os
from langchain_core.documents import Document
from langchain_community.retrievers import GoogleDocumentAIWarehouseRetriever
def test_google_documentai_warehoure_retriever() -> None:
"""In order to run this test, you should provide a project_id and user_ldap.
Example:
export USER_LDAP=...
export PROJECT_NUMBER=...
"""
project_number = os.environ["PROJECT_NUMBER"]
user_ldap = os.environ["USER_LDAP"]
docai_wh_retriever = GoogleDocumentAIWarehouseRetriever(
project_number=project_number
)
documents = docai_wh_retriever.invoke(
"What are Alphabet's Other Bets?", user_ldap=user_ldap
)
assert len(documents) > 0
for doc in documents:
assert isinstance(doc, Document)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/test_thirdai_neuraldb.py | import os
import shutil
from typing import Generator
import pytest
from langchain_community.retrievers import NeuralDBRetriever
@pytest.fixture(scope="session")
def test_csv() -> Generator[str, None, None]:
csv = "thirdai-test.csv"
with open(csv, "w") as o:
o.write("column_1,column_2\n")
o.write("column one,column two\n")
yield csv
os.remove(csv)
def assert_result_correctness(documents: list) -> None:
assert len(documents) == 1
assert documents[0].page_content == "column_1: column one\n\ncolumn_2: column two"
@pytest.mark.requires("thirdai[neural_db]")
def test_neuraldb_retriever_from_scratch(test_csv: str) -> None:
retriever = NeuralDBRetriever.from_scratch()
retriever.insert([test_csv])
documents = retriever.invoke("column")
assert_result_correctness(documents)
@pytest.mark.requires("thirdai[neural_db]")
def test_neuraldb_retriever_from_checkpoint(test_csv: str) -> None:
checkpoint = "thirdai-test-save.ndb"
if os.path.exists(checkpoint):
shutil.rmtree(checkpoint)
try:
retriever = NeuralDBRetriever.from_scratch()
retriever.insert([test_csv])
retriever.save(checkpoint)
loaded_retriever = NeuralDBRetriever.from_checkpoint(checkpoint)
documents = loaded_retriever.invoke("column")
assert_result_correctness(documents)
finally:
if os.path.exists(checkpoint):
shutil.rmtree(checkpoint)
@pytest.mark.requires("thirdai[neural_db]")
def test_neuraldb_retriever_other_methods(test_csv: str) -> None:
retriever = NeuralDBRetriever.from_scratch()
retriever.insert([test_csv])
# Make sure they don't throw an error.
retriever.associate("A", "B")
retriever.associate_batch([("A", "B"), ("C", "D")])
retriever.upvote("A", 0)
retriever.upvote_batch([("A", 0), ("B", 0)])
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/retrievers/test_arxiv.py | """Integration test for Arxiv API Wrapper."""
from typing import List
import pytest
from langchain_core.documents import Document
from langchain_community.retrievers import ArxivRetriever
@pytest.fixture
def retriever() -> ArxivRetriever:
return ArxivRetriever() # type: ignore[call-arg]
def assert_docs(docs: List[Document], all_meta: bool = False) -> None:
for doc in docs:
assert doc.page_content
assert doc.metadata
main_meta = {"Published", "Title", "Authors", "Summary"}
assert set(doc.metadata).issuperset(main_meta)
if all_meta:
assert len(set(doc.metadata)) > len(main_meta)
else:
assert len(set(doc.metadata)) == len(main_meta)
def test_load_success(retriever: ArxivRetriever) -> None:
docs = retriever.invoke("1605.08386")
assert len(docs) == 1
assert_docs(docs, all_meta=False)
def test_load_success_all_meta(retriever: ArxivRetriever) -> None:
retriever.load_all_available_meta = True
retriever.load_max_docs = 2
docs = retriever.invoke("ChatGPT")
assert len(docs) > 1
assert_docs(docs, all_meta=True)
def test_load_success_init_args() -> None:
retriever = ArxivRetriever(load_max_docs=1, load_all_available_meta=True) # type: ignore[call-arg]
docs = retriever.invoke("ChatGPT")
assert len(docs) == 1
assert_docs(docs, all_meta=True)
def test_load_no_result(retriever: ArxivRetriever) -> None:
docs = retriever.invoke("1605.08386WWW")
assert not docs
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.