index | repo_id | file_path | content |
|---|---|---|---|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chains/test_pebblo_retrieval.py | """
Unit tests for the PebbloRetrievalQA chain
"""
from typing import List
from unittest.mock import Mock
import pytest
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.vectorstores import (
InMemoryVectorStore,
VectorStore,
VectorStoreRetriever,
)
from langchain_community.chains import PebbloRetrievalQA
from langchain_community.chains.pebblo_retrieval.models import (
AuthContext,
ChainInput,
SemanticContext,
)
from langchain_community.vectorstores.pinecone import Pinecone
from tests.unit_tests.llms.fake_llm import FakeLLM
class FakeRetriever(VectorStoreRetriever):
"""
Test util that parrots the query back as documents
"""
vectorstore: VectorStore = Mock()
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
return [Document(page_content=query)]
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> List[Document]:
return [Document(page_content=query)]
@pytest.fixture
def retriever() -> FakeRetriever:
"""
Create a FakeRetriever instance
"""
retriever = FakeRetriever()
retriever.search_kwargs = {}
    # Mark the mocked vectorstore as a Pinecone instance so it passes the supported-vectorstore check
retriever.vectorstore.__class__ = Pinecone
return retriever
@pytest.fixture
def pebblo_retrieval_qa(retriever: FakeRetriever) -> PebbloRetrievalQA:
"""
Create a PebbloRetrievalQA instance
"""
pebblo_retrieval_qa = PebbloRetrievalQA.from_chain_type(
llm=FakeLLM(),
chain_type="stuff",
retriever=retriever,
owner="owner",
description="description",
app_name="app_name",
)
return pebblo_retrieval_qa
def test_invoke(pebblo_retrieval_qa: PebbloRetrievalQA) -> None:
"""
Test that the invoke method returns a non-None result
"""
# Create a fake auth context and semantic context
auth_context = AuthContext(
user_id="fake_user@email.com",
user_auth=["fake-group", "fake-group2"],
)
semantic_context_dict = {
"pebblo_semantic_topics": {"deny": ["harmful-advice"]},
"pebblo_semantic_entities": {"deny": ["credit-card"]},
}
semantic_context = SemanticContext(**semantic_context_dict)
question = "What is the meaning of life?"
chain_input_obj = ChainInput(
query=question, auth_context=auth_context, semantic_context=semantic_context
)
response = pebblo_retrieval_qa.invoke(chain_input_obj.dict())
assert response is not None
def test_validate_vectorstore(retriever: FakeRetriever) -> None:
"""
Test vectorstore validation
"""
# No exception should be raised for supported vectorstores (Pinecone)
_ = PebbloRetrievalQA.from_chain_type(
llm=FakeLLM(),
chain_type="stuff",
retriever=retriever,
owner="owner",
description="description",
app_name="app_name",
)
unsupported_retriever = FakeRetriever()
unsupported_retriever.search_kwargs = {}
    # Mark the mocked vectorstore as an unsupported type (InMemoryVectorStore)
unsupported_retriever.vectorstore.__class__ = InMemoryVectorStore
# validate_vectorstore method should raise a ValueError for unsupported vectorstores
with pytest.raises(ValueError) as exc_info:
_ = PebbloRetrievalQA.from_chain_type(
llm=FakeLLM(),
chain_type="stuff",
retriever=unsupported_retriever,
owner="owner",
description="description",
app_name="app_name",
)
assert (
"Vectorstore must be an instance of one of the supported vectorstores"
in str(exc_info.value)
)
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chains/test_natbot.py | """Test functionality related to natbot."""
from typing import Any, Dict, List, Optional
from langchain.chains.natbot.base import NatBotChain
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
class FakeLLM(LLM):
"""Fake LLM wrapper for testing purposes."""
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Return `foo` if longer than 10000 words, else `bar`."""
if len(prompt) > 10000:
return "foo"
else:
return "bar"
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake"
def get_num_tokens(self, text: str) -> int:
return len(text.split())
@property
def _identifying_params(self) -> Dict[str, Any]:
return {}
def test_proper_inputs() -> None:
"""Test that natbot shortens inputs correctly."""
nat_bot_chain = NatBotChain.from_llm(FakeLLM(), objective="testing")
url = "foo" * 10000
browser_content = "foo" * 10000
output = nat_bot_chain.execute(url, browser_content)
assert output == "bar"
def test_variable_key_naming() -> None:
"""Test that natbot handles variable key naming correctly."""
nat_bot_chain = NatBotChain.from_llm(
FakeLLM(),
objective="testing",
input_url_key="u",
input_browser_content_key="b",
output_key="c",
)
output = nat_bot_chain.execute("foo", "foo")
assert output == "bar"
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chains/test_api.py | """Test APIChain functionality."""
import json
from typing import Any
import pytest
from langchain.chains.api.base import APIChain
from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
from langchain.chains.llm import LLMChain
from langchain_community.utilities.requests import TextRequestsWrapper
from tests.unit_tests.llms.fake_llm import FakeLLM
class FakeRequestsChain(TextRequestsWrapper):
"""Fake requests chain just for testing purposes."""
output: str
def get(self, url: str, **kwargs: Any) -> str:
"""Just return the specified output."""
return self.output
def get_test_api_data() -> dict:
"""Fake api data to use for testing."""
api_docs = """
This API endpoint will search the notes for a user.
Endpoint: https://thisapidoesntexist.com
GET /api/notes
Query parameters:
q | string | The search term for notes
"""
return {
"api_docs": api_docs,
"question": "Search for notes containing langchain",
"api_url": "https://thisapidoesntexist.com/api/notes?q=langchain",
"api_response": json.dumps(
{
"success": True,
"results": [{"id": 1, "content": "Langchain is awesome!"}],
}
),
"api_summary": "There is 1 note about langchain.",
}
def get_api_chain(**kwargs: Any) -> APIChain:
"""Fake LLM API chain for testing."""
data = get_test_api_data()
test_api_docs = data["api_docs"]
test_question = data["question"]
test_url = data["api_url"]
test_api_response = data["api_response"]
test_api_summary = data["api_summary"]
api_url_query_prompt = API_URL_PROMPT.format(
api_docs=test_api_docs, question=test_question
)
api_response_prompt = API_RESPONSE_PROMPT.format(
api_docs=test_api_docs,
question=test_question,
api_url=test_url,
api_response=test_api_response,
)
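    # FakeLLM maps each fully rendered prompt string to its canned response.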
queries = {api_url_query_prompt: test_url, api_response_prompt: test_api_summary}
fake_llm = FakeLLM(queries=queries)
api_request_chain = LLMChain(llm=fake_llm, prompt=API_URL_PROMPT)
api_answer_chain = LLMChain(llm=fake_llm, prompt=API_RESPONSE_PROMPT)
requests_wrapper = FakeRequestsChain(output=test_api_response)
return APIChain(
api_request_chain=api_request_chain,
api_answer_chain=api_answer_chain,
requests_wrapper=requests_wrapper,
api_docs=test_api_docs,
**kwargs,
)
def test_api_question() -> None:
"""Test simple question that needs API access."""
with pytest.raises(ValueError):
get_api_chain()
with pytest.raises(ValueError):
get_api_chain(limit_to_domains=tuple())
# All domains allowed (not advised)
api_chain = get_api_chain(limit_to_domains=None)
data = get_test_api_data()
assert api_chain.run(data["question"]) == data["api_summary"]
# Use a domain that's allowed
api_chain = get_api_chain(
limit_to_domains=["https://thisapidoesntexist.com/api/notes?q=langchain"]
)
    # The request targets an allowed domain, so it succeeds
assert api_chain.run(data["question"]) == data["api_summary"]
# Use domains that are not valid
api_chain = get_api_chain(limit_to_domains=["h", "*"])
with pytest.raises(ValueError):
# Attempts to make a request against a domain that's not allowed
assert api_chain.run(data["question"]) == data["api_summary"]
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chains/test_llm.py | """Test LLM chain."""
from typing import Dict, List, Union
import pytest
from langchain.chains.llm import LLMChain
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import PromptTemplate
from tests.unit_tests.llms.fake_llm import FakeLLM
class FakeOutputParser(BaseOutputParser):
"""Fake output parser class for testing."""
def parse(self, text: str) -> Union[str, List[str], Dict[str, str]]:
"""Parse by splitting."""
return text.split()
@pytest.fixture
def fake_llm_chain() -> LLMChain:
"""Fake LLM chain for testing purposes."""
prompt = PromptTemplate(input_variables=["bar"], template="This is a {bar}:")
return LLMChain(prompt=prompt, llm=FakeLLM(), output_key="text1")
def test_missing_inputs(fake_llm_chain: LLMChain) -> None:
"""Test error is raised if inputs are missing."""
with pytest.raises(ValueError):
fake_llm_chain({"foo": "bar"})
def test_valid_call(fake_llm_chain: LLMChain) -> None:
"""Test valid call of LLM chain."""
output = fake_llm_chain({"bar": "baz"})
assert output == {"bar": "baz", "text1": "foo"}
# Test with stop words.
output = fake_llm_chain({"bar": "baz", "stop": ["foo"]})
# Response should be `bar` now.
assert output == {"bar": "baz", "stop": ["foo"], "text1": "bar"}
def test_predict_method(fake_llm_chain: LLMChain) -> None:
"""Test predict method works."""
output = fake_llm_chain.predict(bar="baz")
assert output == "foo"
def test_predict_and_parse() -> None:
"""Test parsing ability."""
prompt = PromptTemplate(
input_variables=["foo"], template="{foo}", output_parser=FakeOutputParser()
)
llm = FakeLLM(queries={"foo": "foo bar"})
chain = LLMChain(prompt=prompt, llm=llm)
output = chain.predict_and_parse(foo="foo")
assert output == ["foo", "bar"]
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/evaluation/test_loading.py | """Test the loading function for evaluators."""
from typing import List
import pytest
from langchain.evaluation.loading import EvaluatorType, load_evaluators
from langchain.evaluation.schema import PairwiseStringEvaluator, StringEvaluator
from langchain_core.embeddings import FakeEmbeddings
from tests.unit_tests.llms.fake_chat_model import FakeChatModel
from tests.unit_tests.llms.fake_llm import FakeLLM
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("evaluator_type", EvaluatorType)
def test_load_evaluators(evaluator_type: EvaluatorType) -> None:
"""Test loading evaluators."""
fake_llm = FakeChatModel()
embeddings = FakeEmbeddings(size=32)
load_evaluators([evaluator_type], llm=fake_llm, embeddings=embeddings)
# Test as string
load_evaluators(
[evaluator_type.value], # type: ignore
llm=fake_llm,
embeddings=embeddings,
)
@pytest.mark.parametrize(
"evaluator_types",
[
[EvaluatorType.LABELED_CRITERIA],
[EvaluatorType.LABELED_PAIRWISE_STRING],
[EvaluatorType.LABELED_SCORE_STRING],
[EvaluatorType.QA],
[EvaluatorType.CONTEXT_QA],
[EvaluatorType.COT_QA],
[EvaluatorType.COT_QA, EvaluatorType.LABELED_CRITERIA],
[
EvaluatorType.COT_QA,
EvaluatorType.LABELED_CRITERIA,
EvaluatorType.LABELED_PAIRWISE_STRING,
],
[EvaluatorType.JSON_EQUALITY],
[EvaluatorType.EXACT_MATCH, EvaluatorType.REGEX_MATCH],
],
)
def test_eval_chain_requires_references(evaluator_types: List[EvaluatorType]) -> None:
"""Test loading evaluators."""
fake_llm = FakeLLM(
queries={"text": "The meaning of life\nCORRECT"}, sequential_responses=True
)
evaluators = load_evaluators(
evaluator_types,
llm=fake_llm,
)
for evaluator in evaluators:
if not isinstance(evaluator, (StringEvaluator, PairwiseStringEvaluator)):
raise ValueError("Evaluator is not a [pairwise]string evaluator")
assert evaluator.requires_reference
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_octoai.py | import pytest
from pydantic import SecretStr, ValidationError
from langchain_community.chat_models.octoai import ChatOctoAI
DEFAULT_API_BASE = "https://text.octoai.run/v1/"
DEFAULT_MODEL = "llama-2-13b-chat"
@pytest.mark.requires("openai")
def test__default_octoai_api_base() -> None:
chat = ChatOctoAI(octoai_api_token=SecretStr("test_token")) # type: ignore[call-arg]
assert chat.octoai_api_base == DEFAULT_API_BASE
@pytest.mark.requires("openai")
def test__default_octoai_api_token() -> None:
chat = ChatOctoAI(octoai_api_token=SecretStr("test_token")) # type: ignore[call-arg]
assert chat.octoai_api_token.get_secret_value() == "test_token"
@pytest.mark.requires("openai")
def test__default_model_name() -> None:
chat = ChatOctoAI(octoai_api_token=SecretStr("test_token")) # type: ignore[call-arg]
assert chat.model_name == DEFAULT_MODEL
@pytest.mark.requires("openai")
def test__field_aliases() -> None:
chat = ChatOctoAI(octoai_api_token=SecretStr("test_token"), model="custom-model") # type: ignore[call-arg]
assert chat.model_name == "custom-model"
assert chat.octoai_api_token.get_secret_value() == "test_token"
@pytest.mark.requires("openai")
def test__missing_octoai_api_token() -> None:
with pytest.raises(ValidationError) as e:
ChatOctoAI()
assert "Did not find octoai_api_token" in str(e)
@pytest.mark.requires("openai")
def test__all_fields_provided() -> None:
chat = ChatOctoAI( # type: ignore[call-arg]
octoai_api_token=SecretStr("test_token"),
model="custom-model",
octoai_api_base="https://custom.api/base/",
)
assert chat.octoai_api_base == "https://custom.api/base/"
assert chat.octoai_api_token.get_secret_value() == "test_token"
assert chat.model_name == "custom-model"
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_sparkllm.py | import pytest
from langchain_core.messages import (
AIMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.output_parsers.openai_tools import (
parse_tool_call,
)
from langchain_community.chat_models.sparkllm import (
ChatSparkLLM,
convert_dict_to_message,
convert_message_to_dict,
)
def test__convert_dict_to_message_human() -> None:
message_dict = {"role": "user", "content": "foo"}
result = convert_dict_to_message(message_dict)
expected_output = HumanMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_ai() -> None:
message_dict = {"role": "assistant", "content": "foo"}
result = convert_dict_to_message(message_dict)
expected_output = AIMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_other_role() -> None:
message_dict = {"role": "system", "content": "foo"}
result = convert_dict_to_message(message_dict)
expected_output = SystemMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_function_call() -> None:
raw_function_calls = [
{
"function": {
"name": "get_current_weather",
"arguments": '{"location": "Boston", "unit": "fahrenheit"}',
},
"type": "function",
}
]
message_dict = {
"role": "assistant",
"content": "foo",
"tool_calls": raw_function_calls,
}
result = convert_dict_to_message(message_dict)
tool_calls = [
parse_tool_call(raw_tool_call, return_id=True)
for raw_tool_call in raw_function_calls
]
expected_output = AIMessage(
content="foo",
additional_kwargs={"tool_calls": raw_function_calls},
tool_calls=tool_calls,
invalid_tool_calls=[],
)
assert result == expected_output
def test__convert_message_to_dict_human() -> None:
message = HumanMessage(content="foo")
result = convert_message_to_dict(message)
expected_output = {"role": "user", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_ai() -> None:
message = AIMessage(content="foo")
result = convert_message_to_dict(message)
expected_output = {"role": "assistant", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_system() -> None:
message = SystemMessage(content="foo")
result = convert_message_to_dict(message)
expected_output = {"role": "system", "content": "foo"}
assert result == expected_output
@pytest.mark.requires("websocket")
def test__chat_spark_llm_initialization() -> None:
chat = ChatSparkLLM(
app_id="IFLYTEK_SPARK_APP_ID",
api_key="IFLYTEK_SPARK_API_KEY",
api_secret="IFLYTEK_SPARK_API_SECRET",
api_url="IFLYTEK_SPARK_API_URL",
model="IFLYTEK_SPARK_LLM_DOMAIN",
timeout=40,
temperature=0.1,
top_k=3,
)
assert chat.spark_app_id == "IFLYTEK_SPARK_APP_ID"
assert chat.spark_api_key == "IFLYTEK_SPARK_API_KEY"
assert chat.spark_api_secret == "IFLYTEK_SPARK_API_SECRET"
assert chat.spark_api_url == "IFLYTEK_SPARK_API_URL"
assert chat.spark_llm_domain == "IFLYTEK_SPARK_LLM_DOMAIN"
assert chat.request_timeout == 40
assert chat.temperature == 0.1
assert chat.top_k == 3
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_oci_generative_ai.py | """Test OCI Generative AI LLM service"""
from unittest.mock import MagicMock
import pytest
from langchain_core.messages import HumanMessage
from pytest import MonkeyPatch
from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI
class MockResponseDict(dict):
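    """dict subclass exposing keys as attributes, standing in for OCI SDK response objects."""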
def __getattr__(self, val): # type: ignore[no-untyped-def]
return self[val]
@pytest.mark.requires("oci")
@pytest.mark.parametrize(
"test_model_id", ["cohere.command-r-16k", "meta.llama-3-70b-instruct"]
)
def test_llm_chat(monkeypatch: MonkeyPatch, test_model_id: str) -> None:
"""Test valid chat call to OCI Generative AI LLM service."""
oci_gen_ai_client = MagicMock()
llm = ChatOCIGenAI(model_id=test_model_id, client=oci_gen_ai_client)
model_id = llm.model_id
if model_id is None:
raise ValueError("Model ID is required for OCI Generative AI LLM service.")
provider = model_id.split(".")[0].lower()
def mocked_response(*args): # type: ignore[no-untyped-def]
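        """Return a canned, OCI-SDK-shaped chat response for the provider under test."""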
response_text = "Assistant chat reply."
response = None
if provider == "cohere":
response = MockResponseDict(
{
"status": 200,
"data": MockResponseDict(
{
"chat_response": MockResponseDict(
{
"text": response_text,
"finish_reason": "completed",
"is_search_required": None,
"search_queries": None,
"citations": None,
"documents": None,
"tool_calls": None,
}
),
"model_id": "cohere.command-r-16k",
"model_version": "1.0.0",
}
),
"request_id": "1234567890",
"headers": MockResponseDict(
{
"content-length": "123",
}
),
}
)
elif provider == "meta":
response = MockResponseDict(
{
"status": 200,
"data": MockResponseDict(
{
"chat_response": MockResponseDict(
{
"choices": [
MockResponseDict(
{
"message": MockResponseDict(
{
"content": [
MockResponseDict(
{
"text": response_text, # noqa: E501
}
)
]
}
),
"finish_reason": "completed",
}
)
],
"time_created": "2024-09-01T00:00:00Z",
}
),
"model_id": "cohere.command-r-16k",
"model_version": "1.0.0",
}
),
"request_id": "1234567890",
"headers": MockResponseDict(
{
"content-length": "123",
}
),
}
)
return response
monkeypatch.setattr(llm.client, "chat", mocked_response)
messages = [
HumanMessage(content="User message"),
]
expected = "Assistant chat reply."
actual = llm.invoke(messages, temperature=0.2)
assert actual.content == expected
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_baichuan.py | from typing import cast
import pytest
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
FunctionMessage,
HumanMessage,
HumanMessageChunk,
SystemMessage,
ToolMessage,
)
from pydantic import SecretStr
from pytest import CaptureFixture, MonkeyPatch
from langchain_community.chat_models.baichuan import (
ChatBaichuan,
_convert_delta_to_message_chunk,
_convert_dict_to_message,
_convert_message_to_dict,
)
def test_initialization() -> None:
"""Test chat model initialization."""
for model in [
ChatBaichuan(model="Baichuan2-Turbo-192K", api_key="test-api-key", timeout=40), # type: ignore[arg-type, call-arg]
ChatBaichuan( # type: ignore[call-arg]
model="Baichuan2-Turbo-192K",
baichuan_api_key="test-api-key",
request_timeout=40,
),
]:
assert model.model == "Baichuan2-Turbo-192K"
assert isinstance(model.baichuan_api_key, SecretStr)
assert model.request_timeout == 40
assert model.temperature == 0.3
def test__convert_message_to_dict_human() -> None:
message = HumanMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"role": "user", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_ai() -> None:
message = AIMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"role": "assistant", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_system() -> None:
message = SystemMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"role": "system", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_tool() -> None:
message = ToolMessage(name="foo", content="bar", tool_call_id="abc123")
result = _convert_message_to_dict(message)
expected_output = {
"name": "foo",
"content": "bar",
"tool_call_id": "abc123",
"role": "tool",
}
assert result == expected_output
def test__convert_message_to_dict_function() -> None:
message = FunctionMessage(name="foo", content="bar")
with pytest.raises(TypeError) as e:
_convert_message_to_dict(message)
assert "Got unknown type" in str(e)
def test__convert_dict_to_message_human() -> None:
message_dict = {"role": "user", "content": "foo"}
result = _convert_dict_to_message(message_dict)
expected_output = HumanMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_ai() -> None:
message_dict = {"role": "assistant", "content": "foo"}
result = _convert_dict_to_message(message_dict)
expected_output = AIMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_other_role() -> None:
message_dict = {"role": "system", "content": "foo"}
result = _convert_dict_to_message(message_dict)
expected_output = SystemMessage(content="foo")
assert result == expected_output
def test__convert_delta_to_message_assistant() -> None:
delta = {"role": "assistant", "content": "foo"}
result = _convert_delta_to_message_chunk(delta, AIMessageChunk)
expected_output = AIMessageChunk(content="foo")
assert result == expected_output
def test__convert_delta_to_message_human() -> None:
delta = {"role": "user", "content": "foo"}
result = _convert_delta_to_message_chunk(delta, HumanMessageChunk)
expected_output = HumanMessageChunk(content="foo")
assert result == expected_output
def test_baichuan_key_masked_when_passed_from_env(
monkeypatch: MonkeyPatch, capsys: CaptureFixture
) -> None:
"""Test initialization with an API key provided via an env variable"""
monkeypatch.setenv("BAICHUAN_API_KEY", "test-api-key")
chat = ChatBaichuan() # type: ignore[call-arg]
print(chat.baichuan_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
def test_baichuan_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
"""Test initialization with an API key provided via the initializer"""
chat = ChatBaichuan(baichuan_api_key="test-api-key") # type: ignore[call-arg]
print(chat.baichuan_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
def test_uses_actual_secret_value_from_secret_str() -> None:
"""Test that actual secret is retrieved using `.get_secret_value()`."""
chat = ChatBaichuan( # type: ignore[call-arg]
baichuan_api_key="test-api-key",
baichuan_secret_key="test-secret-key", # type: ignore[arg-type] # For backward compatibility
)
assert cast(SecretStr, chat.baichuan_api_key).get_secret_value() == "test-api-key"
assert (
cast(SecretStr, chat.baichuan_secret_key).get_secret_value()
== "test-secret-key"
)
def test_chat_baichuan_with_base_url() -> None:
chat = ChatBaichuan( # type: ignore[call-arg]
api_key="your-api-key", # type: ignore[arg-type]
base_url="https://exmaple.com", # type: ignore[arg-type]
)
assert chat.baichuan_api_base == "https://exmaple.com"
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_naver.py | """Test chat model integration."""
import json
import os
from typing import Any, AsyncGenerator, Generator, cast
from unittest.mock import patch
import pytest
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import (
AIMessage,
HumanMessage,
SystemMessage,
)
from pydantic import SecretStr
from langchain_community.chat_models import ChatClovaX
from langchain_community.chat_models.naver import (
_convert_message_to_naver_chat_message,
_convert_naver_chat_message_to_message,
)
os.environ["NCP_CLOVASTUDIO_API_KEY"] = "test_api_key"
os.environ["NCP_APIGW_API_KEY"] = "test_gw_key"
def test_initialization_api_key() -> None:
"""Test chat model initialization."""
chat_model = ChatClovaX(api_key="foo", apigw_api_key="bar") # type: ignore[arg-type]
assert (
cast(SecretStr, chat_model.ncp_clovastudio_api_key).get_secret_value() == "foo"
)
assert cast(SecretStr, chat_model.ncp_apigw_api_key).get_secret_value() == "bar"
def test_initialization_model_name() -> None:
llm = ChatClovaX(model="HCX-DASH-001") # type: ignore[call-arg]
assert llm.model_name == "HCX-DASH-001"
llm = ChatClovaX(model_name="HCX-DASH-001")
assert llm.model_name == "HCX-DASH-001"
def test_convert_dict_to_message_human() -> None:
message = {"role": "user", "content": "foo"}
result = _convert_naver_chat_message_to_message(message)
expected_output = HumanMessage(content="foo")
assert result == expected_output
assert _convert_message_to_naver_chat_message(expected_output) == message
def test_convert_dict_to_message_ai() -> None:
message = {"role": "assistant", "content": "foo"}
result = _convert_naver_chat_message_to_message(message)
expected_output = AIMessage(content="foo")
assert result == expected_output
assert _convert_message_to_naver_chat_message(expected_output) == message
def test_convert_dict_to_message_system() -> None:
message = {"role": "system", "content": "foo"}
result = _convert_naver_chat_message_to_message(message)
expected_output = SystemMessage(content="foo")
assert result == expected_output
assert _convert_message_to_naver_chat_message(expected_output) == message
@pytest.fixture
def mock_chat_completion_response() -> dict:
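    """Canned CLOVA Studio chat-completion payload, including stop reason and AI-filter scores."""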
return {
"status": {"code": "20000", "message": "OK"},
"result": {
"message": {
"role": "assistant",
"content": "Phrases: Record what happened today and prepare "
"for tomorrow. "
"The diary will make your life richer.",
},
"stopReason": "LENGTH",
"inputLength": 100,
"outputLength": 10,
"aiFilter": [
{"groupName": "curse", "name": "insult", "score": "1"},
{"groupName": "curse", "name": "discrimination", "score": "0"},
{
"groupName": "unsafeContents",
"name": "sexualHarassment",
"score": "2",
},
],
},
}
def test_naver_invoke(mock_chat_completion_response: dict) -> None:
llm = ChatClovaX()
completed = False
def mock_completion_with_retry(*args: Any, **kwargs: Any) -> Any:
nonlocal completed
completed = True
return mock_chat_completion_response
with patch.object(ChatClovaX, "_completion_with_retry", mock_completion_with_retry):
res = llm.invoke("Let's test it.")
assert (
res.content
== "Phrases: Record what happened today and prepare for tomorrow. "
"The diary will make your life richer."
)
assert completed
async def test_naver_ainvoke(mock_chat_completion_response: dict) -> None:
llm = ChatClovaX()
completed = False
async def mock_acompletion_with_retry(*args: Any, **kwargs: Any) -> Any:
nonlocal completed
completed = True
return mock_chat_completion_response
with patch.object(
ChatClovaX, "_acompletion_with_retry", mock_acompletion_with_retry
):
res = await llm.ainvoke("Let's test it.")
assert (
res.content
== "Phrases: Record what happened today and prepare for tomorrow. "
"The diary will make your life richer."
)
assert completed
def _make_completion_response_from_token(token: str): # type: ignore[no-untyped-def]
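    """Wrap a single token in a ServerSentEvent shaped like a CLOVA Studio streaming event."""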
from httpx_sse import ServerSentEvent
return ServerSentEvent(
event="token",
data=json.dumps(
dict(
index=0,
inputLength=89,
outputLength=1,
message=dict(
content=token,
role="assistant",
),
)
),
)
def mock_chat_stream(*args: Any, **kwargs: Any) -> Generator:
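    """Stand-in for the sync streaming API: yields a fixed sequence of token events."""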
def it() -> Generator:
for token in ["Hello", " how", " can", " I", " help", "?"]:
yield _make_completion_response_from_token(token)
return it()
async def mock_chat_astream(*args: Any, **kwargs: Any) -> AsyncGenerator:
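    """Async counterpart of mock_chat_stream."""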
async def it() -> AsyncGenerator:
for token in ["Hello", " how", " can", " I", " help", "?"]:
yield _make_completion_response_from_token(token)
return it()
class MyCustomHandler(BaseCallbackHandler):
last_token: str = ""
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
self.last_token = token
@patch(
"langchain_community.chat_models.ChatClovaX._completion_with_retry",
new=mock_chat_stream,
)
@pytest.mark.requires("httpx_sse")
def test_stream_with_callback() -> None:
callback = MyCustomHandler()
chat = ChatClovaX(callbacks=[callback])
for token in chat.stream("Hello"):
assert callback.last_token == token.content
@patch(
"langchain_community.chat_models.ChatClovaX._acompletion_with_retry",
new=mock_chat_astream,
)
@pytest.mark.requires("httpx_sse")
async def test_astream_with_callback() -> None:
callback = MyCustomHandler()
chat = ChatClovaX(callbacks=[callback])
async for token in chat.astream("Hello"):
assert callback.last_token == token.content
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_zhipuai.py | """Test ZhipuAI Chat API wrapper"""
import pytest
from langchain_core.messages import ToolMessage
from langchain_community.chat_models.zhipuai import (
ChatZhipuAI,
_convert_message_to_dict,
)
@pytest.mark.requires("httpx", "httpx_sse", "jwt")
def test_zhipuai_model_param() -> None:
llm = ChatZhipuAI(api_key="test", model="foo")
assert llm.model_name == "foo"
llm = ChatZhipuAI(api_key="test", model_name="foo") # type: ignore[call-arg]
assert llm.model_name == "foo"
def test__convert_message_to_dict_with_tool() -> None:
message = ToolMessage(name="foo", content="bar", tool_call_id="abc123")
result = _convert_message_to_dict(message)
expected_output = {
"name": "foo",
"content": "bar",
"tool_call_id": "abc123",
"role": "tool",
}
assert result == expected_output
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_kinetica.py | """Test `Kinetica` chat models"""
import logging
from typing import Any
from langchain_core.messages import AIMessage
from langchain_community.chat_models.kinetica import ChatKinetica, KineticaUtil
LOG = logging.getLogger(__name__)
class TestChatKinetica:
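    """Tests for ChatKinetica, driven by a canned SQL-assist context payload."""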
test_ctx_json: str = """
{
"payload":{
"question": "foo",
"context":[
{
"table":"demo.test_profiles",
"columns":[
"username VARCHAR (32) NOT NULL",
"name VARCHAR (32) NOT NULL",
"sex VARCHAR (1) NOT NULL",
"address VARCHAR (64) NOT NULL",
"mail VARCHAR (32) NOT NULL",
"birthdate TIMESTAMP NOT NULL"
],
"description":"Contains user profiles.",
"rules":[
]
},
{
"samples":{
"How many male users are there?":
"select count(1) as num_users from demo.test_profiles where sex = ''M'';"
}
}
]
}
}
"""
def test_convert_messages(self, monkeypatch: Any) -> None:
"""Test convert messages from context."""
def patch_kdbc() -> None:
return None
monkeypatch.setattr(KineticaUtil, "create_kdbc", patch_kdbc)
def patch_execute_sql(*args: Any, **kwargs: Any) -> dict:
return dict(Prompt=self.test_ctx_json)
monkeypatch.setattr(ChatKinetica, "_execute_sql", patch_execute_sql)
kinetica_llm = ChatKinetica() # type: ignore[call-arg]
test_messages = kinetica_llm.load_messages_from_context("test")
LOG.info(f"test_messages: {test_messages}")
ai_message = test_messages[-1]
assert isinstance(ai_message, AIMessage)
assert (
ai_message.content
== "select count(1) as num_users from demo.test_profiles where sex = 'M';"
)
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_openai.py | """Test OpenAI Chat API wrapper."""
import json
from typing import Any, List
from unittest.mock import MagicMock, patch
import pytest
from langchain_core.messages import (
AIMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain_community.adapters.openai import convert_dict_to_message
from langchain_community.chat_models.openai import ChatOpenAI
@pytest.mark.requires("openai")
def test_openai_model_param() -> None:
test_cases: List[dict] = [
{"model_name": "foo", "openai_api_key": "foo"},
{"model": "foo", "openai_api_key": "foo"},
{"model_name": "foo", "api_key": "foo"},
{"model_name": "foo", "openai_api_key": "foo", "max_retries": 2},
]
for case in test_cases:
llm = ChatOpenAI(**case)
assert llm.model_name == "foo", "Model name should be 'foo'"
assert llm.openai_api_key == "foo", "API key should be 'foo'"
assert hasattr(llm, "max_retries"), "max_retries attribute should exist"
assert llm.max_retries == 2, "max_retries default should be set to 2"
def test_function_message_dict_to_function_message() -> None:
content = json.dumps({"result": "Example #1"})
name = "test_function"
result = convert_dict_to_message(
{
"role": "function",
"name": name,
"content": content,
}
)
assert isinstance(result, FunctionMessage)
assert result.name == name
assert result.content == content
def test__convert_dict_to_message_human() -> None:
message = {"role": "user", "content": "foo"}
result = convert_dict_to_message(message)
expected_output = HumanMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_ai() -> None:
message = {"role": "assistant", "content": "foo"}
result = convert_dict_to_message(message)
expected_output = AIMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_system() -> None:
message = {"role": "system", "content": "foo"}
result = convert_dict_to_message(message)
expected_output = SystemMessage(content="foo")
assert result == expected_output
@pytest.fixture
def mock_completion() -> dict:
return {
"id": "chatcmpl-7fcZavknQda3SQ",
"object": "chat.completion",
"created": 1689989000,
"model": "gpt-3.5-turbo-0613",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "Bar Baz",
},
"finish_reason": "stop",
}
],
}
@pytest.mark.requires("openai")
def test_openai_predict(mock_completion: dict) -> None:
llm = ChatOpenAI(openai_api_key="foo") # type: ignore[call-arg]
mock_client = MagicMock()
completed = False
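    # The mocked create() flips this flag so the test can verify the client was called.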
def mock_create(*args: Any, **kwargs: Any) -> Any:
nonlocal completed
completed = True
return mock_completion
mock_client.create = mock_create
with patch.object(
llm,
"client",
mock_client,
):
res = llm.invoke("bar")
assert res.content == "Bar Baz"
assert completed
@pytest.mark.requires("openai")
async def test_openai_apredict(mock_completion: dict) -> None:
llm = ChatOpenAI(openai_api_key="foo") # type: ignore[call-arg]
mock_client = MagicMock()
completed = False
async def mock_create(*args: Any, **kwargs: Any) -> Any:
nonlocal completed
completed = True
return mock_completion
mock_client.create = mock_create
with patch.object(
llm,
"async_client",
mock_client,
):
res = await llm.apredict("bar")
assert res == "Bar Baz"
assert completed
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_edenai.py | """Test EdenAI Chat API wrapper."""
from typing import List
import pytest
from langchain_core.messages import (
BaseMessage,
HumanMessage,
SystemMessage,
ToolMessage,
)
from langchain_community.chat_models.edenai import (
_extract_edenai_tool_results_from_messages,
_format_edenai_messages,
_message_role,
)
@pytest.mark.parametrize(
("messages", "expected"),
[
(
[
SystemMessage(content="Translate the text from English to French"),
HumanMessage(content="Hello how are you today?"),
],
{
"text": "Hello how are you today?",
"previous_history": [],
"chatbot_global_action": "Translate the text from English to French",
"tool_results": [],
},
)
],
)
def test_edenai_messages_formatting(
    messages: List[BaseMessage], expected: dict
) -> None:
result = _format_edenai_messages(messages)
assert result == expected
@pytest.mark.parametrize(
("role", "role_response"),
[("ai", "assistant"), ("human", "user"), ("chat", "user")],
)
def test_edenai_message_role(role: str, role_response: str) -> None:
role = _message_role(role)
assert role == role_response
def test_extract_edenai_tool_results_mixed_messages() -> None:
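    """Only the trailing run of ToolMessages should be extracted; earlier ones stay put."""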
fake_other_msg = BaseMessage(content="content", type="other message")
messages = [
fake_other_msg,
ToolMessage(tool_call_id="id1", content="result1"),
fake_other_msg,
ToolMessage(tool_call_id="id2", content="result2"),
ToolMessage(tool_call_id="id3", content="result3"),
]
expected_tool_results = [
{"id": "id2", "result": "result2"},
{"id": "id3", "result": "result3"},
]
expected_other_messages = [
fake_other_msg,
ToolMessage(tool_call_id="id1", content="result1"),
fake_other_msg,
]
tool_results, other_messages = _extract_edenai_tool_results_from_messages(messages)
assert tool_results == expected_tool_results
assert other_messages == expected_other_messages
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_litellm.py | """Standard LangChain interface tests"""
from typing import Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ChatModelUnitTests
from langchain_community.chat_models.litellm import ChatLiteLLM
@pytest.mark.requires("litellm")
class TestLiteLLMStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatLiteLLM
@property
def chat_model_params(self) -> dict:
return {"api_key": "test_api_key"}
@pytest.mark.xfail(reason="Not yet implemented.")
def test_standard_params(self, model: BaseChatModel) -> None:
super().test_standard_params(model)
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_perplexity.py | """Test Perplexity Chat API wrapper."""
import os
import pytest
from langchain_community.chat_models import ChatPerplexity
os.environ["PPLX_API_KEY"] = "foo"
@pytest.mark.requires("openai")
def test_perplexity_model_name_param() -> None:
llm = ChatPerplexity(model="foo") # type: ignore[call-arg]
assert llm.model == "foo"
@pytest.mark.requires("openai")
def test_perplexity_model_kwargs() -> None:
llm = ChatPerplexity(model="test", model_kwargs={"foo": "bar"}) # type: ignore[call-arg]
assert llm.model_kwargs == {"foo": "bar"}
@pytest.mark.requires("openai")
def test_perplexity_initialization() -> None:
"""Test perplexity initialization."""
# Verify that chat perplexity can be initialized using a secret key provided
# as a parameter rather than an environment variable.
for model in [
ChatPerplexity( # type: ignore[call-arg]
model="test", timeout=1, api_key="test", temperature=0.7, verbose=True
),
ChatPerplexity( # type: ignore[call-arg]
model="test",
request_timeout=1,
pplx_api_key="test",
temperature=0.7,
verbose=True,
),
]:
assert model.request_timeout == 1
assert model.pplx_api_key == "test"
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_cloudflare_workersai.py | """Test CloudflareWorkersAI Chat API wrapper."""
from typing import Any, Dict, List, Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
BaseMessage,
HumanMessage,
SystemMessage,
ToolMessage,
)
from langchain_tests.unit_tests import ChatModelUnitTests
from langchain_community.chat_models.cloudflare_workersai import (
ChatCloudflareWorkersAI,
_convert_messages_to_cloudflare_messages,
)
class TestChatCloudflareWorkersAI(ChatModelUnitTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatCloudflareWorkersAI
@property
def chat_model_params(self) -> dict:
return {
"account_id": "my_account_id",
"api_token": "my_api_token",
"model": "@hf/nousresearch/hermes-2-pro-mistral-7b",
}
@pytest.mark.parametrize(
("messages", "expected"),
[
# Test case with a single HumanMessage
(
[HumanMessage(content="Hello, AI!")],
[{"role": "user", "content": "Hello, AI!"}],
),
# Test case with SystemMessage, HumanMessage, and AIMessage without tool calls
(
[
SystemMessage(content="System initialized."),
HumanMessage(content="Hello, AI!"),
AIMessage(content="Response from AI"),
],
[
{"role": "system", "content": "System initialized."},
{"role": "user", "content": "Hello, AI!"},
{"role": "assistant", "content": "Response from AI"},
],
),
# Test case with ToolMessage and tool_call_id
(
[
ToolMessage(
content="Tool message content", tool_call_id="tool_call_123"
),
],
[
{
"role": "tool",
"content": "Tool message content",
"tool_call_id": "tool_call_123",
}
],
),
],
)
def test_convert_messages_to_cloudflare_format(
messages: List[BaseMessage], expected: List[Dict[str, Any]]
) -> None:
result = _convert_messages_to_cloudflare_messages(messages)
assert result == expected
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_writer.py | import json
from typing import Any, Dict, List, Literal, Optional, Tuple, Type
from unittest import mock
from unittest.mock import AsyncMock, MagicMock
import pytest
from langchain_core.callbacks.manager import CallbackManager
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage
from langchain_tests.unit_tests import ChatModelUnitTests
from pydantic import SecretStr
from langchain_community.chat_models.writer import ChatWriter
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
"""Classes for mocking Writer responses."""
class ChoiceDelta:
def __init__(self, content: str):
self.content = content
class ChunkChoice:
def __init__(self, index: int, finish_reason: str, delta: ChoiceDelta):
self.index = index
self.finish_reason = finish_reason
self.delta = delta
class ChatCompletionChunk:
def __init__(
self,
id: str,
object: str,
created: int,
model: str,
choices: List[ChunkChoice],
):
self.id = id
self.object = object
self.created = created
self.model = model
self.choices = choices
class ToolCallFunction:
def __init__(self, name: str, arguments: str):
self.name = name
self.arguments = arguments
class ChoiceMessageToolCall:
def __init__(self, id: str, type: str, function: ToolCallFunction):
self.id = id
self.type = type
self.function = function
class Usage:
def __init__(
self,
prompt_tokens: int,
completion_tokens: int,
total_tokens: int,
):
self.prompt_tokens = prompt_tokens
self.completion_tokens = completion_tokens
self.total_tokens = total_tokens
class ChoiceMessage:
def __init__(
self,
role: str,
content: str,
tool_calls: Optional[List[ChoiceMessageToolCall]] = None,
):
self.role = role
self.content = content
self.tool_calls = tool_calls
class Choice:
def __init__(self, index: int, finish_reason: str, message: ChoiceMessage):
self.index = index
self.finish_reason = finish_reason
self.message = message
class Chat:
def __init__(
self,
id: str,
object: str,
created: int,
system_fingerprint: str,
model: str,
usage: Usage,
choices: List[Choice],
):
self.id = id
self.object = object
self.created = created
self.system_fingerprint = system_fingerprint
self.model = model
self.usage = usage
self.choices = choices
@pytest.mark.requires("writerai")
class TestChatWriterCustom:
"""Test case for ChatWriter"""
@pytest.fixture(autouse=True)
def mock_unstreaming_completion(self) -> Chat:
"""Fixture providing a mock API response."""
return Chat(
id="chat-12345",
object="chat.completion",
created=1699000000,
model="palmyra-x-004",
system_fingerprint="v1",
usage=Usage(prompt_tokens=10, completion_tokens=8, total_tokens=18),
choices=[
Choice(
index=0,
finish_reason="stop",
message=ChoiceMessage(
role="assistant",
content="Hello! How can I help you?",
),
)
],
)
@pytest.fixture(autouse=True)
def mock_tool_call_choice_response(self) -> Chat:
return Chat(
id="chat-12345",
object="chat.completion",
created=1699000000,
model="palmyra-x-004",
system_fingerprint="v1",
usage=Usage(prompt_tokens=29, completion_tokens=32, total_tokens=61),
choices=[
Choice(
index=0,
finish_reason="tool_calls",
message=ChoiceMessage(
role="assistant",
content="",
tool_calls=[
ChoiceMessageToolCall(
id="call_abc123",
type="function",
function=ToolCallFunction(
name="GetWeather",
arguments='{"location": "London"}',
),
)
],
),
)
],
)
@pytest.fixture(autouse=True)
def mock_streaming_chunks(self) -> List[ChatCompletionChunk]:
"""Fixture providing mock streaming response chunks."""
return [
ChatCompletionChunk(
id="chat-12345",
object="chat.completion",
created=1699000000,
model="palmyra-x-004",
choices=[
ChunkChoice(
index=0,
finish_reason="stop",
delta=ChoiceDelta(content="Hello! "),
)
],
),
ChatCompletionChunk(
id="chat-12345",
object="chat.completion",
created=1699000000,
model="palmyra-x-004",
choices=[
ChunkChoice(
index=0,
finish_reason="stop",
delta=ChoiceDelta(content="How can I help you?"),
)
],
),
]
def test_writer_model_param(self) -> None:
"""Test different ways to initialize the chat model."""
test_cases: List[dict] = [
{
"model_name": "palmyra-x-004",
"api_key": "key",
},
{
"model": "palmyra-x-004",
"api_key": "key",
},
{
"model": "palmyra-x-004",
"temperature": 0.5,
"api_key": "key",
},
]
for case in test_cases:
chat = ChatWriter(**case)
assert chat.model_name == "palmyra-x-004"
assert chat.temperature == (0.5 if "temperature" in case else 0.7)
def test_convert_writer_to_langchain_human(self) -> None:
"""Test converting a human message dict to a LangChain message."""
message = {"role": "user", "content": "Hello"}
result = ChatWriter._convert_writer_to_langchain(message)
assert isinstance(result, HumanMessage)
assert result.content == "Hello"
def test_convert_writer_to_langchain_ai(self) -> None:
"""Test converting an AI message dict to a LangChain message."""
message = {"role": "assistant", "content": "Hello"}
result = ChatWriter._convert_writer_to_langchain(message)
assert isinstance(result, AIMessage)
assert result.content == "Hello"
def test_convert_writer_to_langchain_system(self) -> None:
"""Test converting a system message dict to a LangChain message."""
message = {"role": "system", "content": "You are a helpful assistant"}
result = ChatWriter._convert_writer_to_langchain(message)
assert isinstance(result, SystemMessage)
assert result.content == "You are a helpful assistant"
def test_convert_writer_to_langchain_tool_call(self) -> None:
"""Test converting a tool call message dict to a LangChain message."""
content = json.dumps({"result": 42})
message = {
"role": "tool",
"name": "get_number",
"content": content,
"tool_call_id": "call_abc123",
}
result = ChatWriter._convert_writer_to_langchain(message)
assert isinstance(result, ToolMessage)
assert result.name == "get_number"
assert result.content == content
def test_convert_writer_to_langchain_with_tool_calls(self) -> None:
"""Test converting an AIMessage with tool calls."""
message = {
"role": "assistant",
"content": "",
"tool_calls": [
{
"id": "call_abc123",
"type": "function",
"function": {
"name": "get_weather",
"arguments": '{"location": "London"}',
},
}
],
}
result = ChatWriter._convert_writer_to_langchain(message)
assert isinstance(result, AIMessage)
assert result.tool_calls
assert len(result.tool_calls) == 1
assert result.tool_calls[0]["name"] == "get_weather"
assert result.tool_calls[0]["args"]["location"] == "London"
def test_sync_completion(
        self, mock_unstreaming_completion: Chat
) -> None:
"""Test basic chat completion with mocked response."""
chat = ChatWriter(api_key=SecretStr("key"))
mock_client = MagicMock()
mock_client.chat.chat.return_value = mock_unstreaming_completion
with mock.patch.object(chat, "client", mock_client):
message = HumanMessage(content="Hi there!")
response = chat.invoke([message])
assert isinstance(response, AIMessage)
assert response.content == "Hello! How can I help you?"
@pytest.mark.asyncio
async def test_async_completion(
        self, mock_unstreaming_completion: Chat
) -> None:
"""Test async chat completion with mocked response."""
chat = ChatWriter(api_key=SecretStr("key"))
mock_async_client = AsyncMock()
mock_async_client.chat.chat.return_value = mock_unstreaming_completion
with mock.patch.object(chat, "async_client", mock_async_client):
message = HumanMessage(content="Hi there!")
response = await chat.ainvoke([message])
assert isinstance(response, AIMessage)
assert response.content == "Hello! How can I help you?"
def test_sync_streaming(
self, mock_streaming_chunks: List[ChatCompletionChunk]
) -> None:
"""Test sync streaming with callback handler."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = ChatWriter(
api_key=SecretStr("key"),
callback_manager=callback_manager,
max_tokens=10,
)
mock_client = MagicMock()
mock_response = MagicMock()
mock_response.__iter__.return_value = mock_streaming_chunks
mock_client.chat.chat.return_value = mock_response
with mock.patch.object(chat, "client", mock_client):
message = HumanMessage(content="Hi")
response = chat.stream([message])
response_message = ""
for chunk in response:
response_message += str(chunk.content)
assert callback_handler.llm_streams > 0
assert response_message == "Hello! How can I help you?"
@pytest.mark.asyncio
async def test_async_streaming(
self, mock_streaming_chunks: List[ChatCompletionChunk]
) -> None:
"""Test async streaming with callback handler."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = ChatWriter(
api_key=SecretStr("key"),
callback_manager=callback_manager,
max_tokens=10,
)
mock_async_client = AsyncMock()
mock_response = AsyncMock()
mock_response.__aiter__.return_value = mock_streaming_chunks
mock_async_client.chat.chat.return_value = mock_response
with mock.patch.object(chat, "async_client", mock_async_client):
message = HumanMessage(content="Hi")
response = chat.astream([message])
response_message = ""
async for chunk in response:
response_message += str(chunk.content)
assert callback_handler.llm_streams > 0
assert response_message == "Hello! How can I help you?"
def test_sync_tool_calling(
        self, mock_tool_call_choice_response: Chat
) -> None:
"""Test synchronous tool calling functionality."""
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
"""Get the weather in a location."""
location: str = Field(..., description="The location to get weather for")
chat = ChatWriter(api_key=SecretStr("key"))
mock_client = MagicMock()
mock_client.chat.chat.return_value = mock_tool_call_choice_response
chat_with_tools = chat.bind_tools(
tools=[GetWeather],
tool_choice="GetWeather",
)
with mock.patch.object(chat, "client", mock_client):
response = chat_with_tools.invoke("What's the weather in London?")
assert isinstance(response, AIMessage)
assert response.tool_calls
assert response.tool_calls[0]["name"] == "GetWeather"
assert response.tool_calls[0]["args"]["location"] == "London"
@pytest.mark.asyncio
async def test_async_tool_calling(
        self, mock_tool_call_choice_response: Chat
) -> None:
"""Test asynchronous tool calling functionality."""
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
"""Get the weather in a location."""
location: str = Field(..., description="The location to get weather for")
mock_async_client = AsyncMock()
mock_async_client.chat.chat.return_value = mock_tool_call_choice_response
chat = ChatWriter(api_key=SecretStr("key"))
chat_with_tools = chat.bind_tools(
tools=[GetWeather],
tool_choice="GetWeather",
)
with mock.patch.object(chat, "async_client", mock_async_client):
response = await chat_with_tools.ainvoke("What's the weather in London?")
assert isinstance(response, AIMessage)
assert response.tool_calls
assert response.tool_calls[0]["name"] == "GetWeather"
assert response.tool_calls[0]["args"]["location"] == "London"
@pytest.mark.requires("writerai")
class TestChatWriterStandard(ChatModelUnitTests):
"""Test case for ChatWriter that inherits from standard LangChain tests."""
@property
def chat_model_class(self) -> Type[BaseChatModel]:
"""Return ChatWriter model class."""
return ChatWriter
@property
def chat_model_params(self) -> Dict:
"""Return any additional parameters needed."""
return {
"api_key": "fake-api-key",
"model_name": "palmyra-x-004",
}
@property
def has_tool_calling(self) -> bool:
"""Writer supports tool/function calling."""
return True
@property
def tool_choice_value(self) -> Optional[str]:
"""Value to use for tool choice in tests."""
return "auto"
@property
def has_structured_output(self) -> bool:
"""Writer does not yet support structured output."""
return False
@property
def supports_image_inputs(self) -> bool:
"""Writer does not support image inputs."""
return False
@property
def supports_video_inputs(self) -> bool:
"""Writer does not support video inputs."""
return False
@property
def returns_usage_metadata(self) -> bool:
"""Writer returns token usage information."""
return True
@property
def supports_anthropic_inputs(self) -> bool:
"""Writer does not support anthropic inputs."""
return False
@property
def supports_image_tool_message(self) -> bool:
"""Writer does not support image tool message."""
return False
@property
def supported_usage_metadata_details(
self,
) -> Dict[
Literal["invoke", "stream"],
List[
Literal[
"audio_input",
"audio_output",
"reasoning_output",
"cache_read_input",
"cache_creation_input",
]
],
]:
"""Return which types of usage metadata your model supports."""
return {"invoke": ["cache_creation_input"], "stream": ["reasoning_output"]}
@property
def init_from_env_params(self) -> Tuple[dict, dict, dict]:
"""Return env vars, init args, and expected instance attrs for initializing
from env vars."""
return {"WRITER_API_KEY": "key"}, {"api_key": "key"}, {"api_key": "key"}
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_hunyuan.py | import pytest
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
ChatMessage,
FunctionMessage,
HumanMessage,
HumanMessageChunk,
SystemMessage,
)
from langchain_community.chat_models.hunyuan import (
_convert_delta_to_message_chunk,
_convert_dict_to_message,
_convert_message_to_dict,
)
def test__convert_message_to_dict_human() -> None:
message = HumanMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"Role": "user", "Content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_ai() -> None:
message = AIMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"Role": "assistant", "Content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_system() -> None:
message = SystemMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"Role": "system", "Content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_function() -> None:
message = FunctionMessage(name="foo", content="bar")
with pytest.raises(TypeError) as e:
_convert_message_to_dict(message)
assert "Got unknown type" in str(e)
def test__convert_dict_to_message_human() -> None:
message_dict = {"Role": "user", "Content": "foo"}
result = _convert_dict_to_message(message_dict)
expected_output = HumanMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_ai() -> None:
message_dict = {"Role": "assistant", "Content": "foo"}
result = _convert_dict_to_message(message_dict)
expected_output = AIMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_system() -> None:
message_dict = {"Role": "system", "Content": "foo"}
result = _convert_dict_to_message(message_dict)
expected_output = SystemMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_other_role() -> None:
message_dict = {"Role": "other", "Content": "foo"}
result = _convert_dict_to_message(message_dict)
expected_output = ChatMessage(role="other", content="foo")
assert result == expected_output
def test__convert_delta_to_message_assistant() -> None:
delta = {"Role": "assistant", "Content": "foo"}
result = _convert_delta_to_message_chunk(delta, AIMessageChunk)
expected_output = AIMessageChunk(content="foo")
assert result == expected_output
def test__convert_delta_to_message_human() -> None:
delta = {"Role": "user", "Content": "foo"}
result = _convert_delta_to_message_chunk(delta, HumanMessageChunk)
expected_output = HumanMessageChunk(content="foo")
assert result == expected_output
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_huggingface.py | """Test HuggingFace Chat wrapper."""
from importlib import import_module
def test_import_class() -> None:
"""Test that the class can be imported."""
module_name = "langchain_community.chat_models.huggingface"
class_name = "ChatHuggingFace"
module = import_module(module_name)
assert hasattr(module, class_name)
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_imports.py | from langchain_community.chat_models import __all__, _module_lookup
EXPECTED_ALL = [
"AzureChatOpenAI",
"BedrockChat",
"ChatAnthropic",
"ChatAnyscale",
"ChatBaichuan",
"ChatClovaX",
"ChatCohere",
"ChatCoze",
"ChatDatabricks",
"ChatDeepInfra",
"ChatEverlyAI",
"ChatEdenAI",
"ChatFireworks",
"ChatFriendli",
"ChatGooglePalm",
"ChatHuggingFace",
"ChatHunyuan",
"ChatJavelinAIGateway",
"ChatKinetica",
"ChatKonko",
"ChatLiteLLM",
"ChatLiteLLMRouter",
"ChatLlamaCpp",
"ChatMLflowAIGateway",
"ChatMaritalk",
"ChatMlflow",
"ChatMLflowAIGateway",
"ChatMLX",
"ChatNebula",
"ChatOCIGenAI",
"ChatOCIModelDeployment",
"ChatOCIModelDeploymentVLLM",
"ChatOCIModelDeploymentTGI",
"ChatOllama",
"ChatOpenAI",
"ChatOutlines",
"ChatPerplexity",
"ChatPremAI",
"ChatSambaNovaCloud",
"ChatSambaStudio",
"ChatSparkLLM",
"ChatTongyi",
"ChatVertexAI",
"ChatYandexGPT",
"ChatYuan2",
"ChatReka",
"ChatZhipuAI",
"ErnieBotChat",
"FakeListChatModel",
"GPTRouter",
"GigaChat",
"HumanInputChatModel",
"JinaChat",
"LlamaEdgeChatService",
"MiniMaxChat",
"MoonshotChat",
"PaiEasChatEndpoint",
"PromptLayerChatOpenAI",
"SolarChat",
"QianfanChatEndpoint",
"VolcEngineMaasChat",
"ChatOctoAI",
"ChatSnowflakeCortex",
"ChatYi",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
assert set(__all__) == set(_module_lookup.keys())
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_ernie.py | import pytest
from langchain_core.messages import (
AIMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain_community.chat_models.ernie import _convert_message_to_dict
def test__convert_message_to_dict_human() -> None:
message = HumanMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"role": "user", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_ai() -> None:
message = AIMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"role": "assistant", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_system() -> None:
message = SystemMessage(content="foo")
with pytest.raises(ValueError) as e:
_convert_message_to_dict(message)
assert "Got unknown type" in str(e)
def test__convert_message_to_dict_function() -> None:
message = FunctionMessage(name="foo", content="bar")
with pytest.raises(ValueError) as e:
_convert_message_to_dict(message)
assert "Got unknown type" in str(e)
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_bedrock.py | """Test Bedrock Chat API wrapper."""
from typing import List
from unittest.mock import MagicMock
import pytest
from langchain_core.messages import (
AIMessage,
BaseMessage,
HumanMessage,
SystemMessage,
)
from langchain_community.chat_models import BedrockChat
from langchain_community.chat_models.meta import convert_messages_to_prompt_llama
@pytest.mark.parametrize(
("messages", "expected"),
[
([HumanMessage(content="Hello")], "[INST] Hello [/INST]"),
(
[HumanMessage(content="Hello"), AIMessage(content="Answer:")],
"[INST] Hello [/INST]\nAnswer:",
),
(
[
SystemMessage(content="You're an assistant"),
HumanMessage(content="Hello"),
AIMessage(content="Answer:"),
],
"<<SYS>> You're an assistant <</SYS>>\n[INST] Hello [/INST]\nAnswer:",
),
],
)
def test_formatting(messages: List[BaseMessage], expected: str) -> None:
result = convert_messages_to_prompt_llama(messages)
assert result == expected
@pytest.mark.parametrize(
"model_id",
["anthropic.claude-v2", "amazon.titan-text-express-v1"],
)
def test_different_models_bedrock(model_id: str) -> None:
provider = model_id.split(".")[0]
client = MagicMock()
respbody = MagicMock()
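    # Each Bedrock provider wraps its completion in a differently shaped body,
    # so the mocked response varies by provider.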
if provider == "anthropic":
respbody.read.return_value = MagicMock(
decode=MagicMock(return_value=b'{"completion":"Hi back"}'),
)
client.invoke_model.return_value = {"body": respbody}
elif provider == "amazon":
respbody.read.return_value = '{"results": [{"outputText": "Hi back"}]}'
client.invoke_model.return_value = {"body": respbody}
model = BedrockChat(model_id=model_id, client=client)
# should not throw an error
model.invoke("hello there")
def test_bedrock_combine_llm_output() -> None:
model_id = "anthropic.claude-3-haiku-20240307-v1:0"
client = MagicMock()
llm_outputs = [
{
"model_id": "anthropic.claude-3-haiku-20240307-v1:0",
"usage": {
"completion_tokens": 1,
"prompt_tokens": 2,
"total_tokens": 3,
},
},
{
"model_id": "anthropic.claude-3-haiku-20240307-v1:0",
"usage": {
"completion_tokens": 1,
"prompt_tokens": 2,
"total_tokens": 3,
},
},
]
model = BedrockChat(model_id=model_id, client=client)
final_output = model._combine_llm_outputs(llm_outputs) # type: ignore[arg-type]
assert final_output["model_id"] == model_id
assert final_output["usage"]["completion_tokens"] == 2
assert final_output["usage"]["prompt_tokens"] == 4
assert final_output["usage"]["total_tokens"] == 6
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_dappier.py | """Test Dappier Chat API wrapper."""
from typing import List
import pytest
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from langchain_community.chat_models.dappier import _format_dappier_messages
@pytest.mark.parametrize(
("messages", "expected"),
[
(
[
SystemMessage(
content="You are a chat model with real time search tools"
),
HumanMessage(content="Hello how are you today?"),
],
[
{
"role": "system",
"content": "You are a chat model with real time search tools",
},
{"role": "user", "content": "Hello how are you today?"},
],
)
],
)
def test_dappier_messages_formatting(
    messages: List[BaseMessage], expected: List[dict]
) -> None:
result = _format_dappier_messages(messages)
assert result == expected
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_snowflake.py | """Test ChatSnowflakeCortex."""
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langchain_community.chat_models.snowflake import _convert_message_to_dict
def test_messages_to_prompt_dict_with_valid_messages() -> None:
messages = [
SystemMessage(content="System Prompt"),
HumanMessage(content="User message #1"),
AIMessage(content="AI message #1"),
HumanMessage(content="User message #2"),
AIMessage(content="AI message #2"),
]
result = [_convert_message_to_dict(m) for m in messages]
expected = [
{"role": "system", "content": "System Prompt"},
{"role": "user", "content": "User message #1"},
{"role": "assistant", "content": "AI message #1"},
{"role": "user", "content": "User message #2"},
{"role": "assistant", "content": "AI message #2"},
]
assert result == expected
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_ollama.py | from typing import List, Literal, Optional
import pytest
from pydantic import BaseModel, ValidationError
from langchain_community.chat_models import ChatOllama
def test_standard_params() -> None:
class ExpectedParams(BaseModel):
ls_provider: str
ls_model_name: str
ls_model_type: Literal["chat", "llm"]
ls_temperature: Optional[float]
ls_max_tokens: Optional[int] = None
ls_stop: Optional[List[str]] = None
model = ChatOllama(model="llama3")
ls_params = model._get_ls_params()
try:
ExpectedParams(**ls_params)
except ValidationError as e:
pytest.fail(f"Validation error: {e}")
assert ls_params["ls_model_name"] == "llama3"
# Test optional params
model = ChatOllama(num_predict=10, stop=["test"], temperature=0.33)
ls_params = model._get_ls_params()
try:
ExpectedParams(**ls_params)
except ValidationError as e:
pytest.fail(f"Validation error: {e}")
assert ls_params["ls_max_tokens"] == 10
assert ls_params["ls_stop"] == ["test"]
assert ls_params["ls_temperature"] == 0.33
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_premai.py | """Test ChatPremAI model."""
from typing import cast
import pytest
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage
from pydantic import SecretStr
from pytest import CaptureFixture
from langchain_community.chat_models import ChatPremAI
from langchain_community.chat_models.premai import (
SINGLE_TOOL_PROMPT_TEMPLATE,
TOOL_PROMPT_HEADER,
_messages_to_prompt_dict,
)
@pytest.mark.requires("premai")
def test_api_key_is_string() -> None:
llm = ChatPremAI(premai_api_key="secret-api-key", project_id=8) # type: ignore[call-arg]
assert isinstance(llm.premai_api_key, SecretStr)
@pytest.mark.requires("premai")
def test_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
llm = ChatPremAI(premai_api_key="secret-api-key", project_id=8) # type: ignore[call-arg]
print(llm.premai_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
def test_messages_to_prompt_dict_with_valid_messages() -> None:
system_message, result = _messages_to_prompt_dict(
[
SystemMessage(content="System Prompt"),
HumanMessage(content="User message #1"),
AIMessage(content="AI message #1"),
HumanMessage(content="User message #2"),
AIMessage(content="AI message #2"),
ToolMessage(content="Tool Message #1", tool_call_id="test_tool"),
AIMessage(content="AI message #3"),
]
)
expected_tool_message = SINGLE_TOOL_PROMPT_TEMPLATE.format(
tool_id="test_tool", tool_response="Tool Message #1"
)
expected = [
{"role": "user", "content": "User message #1"},
{"role": "assistant", "content": "AI message #1"},
{"role": "user", "content": "User message #2"},
{"role": "assistant", "content": "AI message #2"},
{"role": "assistant", "content": "AI message #3"},
{"role": "user", "content": TOOL_PROMPT_HEADER + expected_tool_message},
]
assert system_message == "System Prompt"
assert result == expected
@pytest.mark.requires("premai")
def test_premai_initialization() -> None:
for model in [
ChatPremAI(model="prem-ai-model", premai_api_key="xyz", project_id=8), # type: ignore[call-arg]
ChatPremAI(model_name="prem-ai-model", api_key="xyz", project_id=8), # type: ignore[arg-type, call-arg]
]:
assert model.model == "prem-ai-model"
assert model.temperature is None
assert model.max_tokens is None
assert model.max_retries == 1
assert cast(SecretStr, model.premai_api_key).get_secret_value() == "xyz"
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_friendli.py | """Test Friendli LLM for chat."""
from unittest.mock import AsyncMock, MagicMock, Mock
import pytest
from pydantic import SecretStr
from pytest import CaptureFixture, MonkeyPatch
from langchain_community.adapters.openai import aenumerate
from langchain_community.chat_models import ChatFriendli
@pytest.fixture
def mock_friendli_client() -> Mock:
"""Mock instance of Friendli client."""
return Mock()
@pytest.fixture
def mock_friendli_async_client() -> AsyncMock:
"""Mock instance of Friendli async client."""
return AsyncMock()
@pytest.fixture
def chat_friendli(
mock_friendli_client: Mock, mock_friendli_async_client: AsyncMock
) -> ChatFriendli:
"""Friendli LLM for chat with mock clients."""
return ChatFriendli(
friendli_token=SecretStr("personal-access-token"),
client=mock_friendli_client,
async_client=mock_friendli_async_client,
)
@pytest.mark.requires("friendli")
def test_friendli_token_is_secret_string(capsys: CaptureFixture) -> None:
"""Test if friendli token is stored as a SecretStr."""
fake_token_value = "personal-access-token"
chat = ChatFriendli(friendli_token=fake_token_value) # type: ignore[arg-type]
assert isinstance(chat.friendli_token, SecretStr)
assert chat.friendli_token.get_secret_value() == fake_token_value
print(chat.friendli_token, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
@pytest.mark.requires("friendli")
def test_friendli_token_read_from_env(
monkeypatch: MonkeyPatch, capsys: CaptureFixture
) -> None:
"""Test if friendli token can be parsed from environment."""
fake_token_value = "personal-access-token"
monkeypatch.setenv("FRIENDLI_TOKEN", fake_token_value)
chat = ChatFriendli()
assert isinstance(chat.friendli_token, SecretStr)
assert chat.friendli_token.get_secret_value() == fake_token_value
print(chat.friendli_token, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
@pytest.mark.requires("friendli")
def test_friendli_invoke(
mock_friendli_client: Mock, chat_friendli: ChatFriendli
) -> None:
"""Test invocation with friendli."""
mock_message = Mock()
mock_message.content = "Hello Friendli"
mock_message.role = "assistant"
mock_choice = Mock()
mock_choice.message = mock_message
mock_response = Mock()
mock_response.choices = [mock_choice]
mock_friendli_client.chat.completions.create.return_value = mock_response
result = chat_friendli.invoke("Hello langchain")
assert result.content == "Hello Friendli"
mock_friendli_client.chat.completions.create.assert_called_once_with(
messages=[{"role": "user", "content": "Hello langchain"}],
stream=False,
model=chat_friendli.model,
frequency_penalty=None,
presence_penalty=None,
max_tokens=None,
stop=None,
temperature=None,
top_p=None,
)
@pytest.mark.requires("friendli")
async def test_friendli_ainvoke(
mock_friendli_async_client: AsyncMock, chat_friendli: ChatFriendli
) -> None:
"""Test async invocation with friendli."""
mock_message = Mock()
mock_message.content = "Hello Friendli"
mock_message.role = "assistant"
mock_choice = Mock()
mock_choice.message = mock_message
mock_response = Mock()
mock_response.choices = [mock_choice]
mock_friendli_async_client.chat.completions.create.return_value = mock_response
result = await chat_friendli.ainvoke("Hello langchain")
assert result.content == "Hello Friendli"
mock_friendli_async_client.chat.completions.create.assert_awaited_once_with(
messages=[{"role": "user", "content": "Hello langchain"}],
stream=False,
model=chat_friendli.model,
frequency_penalty=None,
presence_penalty=None,
max_tokens=None,
stop=None,
temperature=None,
top_p=None,
)
@pytest.mark.requires("friendli")
def test_friendli_stream(
mock_friendli_client: Mock, chat_friendli: ChatFriendli
) -> None:
"""Test stream with friendli."""
mock_delta_0 = Mock()
mock_delta_0.content = "Hello "
mock_delta_1 = Mock()
mock_delta_1.content = "Friendli"
mock_choice_0 = Mock()
mock_choice_0.delta = mock_delta_0
mock_choice_1 = Mock()
mock_choice_1.delta = mock_delta_1
mock_chunk_0 = Mock()
mock_chunk_0.choices = [mock_choice_0]
mock_chunk_1 = Mock()
mock_chunk_1.choices = [mock_choice_1]
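    # MagicMock (unlike Mock) supports magic methods, so __iter__ can be
    # configured to yield the streamed chunks.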
mock_stream = MagicMock()
mock_chunks = [mock_chunk_0, mock_chunk_1]
mock_stream.__iter__.return_value = mock_chunks
mock_friendli_client.chat.completions.create.return_value = mock_stream
stream = chat_friendli.stream("Hello langchain")
for i, chunk in enumerate(stream):
assert chunk.content == mock_chunks[i].choices[0].delta.content
mock_friendli_client.chat.completions.create.assert_called_once_with(
messages=[{"role": "user", "content": "Hello langchain"}],
stream=True,
model=chat_friendli.model,
frequency_penalty=None,
presence_penalty=None,
max_tokens=None,
stop=None,
temperature=None,
top_p=None,
)
@pytest.mark.requires("friendli")
async def test_friendli_astream(
mock_friendli_async_client: AsyncMock, chat_friendli: ChatFriendli
) -> None:
"""Test async stream with friendli."""
mock_delta_0 = Mock()
mock_delta_0.content = "Hello "
mock_delta_1 = Mock()
mock_delta_1.content = "Friendli"
mock_choice_0 = Mock()
mock_choice_0.delta = mock_delta_0
mock_choice_1 = Mock()
mock_choice_1.delta = mock_delta_1
mock_chunk_0 = Mock()
mock_chunk_0.choices = [mock_choice_0]
mock_chunk_1 = Mock()
mock_chunk_1.choices = [mock_choice_1]
mock_stream = AsyncMock()
mock_chunks = [mock_chunk_0, mock_chunk_1]
mock_stream.__aiter__.return_value = mock_chunks
mock_friendli_async_client.chat.completions.create.return_value = mock_stream
stream = chat_friendli.astream("Hello langchain")
async for i, chunk in aenumerate(stream):
assert chunk.content == mock_chunks[i].choices[0].delta.content
mock_friendli_async_client.chat.completions.create.assert_awaited_once_with(
messages=[{"role": "user", "content": "Hello langchain"}],
stream=True,
model=chat_friendli.model,
frequency_penalty=None,
presence_penalty=None,
max_tokens=None,
stop=None,
temperature=None,
top_p=None,
)
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_fireworks.py | """Test Fireworks chat model"""
import sys
import pytest
from pydantic import SecretStr
from pytest import CaptureFixture
from langchain_community.chat_models import ChatFireworks
if sys.version_info < (3, 9):
pytest.skip("fireworks-ai requires Python > 3.8", allow_module_level=True)
@pytest.mark.requires("fireworks")
def test_api_key_is_string() -> None:
llm = ChatFireworks(fireworks_api_key="secret-api-key") # type: ignore[arg-type]
assert isinstance(llm.fireworks_api_key, SecretStr)
@pytest.mark.requires("fireworks")
def test_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
llm = ChatFireworks(fireworks_api_key="secret-api-key") # type: ignore[arg-type]
print(llm.fireworks_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_llama_edge.py | import pytest
from langchain_core.messages import (
AIMessage,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain_community.chat_models.llama_edge import (
LlamaEdgeChatService,
_convert_dict_to_message,
_convert_message_to_dict,
)
def test__convert_message_to_dict_human() -> None:
message = HumanMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"role": "user", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_ai() -> None:
message = AIMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"role": "assistant", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_system() -> None:
message = SystemMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"role": "system", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_function() -> None:
message = FunctionMessage(name="foo", content="bar")
with pytest.raises(TypeError) as e:
_convert_message_to_dict(message)
assert "Got unknown type" in str(e)
def test__convert_dict_to_message_human() -> None:
message_dict = {"role": "user", "content": "foo"}
result = _convert_dict_to_message(message_dict)
expected_output = HumanMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_ai() -> None:
message_dict = {"role": "assistant", "content": "foo"}
result = _convert_dict_to_message(message_dict)
expected_output = AIMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_other_role() -> None:
message_dict = {"role": "system", "content": "foo"}
result = _convert_dict_to_message(message_dict)
expected_output = ChatMessage(role="system", content="foo")
assert result == expected_output
def test_wasm_chat_without_service_url() -> None:
chat = LlamaEdgeChatService()
# create message sequence
system_message = SystemMessage(content="You are an AI assistant")
user_message = HumanMessage(content="What is the capital of France?")
messages = [system_message, user_message]
with pytest.raises(ValueError) as e:
chat.invoke(messages)
assert "Error code: 503" in str(e)
assert "reason: The IP address or port of the chat service is incorrect." in str(e)
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_google_palm.py | """Test Google PaLM Chat API wrapper."""
import pytest
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langchain_community.chat_models.google_palm import (
ChatGooglePalm,
ChatGooglePalmError,
_messages_to_prompt_dict,
)
def test_messages_to_prompt_dict_with_valid_messages() -> None:
pytest.importorskip("google.generativeai")
result = _messages_to_prompt_dict(
[
SystemMessage(content="Prompt"),
HumanMessage(example=True, content="Human example #1"),
AIMessage(example=True, content="AI example #1"),
HumanMessage(example=True, content="Human example #2"),
AIMessage(example=True, content="AI example #2"),
HumanMessage(content="Real human message"),
AIMessage(content="Real AI message"),
]
)
expected = {
"context": "Prompt",
"examples": [
{"author": "human", "content": "Human example #1"},
{"author": "ai", "content": "AI example #1"},
{"author": "human", "content": "Human example #2"},
{"author": "ai", "content": "AI example #2"},
],
"messages": [
{"author": "human", "content": "Real human message"},
{"author": "ai", "content": "Real AI message"},
],
}
assert result == expected
def test_messages_to_prompt_dict_raises_with_misplaced_system_message() -> None:
pytest.importorskip("google.generativeai")
with pytest.raises(ChatGooglePalmError) as e:
_messages_to_prompt_dict(
[
HumanMessage(content="Real human message"),
SystemMessage(content="Prompt"),
]
)
assert "System message must be first" in str(e)
def test_messages_to_prompt_dict_raises_with_misordered_examples() -> None:
pytest.importorskip("google.generativeai")
with pytest.raises(ChatGooglePalmError) as e:
_messages_to_prompt_dict(
[
AIMessage(example=True, content="AI example #1"),
HumanMessage(example=True, content="Human example #1"),
]
)
assert "AI example message must be immediately preceded" in str(e)
def test_messages_to_prompt_dict_raises_with_mismatched_examples() -> None:
pytest.importorskip("google.generativeai")
with pytest.raises(ChatGooglePalmError) as e:
_messages_to_prompt_dict(
[
HumanMessage(example=True, content="Human example #1"),
AIMessage(example=False, content="AI example #1"),
]
)
assert "Human example message must be immediately followed" in str(e)
def test_messages_to_prompt_dict_raises_with_example_after_real() -> None:
pytest.importorskip("google.generativeai")
with pytest.raises(ChatGooglePalmError) as e:
_messages_to_prompt_dict(
[
HumanMessage(example=False, content="Real message"),
HumanMessage(example=True, content="Human example #1"),
AIMessage(example=True, content="AI example #1"),
]
)
assert "Message examples must come before other" in str(e)
def test_chat_google_raises_with_invalid_temperature() -> None:
pytest.importorskip("google.generativeai")
with pytest.raises(ValueError) as e:
ChatGooglePalm(google_api_key="fake", temperature=2.0) # type: ignore[arg-type, call-arg]
assert "must be in the range" in str(e)
def test_chat_google_raises_with_invalid_top_p() -> None:
pytest.importorskip("google.generativeai")
with pytest.raises(ValueError) as e:
ChatGooglePalm(google_api_key="fake", top_p=2.0) # type: ignore[arg-type, call-arg]
assert "must be in the range" in str(e)
def test_chat_google_raises_with_invalid_top_k() -> None:
pytest.importorskip("google.generativeai")
with pytest.raises(ValueError) as e:
ChatGooglePalm(google_api_key="fake", top_k=-5) # type: ignore[arg-type, call-arg]
assert "must be positive" in str(e)
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_azureml_endpoint.py | """Test AzureML chat endpoint."""
import os
import pytest
from pydantic import SecretStr
from pytest import CaptureFixture, FixtureRequest
from langchain_community.chat_models.azureml_endpoint import AzureMLChatOnlineEndpoint
@pytest.fixture(scope="class")
def api_passed_via_environment_fixture() -> AzureMLChatOnlineEndpoint:
"""Fixture to create an AzureMLChatOnlineEndpoint instance
with API key passed from environment"""
os.environ["AZUREML_ENDPOINT_API_KEY"] = "my-api-key"
azure_chat = AzureMLChatOnlineEndpoint(
endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/score"
)
del os.environ["AZUREML_ENDPOINT_API_KEY"]
return azure_chat
@pytest.fixture(scope="class")
def api_passed_via_constructor_fixture() -> AzureMLChatOnlineEndpoint:
"""Fixture to create an AzureMLChatOnlineEndpoint instance
with API key passed from constructor"""
azure_chat = AzureMLChatOnlineEndpoint(
endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/score",
endpoint_api_key="my-api-key", # type: ignore[arg-type]
)
return azure_chat
@pytest.mark.parametrize(
"fixture_name",
["api_passed_via_constructor_fixture", "api_passed_via_environment_fixture"],
)
class TestAzureMLChatOnlineEndpoint:
def test_api_key_is_secret_string(
self, fixture_name: str, request: FixtureRequest
) -> None:
"""Test that the API key is a SecretStr instance"""
azure_chat = request.getfixturevalue(fixture_name)
assert isinstance(azure_chat.endpoint_api_key, SecretStr)
def test_api_key_masked(
self, fixture_name: str, request: FixtureRequest, capsys: CaptureFixture
) -> None:
"""Test that the API key is masked"""
azure_chat = request.getfixturevalue(fixture_name)
print(azure_chat.endpoint_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert (
(str(azure_chat.endpoint_api_key) == "**********")
and (repr(azure_chat.endpoint_api_key) == "SecretStr('**********')")
and (captured.out == "**********")
)
def test_api_key_is_readable(
self, fixture_name: str, request: FixtureRequest
) -> None:
"""Test that the real secret value of the API key can be read"""
azure_chat = request.getfixturevalue(fixture_name)
assert azure_chat.endpoint_api_key.get_secret_value() == "my-api-key"
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_yandex.py | import os
from unittest import mock
from unittest.mock import MagicMock
import pytest
from langchain_community.chat_models.yandex import ChatYandexGPT
def test_yandexgpt_initialization() -> None:
llm = ChatYandexGPT(
iam_token="your_iam_token", # type: ignore[arg-type]
api_key="your_api_key", # type: ignore[arg-type]
folder_id="your_folder_id",
)
assert llm.model_name == "yandexgpt-lite"
assert llm.model_uri.startswith("gpt://your_folder_id/yandexgpt-lite/")
def test_yandexgpt_model_params() -> None:
llm = ChatYandexGPT(
model_name="custom-model",
model_version="v1",
iam_token="your_iam_token", # type: ignore[arg-type]
api_key="your_api_key", # type: ignore[arg-type]
folder_id="your_folder_id",
)
assert llm.model_name == "custom-model"
assert llm.model_version == "v1"
assert llm.iam_token.get_secret_value() == "your_iam_token"
assert llm.model_uri == "gpt://your_folder_id/custom-model/v1"
def test_yandexgpt_invalid_model_params() -> None:
with pytest.raises(ValueError):
ChatYandexGPT(model_uri="", iam_token="your_iam_token") # type: ignore[arg-type]
with pytest.raises(ValueError):
ChatYandexGPT(
iam_token="", # type: ignore[arg-type]
api_key="your_api_key", # type: ignore[arg-type]
model_uri="",
)
@pytest.mark.parametrize(
"api_key_or_token", [dict(api_key="bogus"), dict(iam_token="bogus")]
)
@pytest.mark.parametrize(
"disable_logging",
[dict(), dict(disable_request_logging=True), dict(disable_request_logging=False)],
)
@mock.patch.dict(os.environ, {}, clear=True)
def test_completion_call(api_key_or_token: dict, disable_logging: dict) -> None:
absent_yandex_module_stub = MagicMock()
grpc_mock = MagicMock()
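    # Stub the yandexcloud protobuf modules and grpc in sys.modules so the test
    # runs without the SDK installed.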
with mock.patch.dict(
"sys.modules",
{
"yandex.cloud.ai.foundation_models.v1."
"text_common_pb2": absent_yandex_module_stub,
"yandex.cloud.ai.foundation_models.v1.text_generation."
"text_generation_service_pb2": absent_yandex_module_stub,
"yandex.cloud.ai.foundation_models.v1.text_generation."
"text_generation_service_pb2_grpc": absent_yandex_module_stub,
"grpc": grpc_mock,
},
):
grpc_mock.RpcError = Exception
stub = absent_yandex_module_stub.TextGenerationServiceStub
request_stub = absent_yandex_module_stub.CompletionRequest
msg_constructor_stub = absent_yandex_module_stub.Message
args = {"folder_id": "fldr", **api_key_or_token, **disable_logging}
ygpt = ChatYandexGPT(**args)
grpc_call_mock = stub.return_value.Completion
msg_mock = mock.Mock()
msg_mock.message.text = "cmpltn"
res_mock = mock.Mock()
res_mock.alternatives = [msg_mock]
grpc_call_mock.return_value = [res_mock]
        act_completion = ygpt.invoke("nomatter")
        assert act_completion.content == "cmpltn"
assert len(grpc_call_mock.call_args_list) == 1
once_called_args = grpc_call_mock.call_args_list[0]
act_model_uri = request_stub.call_args_list[0].kwargs["model_uri"]
act_text = msg_constructor_stub.call_args_list[0].kwargs["text"]
act_metadata = once_called_args.kwargs["metadata"]
assert "fldr" in act_model_uri
assert act_text == "nomatter"
assert act_metadata
assert len(act_metadata) > 0
if disable_logging.get("disable_request_logging"):
assert ("x-data-logging-enabled", "false") in act_metadata
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_mlflow.py | import json
from typing import Any, Dict, List
from unittest.mock import MagicMock
import pytest
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
ChatMessageChunk,
FunctionMessage,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
ToolCallChunk,
ToolMessageChunk,
)
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import StructuredTool
from pydantic import BaseModel
from langchain_community.chat_models.mlflow import ChatMlflow
@pytest.fixture
def llm() -> ChatMlflow:
return ChatMlflow(
endpoint="databricks-meta-llama-3-70b-instruct", target_uri="databricks"
)
@pytest.fixture
def model_input() -> List[BaseMessage]:
data = [
{
"role": "system",
"content": "You are a helpful assistant.",
},
{"role": "user", "content": "36939 * 8922.4"},
]
return [ChatMlflow._convert_dict_to_message(value) for value in data]
@pytest.fixture
def mock_prediction() -> dict:
return {
"id": "chatcmpl_id",
"object": "chat.completion",
"created": 1721875529,
"model": "meta-llama-3.1-70b-instruct-072424",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "To calculate the result of 36939 multiplied by 8922.4, "
"I get:\n\n36939 x 8922.4 = 329,511,111.6",
},
"finish_reason": "stop",
"logprobs": None,
}
],
"usage": {"prompt_tokens": 30, "completion_tokens": 36, "total_tokens": 66},
}
@pytest.fixture
def mock_predict_stream_result() -> List[dict]:
return [
{
"id": "chatcmpl_bb1fce87-f14e-4ae1-ac22-89facc74898a",
"object": "chat.completion.chunk",
"created": 1721877054,
"model": "meta-llama-3.1-70b-instruct-072424",
"choices": [
{
"index": 0,
"delta": {"role": "assistant", "content": "36939"},
"finish_reason": None,
"logprobs": None,
}
],
"usage": {"prompt_tokens": 30, "completion_tokens": 20, "total_tokens": 50},
},
{
"id": "chatcmpl_bb1fce87-f14e-4ae1-ac22-89facc74898a",
"object": "chat.completion.chunk",
"created": 1721877054,
"model": "meta-llama-3.1-70b-instruct-072424",
"choices": [
{
"index": 0,
"delta": {"role": "assistant", "content": "x"},
"finish_reason": None,
"logprobs": None,
}
],
"usage": {"prompt_tokens": 30, "completion_tokens": 22, "total_tokens": 52},
},
{
"id": "chatcmpl_bb1fce87-f14e-4ae1-ac22-89facc74898a",
"object": "chat.completion.chunk",
"created": 1721877054,
"model": "meta-llama-3.1-70b-instruct-072424",
"choices": [
{
"index": 0,
"delta": {"role": "assistant", "content": "8922.4"},
"finish_reason": None,
"logprobs": None,
}
],
"usage": {"prompt_tokens": 30, "completion_tokens": 24, "total_tokens": 54},
},
{
"id": "chatcmpl_bb1fce87-f14e-4ae1-ac22-89facc74898a",
"object": "chat.completion.chunk",
"created": 1721877054,
"model": "meta-llama-3.1-70b-instruct-072424",
"choices": [
{
"index": 0,
"delta": {"role": "assistant", "content": " = "},
"finish_reason": None,
"logprobs": None,
}
],
"usage": {"prompt_tokens": 30, "completion_tokens": 28, "total_tokens": 58},
},
{
"id": "chatcmpl_bb1fce87-f14e-4ae1-ac22-89facc74898a",
"object": "chat.completion.chunk",
"created": 1721877054,
"model": "meta-llama-3.1-70b-instruct-072424",
"choices": [
{
"index": 0,
"delta": {"role": "assistant", "content": "329,511,111.6"},
"finish_reason": None,
"logprobs": None,
}
],
"usage": {"prompt_tokens": 30, "completion_tokens": 30, "total_tokens": 60},
},
{
"id": "chatcmpl_bb1fce87-f14e-4ae1-ac22-89facc74898a",
"object": "chat.completion.chunk",
"created": 1721877054,
"model": "meta-llama-3.1-70b-instruct-072424",
"choices": [
{
"index": 0,
"delta": {"role": "assistant", "content": ""},
"finish_reason": "stop",
"logprobs": None,
}
],
"usage": {"prompt_tokens": 30, "completion_tokens": 36, "total_tokens": 66},
},
]
@pytest.mark.requires("mlflow")
def test_chat_mlflow_predict(
llm: ChatMlflow, model_input: List[BaseMessage], mock_prediction: dict
) -> None:
mock_client = MagicMock()
llm._client = mock_client
def mock_predict(*args: Any, **kwargs: Any) -> Any:
return mock_prediction
mock_client.predict = mock_predict
res = llm.invoke(model_input)
assert res.content == mock_prediction["choices"][0]["message"]["content"]
@pytest.mark.requires("mlflow")
def test_chat_mlflow_stream(
llm: ChatMlflow,
model_input: List[BaseMessage],
mock_predict_stream_result: List[dict],
) -> None:
mock_client = MagicMock()
llm._client = mock_client
def mock_stream(*args: Any, **kwargs: Any) -> Any:
yield from mock_predict_stream_result
mock_client.predict_stream = mock_stream
for i, res in enumerate(llm.stream(model_input)):
assert (
res.content
== mock_predict_stream_result[i]["choices"][0]["delta"]["content"]
)
@pytest.mark.requires("mlflow")
def test_chat_mlflow_bind_tools(
llm: ChatMlflow, mock_predict_stream_result: List[dict]
) -> None:
mock_client = MagicMock()
llm._client = mock_client
def mock_stream(*args: Any, **kwargs: Any) -> Any:
yield from mock_predict_stream_result
mock_client.predict_stream = mock_stream
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are a helpful assistant. Make sure to use tool for information.",
),
("placeholder", "{chat_history}"),
("human", "{input}"),
("placeholder", "{agent_scratchpad}"),
]
)
def mock_func(x: int, y: int) -> str:
return "36939 x 8922.4 = 329,511,111.6"
class ArgsSchema(BaseModel):
x: int
y: int
tools = [
StructuredTool(
name="name",
description="description",
args_schema=ArgsSchema,
func=mock_func,
)
]
agent = create_tool_calling_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True) # type: ignore[arg-type]
result = agent_executor.invoke({"input": "36939 * 8922.4"})
assert result["output"] == "36939x8922.4 = 329,511,111.6"
def test_convert_dict_to_message_human() -> None:
message = {"role": "user", "content": "foo"}
result = ChatMlflow._convert_dict_to_message(message)
expected_output = HumanMessage(content="foo")
assert result == expected_output
def test_convert_dict_to_message_ai() -> None:
message = {"role": "assistant", "content": "foo"}
result = ChatMlflow._convert_dict_to_message(message)
expected_output = AIMessage(content="foo")
assert result == expected_output
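    # Also cover conversion of an assistant message that carries tool calls.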
tool_calls = [
{
"id": "call_fb5f5e1a-bac0-4422-95e9-d06e6022ad12",
"type": "function",
"function": {
"name": "main__test__python_exec",
"arguments": '{"code": "result = 36939 * 8922.4" }',
},
}
]
message_with_tools: Dict[str, Any] = {
"role": "assistant",
"content": None,
"tool_calls": tool_calls,
}
result = ChatMlflow._convert_dict_to_message(message_with_tools)
expected_output = AIMessage(
content="",
additional_kwargs={"tool_calls": tool_calls},
id="call_fb5f5e1a-bac0-4422-95e9-d06e6022ad12",
tool_calls=[
{
"name": tool_calls[0]["function"]["name"], # type: ignore[index]
"args": json.loads(tool_calls[0]["function"]["arguments"]), # type: ignore[index]
"id": "call_fb5f5e1a-bac0-4422-95e9-d06e6022ad12",
"type": "tool_call",
}
],
    )
    assert result == expected_output
def test_convert_dict_to_message_system() -> None:
message = {"role": "system", "content": "foo"}
result = ChatMlflow._convert_dict_to_message(message)
expected_output = SystemMessage(content="foo")
assert result == expected_output
def test_convert_dict_to_message_chat() -> None:
message = {"role": "any_role", "content": "foo"}
result = ChatMlflow._convert_dict_to_message(message)
expected_output = ChatMessage(content="foo", role="any_role")
assert result == expected_output
def test_convert_delta_to_message_chunk_ai() -> None:
delta = {"role": "assistant", "content": "foo"}
result = ChatMlflow._convert_delta_to_message_chunk(delta, "default_role")
expected_output = AIMessageChunk(content="foo")
assert result == expected_output
delta_with_tools: Dict[str, Any] = {
"role": "assistant",
"content": None,
"tool_calls": [{"index": 0, "function": {"arguments": " }"}}],
}
result = ChatMlflow._convert_delta_to_message_chunk(delta_with_tools, "role")
expected_output = AIMessageChunk(
content="",
additional_kwargs={"tool_calls": delta_with_tools["tool_calls"]},
id=None,
tool_call_chunks=[ToolCallChunk(name=None, args=" }", id=None, index=0)],
)
assert result == expected_output
def test_convert_delta_to_message_chunk_tool() -> None:
delta = {
"role": "tool",
"content": "foo",
"tool_call_id": "tool_call_id",
"id": "some_id",
}
result = ChatMlflow._convert_delta_to_message_chunk(delta, "default_role")
expected_output = ToolMessageChunk(
content="foo", id="some_id", tool_call_id="tool_call_id"
)
assert result == expected_output
def test_convert_delta_to_message_chunk_human() -> None:
delta = {
"role": "user",
"content": "foo",
}
result = ChatMlflow._convert_delta_to_message_chunk(delta, "default_role")
expected_output = HumanMessageChunk(content="foo")
assert result == expected_output
def test_convert_delta_to_message_chunk_system() -> None:
delta = {
"role": "system",
"content": "foo",
}
result = ChatMlflow._convert_delta_to_message_chunk(delta, "default_role")
expected_output = SystemMessageChunk(content="foo")
assert result == expected_output
def test_convert_delta_to_message_chunk_chat() -> None:
delta = {
"role": "any_role",
"content": "foo",
}
result = ChatMlflow._convert_delta_to_message_chunk(delta, "default_role")
expected_output = ChatMessageChunk(content="foo", role="any_role")
assert result == expected_output
def test_convert_message_to_dict_human() -> None:
human_message = HumanMessage(content="foo")
result = ChatMlflow._convert_message_to_dict(human_message)
expected_output = {"role": "user", "content": "foo"}
assert result == expected_output
def test_convert_message_to_dict_system() -> None:
system_message = SystemMessage(content="foo")
result = ChatMlflow._convert_message_to_dict(system_message)
expected_output = {"role": "system", "content": "foo"}
assert result == expected_output
def test_convert_message_to_dict_ai() -> None:
ai_message = AIMessage(content="foo")
result = ChatMlflow._convert_message_to_dict(ai_message)
expected_output = {"role": "assistant", "content": "foo"}
assert result == expected_output
ai_message = AIMessage(
content="",
tool_calls=[{"name": "name", "args": {}, "id": "id", "type": "tool_call"}],
)
result = ChatMlflow._convert_message_to_dict(ai_message)
expected_output_with_tools: Dict[str, Any] = {
"content": None,
"role": "assistant",
"tool_calls": [
{
"type": "function",
"id": "id",
"function": {"name": "name", "arguments": "{}"},
}
],
}
assert result == expected_output_with_tools
def test_convert_message_to_dict_tool() -> None:
tool_message = ToolMessageChunk(
content="foo", id="some_id", tool_call_id="tool_call_id"
)
result = ChatMlflow._convert_message_to_dict(tool_message)
expected_output = {
"role": "tool",
"content": "foo",
"tool_call_id": "tool_call_id",
}
assert result == expected_output
def test_convert_message_to_dict_function() -> None:
with pytest.raises(ValueError):
ChatMlflow._convert_message_to_dict(FunctionMessage(content="", name="name"))
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_yuan2.py | """Test ChatYuan2 wrapper."""
import pytest
from langchain_core.messages import (
AIMessage,
HumanMessage,
SystemMessage,
)
from langchain_community.chat_models.yuan2 import (
ChatYuan2,
_convert_dict_to_message,
_convert_message_to_dict,
)
@pytest.mark.requires("openai")
def test_yuan2_model_param() -> None:
chat = ChatYuan2(model="foo") # type: ignore[call-arg]
assert chat.model_name == "foo"
chat = ChatYuan2(model_name="foo") # type: ignore[call-arg]
assert chat.model_name == "foo"
@pytest.mark.requires("openai")
def test_yuan2_timeout_param() -> None:
chat = ChatYuan2(request_timeout=5) # type: ignore[call-arg]
assert chat.request_timeout == 5
chat = ChatYuan2(timeout=10) # type: ignore[call-arg]
assert chat.request_timeout == 10
@pytest.mark.requires("openai")
def test_yuan2_stop_sequences_param() -> None:
chat = ChatYuan2(stop=["<eod>"]) # type: ignore[call-arg]
assert chat.stop == ["<eod>"]
chat = ChatYuan2(stop_sequences=["<eod>"]) # type: ignore[call-arg]
assert chat.stop == ["<eod>"]
def test__convert_message_to_dict_human() -> None:
message = HumanMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"role": "user", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_ai() -> None:
message = AIMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"role": "assistant", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_system() -> None:
message = SystemMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"role": "system", "content": "foo"}
assert result == expected_output
def test__convert_dict_to_message_human() -> None:
message = {"role": "user", "content": "hello"}
result = _convert_dict_to_message(message)
expected_output = HumanMessage(content="hello")
assert result == expected_output
def test__convert_dict_to_message_ai() -> None:
message = {"role": "assistant", "content": "hello"}
result = _convert_dict_to_message(message)
expected_output = AIMessage(content="hello")
assert result == expected_output
def test__convert_dict_to_message_system() -> None:
message = {"role": "system", "content": "hello"}
result = _convert_dict_to_message(message)
expected_output = SystemMessage(content="hello")
assert result == expected_output
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_oci_model_deployment_endpoint.py | """Test OCI Data Science Model Deployment Endpoint."""
import pytest
import responses
from langchain_core.messages import AIMessage, HumanMessage
from pytest_mock import MockerFixture
from langchain_community.chat_models import ChatOCIModelDeployment
@pytest.mark.requires("ads")
def test_initialization(mocker: MockerFixture) -> None:
"""Test chat model initialization."""
mocker.patch("ads.common.auth.default_signer", return_value=dict(signer=None))
chat = ChatOCIModelDeployment(
model="odsc",
endpoint="test_endpoint",
model_kwargs={"temperature": 0.2},
)
assert chat.model == "odsc"
assert chat.endpoint == "test_endpoint"
assert chat.model_kwargs == {"temperature": 0.2}
assert chat._identifying_params == {
"endpoint": chat.endpoint,
"model_kwargs": {"temperature": 0.2},
"model": chat.model,
"stop": chat.stop,
"stream": chat.streaming,
}
@pytest.mark.requires("ads")
@responses.activate
def test_call(mocker: MockerFixture) -> None:
"""Test valid call to oci model deployment endpoint."""
endpoint = "https://MD_OCID/predict"
responses.add(
responses.POST,
endpoint,
json={
"id": "cmpl-88159e77c92f46088faad75fce2e26a1",
"object": "chat.completion",
"created": 274246,
"model": "odsc-llm",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "Hello World",
},
"finish_reason": "stop",
}
],
"usage": {
"prompt_tokens": 10,
"total_tokens": 20,
"completion_tokens": 10,
},
},
status=200,
)
mocker.patch("ads.common.auth.default_signer", return_value=dict(signer=None))
chat = ChatOCIModelDeployment(endpoint=endpoint)
output = chat.invoke("this is a test.")
assert isinstance(output, AIMessage)
assert output.response_metadata == {
"token_usage": {
"prompt_tokens": 10,
"total_tokens": 20,
"completion_tokens": 10,
},
"model_name": "odsc-llm",
"system_fingerprint": "",
"finish_reason": "stop",
}
@pytest.mark.requires("ads")
@responses.activate
def test_construct_json_body(mocker: MockerFixture) -> None:
"""Tests constructing json body that will be sent to endpoint."""
mocker.patch("ads.common.auth.default_signer", return_value=dict(signer=None))
messages = [
HumanMessage(content="User message"),
]
chat = ChatOCIModelDeployment(
endpoint="test_endpoint", model_kwargs={"temperature": 0.2}
)
payload = chat._construct_json_body(messages, chat._invocation_params(stop=None))
assert payload == {
"messages": [{"content": "User message", "role": "user"}],
"model": chat.model,
"stop": None,
"stream": chat.streaming,
"temperature": 0.2,
}
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_azureopenai.py | import json
import os
from unittest import mock
import pytest
from langchain_community.chat_models.azure_openai import AzureChatOpenAI
@mock.patch.dict(
os.environ,
{
"OPENAI_API_KEY": "test",
"OPENAI_API_BASE": "https://oai.azure.com/",
"OPENAI_API_VERSION": "2023-05-01",
},
)
@pytest.mark.requires("openai")
@pytest.mark.parametrize(
"model_name", ["gpt-4", "gpt-4-32k", "gpt-35-turbo", "gpt-35-turbo-16k"]
)
def test_model_name_set_on_chat_result_when_present_in_response(
model_name: str,
) -> None:
sample_response_text = f"""
{{
"id": "chatcmpl-7ryweq7yc8463fas879t9hdkkdf",
"object": "chat.completion",
"created": 1690381189,
"model": "{model_name}",
"choices": [
{{
"index": 0,
"finish_reason": "stop",
"message": {{
"role": "assistant",
"content": "I'm an AI assistant that can help you."
}}
}}
],
"usage": {{
"completion_tokens": 28,
"prompt_tokens": 15,
"total_tokens": 43
}}
}}
"""
# convert sample_response_text to instance of Mapping[str, Any]
sample_response = json.loads(sample_response_text)
mock_chat = AzureChatOpenAI()
chat_result = mock_chat._create_chat_result(sample_response)
assert (
chat_result.llm_output is not None
and chat_result.llm_output["model_name"] == model_name
)
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/konko.py | """Evaluate ChatKonko Interface."""
from typing import Any
import pytest
from langchain_core.callbacks import CallbackManager
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from langchain_core.outputs import ChatGeneration, ChatResult, LLMResult
from langchain_community.chat_models.konko import ChatKonko
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_konko_chat_test() -> None:
"""Evaluate basic ChatKonko functionality."""
chat_instance = ChatKonko(max_tokens=10)
msg = HumanMessage(content="Hi")
chat_response = chat_instance.invoke([msg])
assert isinstance(chat_response, BaseMessage)
assert isinstance(chat_response.content, str)
def test_konko_chat_test_openai() -> None:
"""Evaluate basic ChatKonko functionality."""
chat_instance = ChatKonko(max_tokens=10, model="meta-llama/llama-2-70b-chat")
msg = HumanMessage(content="Hi")
chat_response = chat_instance.invoke([msg])
assert isinstance(chat_response, BaseMessage)
assert isinstance(chat_response.content, str)
def test_konko_model_test() -> None:
"""Check how ChatKonko manages model_name."""
chat_instance = ChatKonko(model="alpha")
assert chat_instance.model == "alpha"
chat_instance = ChatKonko(model="beta")
assert chat_instance.model == "beta"
def test_konko_available_model_test() -> None:
"""Check how ChatKonko manages model_name."""
chat_instance = ChatKonko(max_tokens=10, n=2)
res = chat_instance.get_available_models()
assert isinstance(res, set)
def test_konko_system_msg_test() -> None:
"""Evaluate ChatKonko's handling of system messages."""
chat_instance = ChatKonko(max_tokens=10)
sys_msg = SystemMessage(content="Initiate user chat.")
user_msg = HumanMessage(content="Hi there")
chat_response = chat_instance.invoke([sys_msg, user_msg])
assert isinstance(chat_response, BaseMessage)
assert isinstance(chat_response.content, str)
def test_konko_generation_test() -> None:
"""Check ChatKonko's generation ability."""
chat_instance = ChatKonko(max_tokens=10, n=2)
msg = HumanMessage(content="Hi")
gen_response = chat_instance.generate([[msg], [msg]])
assert isinstance(gen_response, LLMResult)
assert len(gen_response.generations) == 2
for gen_list in gen_response.generations:
assert len(gen_list) == 2
for gen in gen_list:
assert isinstance(gen, ChatGeneration)
assert isinstance(gen.text, str)
assert gen.text == gen.message.content
def test_konko_multiple_outputs_test() -> None:
"""Test multiple completions with ChatKonko."""
chat_instance = ChatKonko(max_tokens=10, n=5)
msg = HumanMessage(content="Hi")
gen_response = chat_instance._generate([msg])
assert isinstance(gen_response, ChatResult)
assert len(gen_response.generations) == 5
for gen in gen_response.generations:
assert isinstance(gen.message, BaseMessage)
assert isinstance(gen.message.content, str)
def test_konko_streaming_callback_test() -> None:
"""Evaluate streaming's token callback functionality."""
callback_instance = FakeCallbackHandler()
callback_mgr = CallbackManager([callback_instance])
chat_instance = ChatKonko(
max_tokens=10,
streaming=True,
temperature=0,
callback_manager=callback_mgr,
verbose=True,
)
msg = HumanMessage(content="Hi")
chat_response = chat_instance.invoke([msg])
assert callback_instance.llm_streams > 0
assert isinstance(chat_response, BaseMessage)
def test_konko_streaming_info_test() -> None:
"""Ensure generation details are retained during streaming."""
class TestCallback(FakeCallbackHandler):
data_store: dict = {}
def on_llm_end(self, *args: Any, **kwargs: Any) -> Any:
self.data_store["generation"] = args[0]
callback_instance = TestCallback()
callback_mgr = CallbackManager([callback_instance])
chat_instance = ChatKonko(
max_tokens=2,
temperature=0,
callback_manager=callback_mgr,
)
list(chat_instance.stream("hey"))
gen_data = callback_instance.data_store["generation"]
assert gen_data.generations[0][0].text == " Hey"
def test_konko_llm_model_name_test() -> None:
"""Check if llm_output has model info."""
chat_instance = ChatKonko(max_tokens=10)
msg = HumanMessage(content="Hi")
llm_data = chat_instance.generate([[msg]])
assert llm_data.llm_output is not None
assert llm_data.llm_output["model_name"] == chat_instance.model
def test_konko_streaming_model_name_test() -> None:
"""Check model info during streaming."""
chat_instance = ChatKonko(max_tokens=10, streaming=True)
msg = HumanMessage(content="Hi")
llm_data = chat_instance.generate([[msg]])
assert llm_data.llm_output is not None
assert llm_data.llm_output["model_name"] == chat_instance.model
def test_konko_streaming_param_validation_test() -> None:
"""Ensure correct token callback during streaming."""
with pytest.raises(ValueError):
ChatKonko(
max_tokens=10,
streaming=True,
temperature=0,
n=5,
)
def test_konko_additional_args_test() -> None:
"""Evaluate extra arguments for ChatKonko."""
chat_instance = ChatKonko(extra=3, max_tokens=10) # type: ignore[call-arg]
assert chat_instance.max_tokens == 10
assert chat_instance.model_kwargs == {"extra": 3}
chat_instance = ChatKonko(extra=3, model_kwargs={"addition": 2}) # type: ignore[call-arg]
assert chat_instance.model_kwargs == {"extra": 3, "addition": 2}
with pytest.raises(ValueError):
ChatKonko(extra=3, model_kwargs={"extra": 2}) # type: ignore[call-arg]
with pytest.raises(ValueError):
ChatKonko(model_kwargs={"temperature": 0.2})
with pytest.raises(ValueError):
ChatKonko(model_kwargs={"model": "text-davinci-003"})
def test_konko_token_streaming_test() -> None:
"""Check token streaming for ChatKonko."""
chat_instance = ChatKonko(max_tokens=10)
for token in chat_instance.stream("Just a test"):
assert isinstance(token.content, str)
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_oci_data_science.py | # Copyright (c) 2024, Oracle and/or its affiliates.
"""Test Chat model for OCI Data Science Model Deployment Endpoint."""
import sys
from typing import Any, AsyncGenerator, Dict, Generator
from unittest import mock
import pytest
from langchain_core.messages import AIMessage, AIMessageChunk
from requests.exceptions import HTTPError
from langchain_community.chat_models import (
ChatOCIModelDeploymentTGI,
ChatOCIModelDeploymentVLLM,
)
CONST_MODEL_NAME = "odsc-vllm"
CONST_ENDPOINT = "https://oci.endpoint/ocid/predict"
CONST_PROMPT = "This is a prompt."
CONST_COMPLETION = "This is a completion."
CONST_COMPLETION_RESPONSE = {
"id": "chat-123456789",
"object": "chat.completion",
"created": 123456789,
"model": "mistral",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": CONST_COMPLETION,
"tool_calls": [],
},
"logprobs": None,
"finish_reason": "length",
"stop_reason": None,
}
],
"usage": {"prompt_tokens": 115, "total_tokens": 371, "completion_tokens": 256},
"prompt_logprobs": None,
}
CONST_COMPLETION_RESPONSE_TGI = {"generated_text": CONST_COMPLETION}
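# Server-sent-events framing for the sync stream: each chunk is a
# 'data: {json}' line, with <DELTA> substituted per chunk.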
CONST_STREAM_TEMPLATE = (
'data: {"id":"chat-123456","object":"chat.completion.chunk","created":123456789,'
'"model":"odsc-llm","choices":[{"index":0,"delta":<DELTA>,"finish_reason":null}]}'
)
CONST_STREAM_DELTAS = ['{"role":"assistant","content":""}'] + [
'{"content":" ' + word + '"}' for word in CONST_COMPLETION.split(" ")
]
CONST_STREAM_RESPONSE = (
content
for content in [
CONST_STREAM_TEMPLATE.replace("<DELTA>", delta).encode()
for delta in CONST_STREAM_DELTAS
]
+ [b"data: [DONE]"]
)
CONST_ASYNC_STREAM_TEMPLATE = (
'{"id":"chat-123456","object":"chat.completion.chunk","created":123456789,'
'"model":"odsc-llm","choices":[{"index":0,"delta":<DELTA>,"finish_reason":null}]}'
)
CONST_ASYNC_STREAM_RESPONSE = (
CONST_ASYNC_STREAM_TEMPLATE.replace("<DELTA>", delta).encode()
for delta in CONST_STREAM_DELTAS
)
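# Both stream responses above are generator expressions, so each can be
# consumed only once.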
pytestmark = pytest.mark.skipif(
sys.version_info < (3, 9), reason="Requires Python 3.9 or higher"
)
class MockResponse:
"""Represents a mocked response."""
def __init__(self, json_data: Dict, status_code: int = 200):
self.json_data = json_data
self.status_code = status_code
def raise_for_status(self) -> None:
"""Mocked raise for status."""
if 400 <= self.status_code < 600:
raise HTTPError()
def json(self) -> Dict:
"""Returns mocked json data."""
return self.json_data
def iter_lines(self, chunk_size: int = 4096) -> Generator[bytes, None, None]:
"""Returns a generator of mocked streaming response."""
return CONST_STREAM_RESPONSE
@property
def text(self) -> str:
"""Returns the mocked text representation."""
return ""
def mocked_requests_post(url: str, **kwargs: Any) -> MockResponse:
"""Method to mock post requests"""
payload: dict = kwargs.get("json", {})
messages: list = payload.get("messages", [])
prompt = messages[0].get("content")
if prompt == CONST_PROMPT:
return MockResponse(json_data=CONST_COMPLETION_RESPONSE)
return MockResponse(
json_data={},
status_code=404,
)
@pytest.mark.requires("ads")
@pytest.mark.requires("langchain_openai")
@mock.patch("ads.common.auth.default_signer", return_value=dict(signer=None))
@mock.patch("requests.post", side_effect=mocked_requests_post)
def test_invoke_vllm(*args: Any) -> None:
"""Tests invoking vLLM endpoint."""
llm = ChatOCIModelDeploymentVLLM(endpoint=CONST_ENDPOINT, model=CONST_MODEL_NAME)
output = llm.invoke(CONST_PROMPT)
assert isinstance(output, AIMessage)
assert output.content == CONST_COMPLETION
@pytest.mark.requires("ads")
@pytest.mark.requires("langchain_openai")
@mock.patch("ads.common.auth.default_signer", return_value=dict(signer=None))
@mock.patch("requests.post", side_effect=mocked_requests_post)
def test_invoke_tgi(*args: Any) -> None:
"""Tests invoking TGI endpoint using OpenAI Spec."""
llm = ChatOCIModelDeploymentTGI(endpoint=CONST_ENDPOINT, model=CONST_MODEL_NAME)
output = llm.invoke(CONST_PROMPT)
assert isinstance(output, AIMessage)
assert output.content == CONST_COMPLETION
@pytest.mark.requires("ads")
@pytest.mark.requires("langchain_openai")
@mock.patch("ads.common.auth.default_signer", return_value=dict(signer=None))
@mock.patch("requests.post", side_effect=mocked_requests_post)
def test_stream_vllm(*args: Any) -> None:
"""Tests streaming with vLLM endpoint using OpenAI spec."""
llm = ChatOCIModelDeploymentVLLM(
endpoint=CONST_ENDPOINT, model=CONST_MODEL_NAME, streaming=True
)
output = None
count = 0
for chunk in llm.stream(CONST_PROMPT):
assert isinstance(chunk, AIMessageChunk)
if output is None:
output = chunk
else:
output += chunk
count += 1
assert count == 5
assert output is not None
    assert str(output.content).strip() == CONST_COMPLETION
async def mocked_async_streaming_response(
*args: Any, **kwargs: Any
) -> AsyncGenerator[bytes, None]:
"""Returns mocked response for async streaming."""
for item in CONST_ASYNC_STREAM_RESPONSE:
yield item
@pytest.mark.asyncio
@pytest.mark.requires("ads")
@pytest.mark.requires("langchain_openai")
@mock.patch(
"ads.common.auth.default_signer", return_value=dict(signer=mock.MagicMock())
)
@mock.patch(
"langchain_community.utilities.requests.Requests.apost",
mock.MagicMock(),
)
async def test_stream_async(*args: Any) -> None:
"""Tests async streaming."""
llm = ChatOCIModelDeploymentVLLM(
endpoint=CONST_ENDPOINT, model=CONST_MODEL_NAME, streaming=True
)
with mock.patch.object(
llm,
"_aiter_sse",
mock.MagicMock(return_value=mocked_async_streaming_response()),
):
chunks = [str(chunk.content) async for chunk in llm.astream(CONST_PROMPT)]
assert "".join(chunks).strip() == CONST_COMPLETION
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_deepinfra.py | from langchain_community.chat_models import ChatDeepInfra
def test_deepinfra_model_name_param() -> None:
llm = ChatDeepInfra(model_name="foo") # type: ignore[call-arg]
assert llm.model_name == "foo"
def test_deepinfra_model_param() -> None:
llm = ChatDeepInfra(model="foo")
assert llm.model_name == "foo"
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_anthropic.py | """Test Anthropic Chat API wrapper."""
import os
from typing import List
import pytest
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage
from langchain_community.chat_models import ChatAnthropic
from langchain_community.chat_models.anthropic import (
convert_messages_to_prompt_anthropic,
)
os.environ["ANTHROPIC_API_KEY"] = "foo"
@pytest.mark.requires("anthropic")
def test_anthropic_model_name_param() -> None:
llm = ChatAnthropic(model_name="foo")
assert llm.model == "foo"
@pytest.mark.requires("anthropic")
def test_anthropic_model_param() -> None:
llm = ChatAnthropic(model="foo") # type: ignore[call-arg]
assert llm.model == "foo"
@pytest.mark.requires("anthropic")
def test_anthropic_model_kwargs() -> None:
llm = ChatAnthropic(model_kwargs={"foo": "bar"})
assert llm.model_kwargs == {"foo": "bar"}
@pytest.mark.requires("anthropic")
def test_anthropic_fields_in_model_kwargs() -> None:
"""Test that for backwards compatibility fields can be passed in as model_kwargs."""
llm = ChatAnthropic(model_kwargs={"max_tokens_to_sample": 5})
assert llm.max_tokens_to_sample == 5
llm = ChatAnthropic(model_kwargs={"max_tokens": 5})
assert llm.max_tokens_to_sample == 5
@pytest.mark.requires("anthropic")
def test_anthropic_incorrect_field() -> None:
with pytest.warns(match="not default parameter"):
llm = ChatAnthropic(foo="bar") # type: ignore[call-arg]
assert llm.model_kwargs == {"foo": "bar"}
@pytest.mark.requires("anthropic")
def test_anthropic_initialization() -> None:
"""Test anthropic initialization."""
# Verify that chat anthropic can be initialized using a secret key provided
# as a parameter rather than an environment variable.
ChatAnthropic(model="test", anthropic_api_key="test") # type: ignore[arg-type, call-arg]
@pytest.mark.parametrize(
("messages", "expected"),
[
([HumanMessage(content="Hello")], "\n\nHuman: Hello\n\nAssistant:"),
(
[HumanMessage(content="Hello"), AIMessage(content="Answer:")],
"\n\nHuman: Hello\n\nAssistant: Answer:",
),
(
[
SystemMessage(content="You're an assistant"),
HumanMessage(content="Hello"),
AIMessage(content="Answer:"),
],
"You're an assistant\n\nHuman: Hello\n\nAssistant: Answer:",
),
],
)
def test_formatting(messages: List[BaseMessage], expected: str) -> None:
result = convert_messages_to_prompt_anthropic(messages)
assert result == expected
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_reka.py | import json
import os
from typing import Any, Dict, List
from unittest.mock import MagicMock, patch
import pytest
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage
from pydantic import ValidationError
from langchain_community.chat_models import ChatReka
from langchain_community.chat_models.reka import (
convert_to_reka_messages,
process_content,
)
os.environ["REKA_API_KEY"] = "dummy_key"
@pytest.mark.skip(
reason="Dependency conflict w/ other dependencies for urllib3 versions."
)
def test_reka_model_param() -> None:
llm = ChatReka(model="reka-flash")
assert llm.model == "reka-flash"
@pytest.mark.skip(
reason="Dependency conflict w/ other dependencies for urllib3 versions."
)
def test_reka_model_kwargs() -> None:
llm = ChatReka(model_kwargs={"foo": "bar"})
assert llm.model_kwargs == {"foo": "bar"}
@pytest.mark.skip(
reason="Dependency conflict w/ other dependencies for urllib3 versions."
)
def test_reka_incorrect_field() -> None:
"""Test that providing an incorrect field raises ValidationError."""
with pytest.raises(ValidationError):
ChatReka(unknown_field="bar") # type: ignore
@pytest.mark.skip(
reason="Dependency conflict w/ other dependencies for urllib3 versions."
)
def test_reka_initialization() -> None:
"""Test Reka initialization."""
# Verify that ChatReka can be initialized using a secret key provided
# as a parameter rather than an environment variable.
ChatReka(model="reka-flash", reka_api_key="test_key")
@pytest.mark.skip(
reason="Dependency conflict w/ other dependencies for urllib3 versions."
)
@pytest.mark.parametrize(
("content", "expected"),
[
("Hello", [{"type": "text", "text": "Hello"}]),
(
[
{"type": "text", "text": "Describe this image"},
{
"type": "image_url",
"image_url": "https://example.com/image.jpg",
},
],
[
{"type": "text", "text": "Describe this image"},
{"type": "image_url", "image_url": "https://example.com/image.jpg"},
],
),
(
[
{"type": "text", "text": "Hello"},
{
"type": "image_url",
"image_url": {"url": "https://example.com/image.jpg"},
},
],
[
{"type": "text", "text": "Hello"},
{"type": "image_url", "image_url": "https://example.com/image.jpg"},
],
),
],
)
def test_process_content(content: Any, expected: List[Dict[str, Any]]) -> None:
result = process_content(content)
assert result == expected
@pytest.mark.skip(
reason="Dependency conflict w/ other dependencies for urllib3 versions."
)
@pytest.mark.parametrize(
("messages", "expected"),
[
(
[HumanMessage(content="Hello")],
[{"role": "user", "content": [{"type": "text", "text": "Hello"}]}],
),
(
[
HumanMessage(
content=[
{"type": "text", "text": "Describe this image"},
{
"type": "image_url",
"image_url": "https://example.com/image.jpg",
},
]
),
AIMessage(content="It's a beautiful landscape."),
],
[
{
"role": "user",
"content": [
{"type": "text", "text": "Describe this image"},
{
"type": "image_url",
"image_url": "https://example.com/image.jpg",
},
],
},
{
"role": "assistant",
"content": [
{"type": "text", "text": "It's a beautiful landscape."}
],
},
],
),
],
)
def test_convert_to_reka_messages(
messages: List[BaseMessage], expected: List[Dict[str, Any]]
) -> None:
result = convert_to_reka_messages(messages)
assert result == expected
@pytest.mark.skip(
reason="Dependency conflict w/ other dependencies for urllib3 versions."
)
def test_reka_streaming() -> None:
llm = ChatReka(streaming=True)
assert llm.streaming is True
@pytest.mark.skip(
reason="Dependency conflict w/ other dependencies for urllib3 versions."
)
def test_reka_temperature() -> None:
llm = ChatReka(temperature=0.5)
assert llm.temperature == 0.5
@pytest.mark.skip(
reason="Dependency conflict w/ other dependencies for urllib3 versions."
)
def test_reka_max_tokens() -> None:
llm = ChatReka(max_tokens=100)
assert llm.max_tokens == 100
@pytest.mark.skip(
reason="Dependency conflict w/ other dependencies for urllib3 versions."
)
def test_reka_default_params() -> None:
llm = ChatReka()
assert llm._default_params == {
"max_tokens": 256,
"model": "reka-flash",
}
@pytest.mark.skip(
reason="Dependency conflict w/ other dependencies for urllib3 versions."
)
def test_reka_identifying_params() -> None:
"""Test that ChatReka identifies its default parameters correctly."""
chat = ChatReka(model="reka-flash", temperature=0.7, max_tokens=256)
expected_params = {
"model": "reka-flash",
"temperature": 0.7,
"max_tokens": 256,
}
assert chat._default_params == expected_params
@pytest.mark.skip(
reason="Dependency conflict w/ other dependencies for urllib3 versions."
)
def test_reka_llm_type() -> None:
llm = ChatReka()
assert llm._llm_type == "reka-chat"
@pytest.mark.skip(
reason="Dependency conflict w/ other dependencies for urllib3 versions."
)
def test_reka_tool_use_with_mocked_response() -> None:
with patch("reka.client.Reka") as MockReka:
# Mock the Reka client
mock_client = MockReka.return_value
mock_chat = MagicMock()
mock_client.chat = mock_chat
mock_response = MagicMock()
mock_message = MagicMock()
mock_tool_call = MagicMock()
mock_tool_call.id = "tool_call_1"
mock_tool_call.name = "search_tool"
mock_tool_call.parameters = {"query": "LangChain"}
mock_message.tool_calls = [mock_tool_call]
mock_message.content = None
mock_response.responses = [MagicMock(message=mock_message)]
mock_chat.create.return_value = mock_response
llm = ChatReka()
messages: List[BaseMessage] = [HumanMessage(content="Tell me about LangChain")]
result = llm._generate(messages)
assert len(result.generations) == 1
ai_message = result.generations[0].message
assert ai_message.content == ""
assert "tool_calls" in ai_message.additional_kwargs
tool_calls = ai_message.additional_kwargs["tool_calls"]
assert len(tool_calls) == 1
assert tool_calls[0]["id"] == "tool_call_1"
assert tool_calls[0]["function"]["name"] == "search_tool"
assert tool_calls[0]["function"]["arguments"] == json.dumps(
{"query": "LangChain"}
)
@pytest.mark.skip(
reason="Dependency conflict w/ other dependencies for urllib3 versions."
)
@pytest.mark.parametrize(
("messages", "expected"),
[
# Test single system message
(
[
SystemMessage(content="You are a helpful assistant."),
HumanMessage(content="Hello"),
],
[
{
"role": "user",
"content": [
{"type": "text", "text": "You are a helpful assistant.\nHello"}
],
}
],
),
# Test system message with multiple messages
(
[
SystemMessage(content="You are a helpful assistant."),
HumanMessage(content="What is 2+2?"),
AIMessage(content="4"),
HumanMessage(content="Thanks!"),
],
[
{
"role": "user",
"content": [
{
"type": "text",
"text": "You are a helpful assistant.\nWhat is 2+2?",
}
],
},
{"role": "assistant", "content": [{"type": "text", "text": "4"}]},
{"role": "user", "content": [{"type": "text", "text": "Thanks!"}]},
],
),
# Test system message with media content
(
[
SystemMessage(content="Hi."),
HumanMessage(
content=[
{"type": "text", "text": "What's in this image?"},
{
"type": "image_url",
"image_url": "https://example.com/image.jpg",
},
]
),
],
[
{
"role": "user",
"content": [
{
"type": "text",
"text": "Hi.\nWhat's in this image?",
},
{
"type": "image_url",
"image_url": "https://example.com/image.jpg",
},
],
},
],
),
],
)
def test_system_message_handling(
messages: List[BaseMessage], expected: List[Dict[str, Any]]
) -> None:
"""Test that system messages are handled correctly."""
result = convert_to_reka_messages(messages)
assert result == expected
@pytest.mark.skip(
reason="Dependency conflict w/ other dependencies for urllib3 versions."
)
def test_multiple_system_messages_error() -> None:
"""Test that multiple system messages raise an error."""
messages = [
SystemMessage(content="System message 1"),
SystemMessage(content="System message 2"),
HumanMessage(content="Hello"),
]
with pytest.raises(ValueError, match="Multiple system messages are not supported."):
convert_to_reka_messages(messages)
@pytest.mark.skip(
reason="Dependency conflict w/ other dependencies for urllib3 versions."
)
def test_get_num_tokens() -> None:
"""Test that token counting works correctly for different input types."""
llm = ChatReka()
import tiktoken
encoding = tiktoken.get_encoding("cl100k_base")
# Test string input
text = "What is the weather like today?"
expected_tokens = len(encoding.encode(text))
assert llm.get_num_tokens(text) == expected_tokens
# Test BaseMessage input
message = HumanMessage(content="What is the weather like today?")
assert isinstance(message.content, str)
expected_tokens = len(encoding.encode(message.content))
assert llm.get_num_tokens(message) == expected_tokens
# Test List[BaseMessage] input
messages = [
SystemMessage(content="You are a helpful assistant."),
HumanMessage(content="Hi!"),
AIMessage(content="Hello! How can I help you today?"),
]
expected_tokens = sum(
len(encoding.encode(msg.content))
for msg in messages
if isinstance(msg.content, str)
)
assert llm.get_num_tokens(messages) == expected_tokens
# Test empty message list
assert llm.get_num_tokens([]) == 0
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_mlx.py | """Test MLX Chat wrapper."""
from importlib import import_module
def test_import_class() -> None:
"""Test that the class can be imported."""
module_name = "langchain_community.chat_models.mlx"
class_name = "ChatMLX"
module = import_module(module_name)
assert hasattr(module, class_name)
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_outlines.py | import pytest
from _pytest.monkeypatch import MonkeyPatch
from pydantic import BaseModel, Field
from langchain_community.chat_models.outlines import ChatOutlines
def test_chat_outlines_initialization(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setattr(ChatOutlines, "build_client", lambda self: self)
chat = ChatOutlines(
model="microsoft/Phi-3-mini-4k-instruct",
max_tokens=42,
stop=["\n"],
)
assert chat.model == "microsoft/Phi-3-mini-4k-instruct"
assert chat.max_tokens == 42
assert chat.backend == "transformers"
assert chat.stop == ["\n"]
def test_chat_outlines_backend_llamacpp(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setattr(ChatOutlines, "build_client", lambda self: self)
chat = ChatOutlines(
model="TheBloke/Llama-2-7B-Chat-GGUF/llama-2-7b-chat.Q4_K_M.gguf",
backend="llamacpp",
)
assert chat.backend == "llamacpp"
def test_chat_outlines_backend_vllm(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setattr(ChatOutlines, "build_client", lambda self: self)
chat = ChatOutlines(model="microsoft/Phi-3-mini-4k-instruct", backend="vllm")
assert chat.backend == "vllm"
def test_chat_outlines_backend_mlxlm(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setattr(ChatOutlines, "build_client", lambda self: self)
chat = ChatOutlines(model="microsoft/Phi-3-mini-4k-instruct", backend="mlxlm")
assert chat.backend == "mlxlm"
def test_chat_outlines_with_regex(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setattr(ChatOutlines, "build_client", lambda self: self)
regex = r"\d{3}-\d{3}-\d{4}"
chat = ChatOutlines(model="microsoft/Phi-3-mini-4k-instruct", regex=regex)
assert chat.regex == regex
def test_chat_outlines_with_type_constraints(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setattr(ChatOutlines, "build_client", lambda self: self)
chat = ChatOutlines(model="microsoft/Phi-3-mini-4k-instruct", type_constraints=int)
assert chat.type_constraints == int # noqa
def test_chat_outlines_with_json_schema(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setattr(ChatOutlines, "build_client", lambda self: self)
class TestSchema(BaseModel):
name: str = Field(description="A person's name")
age: int = Field(description="A person's age")
chat = ChatOutlines(
model="microsoft/Phi-3-mini-4k-instruct", json_schema=TestSchema
)
assert chat.json_schema == TestSchema
def test_chat_outlines_with_grammar(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setattr(ChatOutlines, "build_client", lambda self: self)
grammar = """
?start: expression
?expression: term (("+" | "-") term)*
?term: factor (("*" | "/") factor)*
?factor: NUMBER | "-" factor | "(" expression ")"
%import common.NUMBER
"""
chat = ChatOutlines(model="microsoft/Phi-3-mini-4k-instruct", grammar=grammar)
assert chat.grammar == grammar
def test_raise_for_multiple_output_constraints(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setattr(ChatOutlines, "build_client", lambda self: self)
with pytest.raises(ValueError):
ChatOutlines(
model="microsoft/Phi-3-mini-4k-instruct",
type_constraints=int,
regex=r"\d{3}-\d{3}-\d{4}",
)
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_models/test_tongyi.py | from langchain_core.messages import (
AIMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.output_parsers.openai_tools import (
parse_tool_call,
)
from langchain_community.chat_models.tongyi import (
convert_dict_to_message,
convert_message_to_dict,
)
def test__convert_dict_to_message_human() -> None:
message_dict = {"role": "user", "content": "foo"}
result = convert_dict_to_message(message_dict)
expected_output = HumanMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_ai() -> None:
message_dict = {"role": "assistant", "content": "foo"}
result = convert_dict_to_message(message_dict)
expected_output = AIMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_other_role() -> None:
message_dict = {"role": "system", "content": "foo"}
result = convert_dict_to_message(message_dict)
expected_output = SystemMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_function_call() -> None:
raw_function_calls = [
{
"function": {
"name": "get_current_weather",
"arguments": '{"location": "Boston", "unit": "fahrenheit"}',
},
"type": "function",
}
]
message_dict = {
"role": "assistant",
"content": "foo",
"tool_calls": raw_function_calls,
}
result = convert_dict_to_message(message_dict)
tool_calls = [
parse_tool_call(raw_tool_call, return_id=True)
for raw_tool_call in raw_function_calls
]
expected_output = AIMessage(
content="foo",
additional_kwargs={"tool_calls": raw_function_calls},
tool_calls=tool_calls, # type: ignore[arg-type]
invalid_tool_calls=[],
)
assert result == expected_output
def test__convert_message_to_dict_human() -> None:
message = HumanMessage(content="foo")
result = convert_message_to_dict(message)
expected_output = {"role": "user", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_ai() -> None:
message = AIMessage(content="foo")
result = convert_message_to_dict(message)
expected_output = {"role": "assistant", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_system() -> None:
message = SystemMessage(content="foo")
result = convert_message_to_dict(message)
expected_output = {"role": "system", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_tool() -> None:
message = FunctionMessage(name="foo", content="bar")
result = convert_message_to_dict(message)
expected_output = {
"role": "tool",
"tool_call_id": "",
"content": "bar",
"name": "foo",
}
assert result == expected_output
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_loaders/test_slack.py | import pathlib
from langchain_community.chat_loaders import slack, utils
def test_slack_chat_loader() -> None:
chat_path = pathlib.Path(__file__).parents[2] / "examples" / "slack_export.zip"
loader = slack.SlackChatLoader(str(chat_path))
chat_sessions = list(
utils.map_ai_messages(loader.lazy_load(), sender="U0500003428")
)
assert chat_sessions, "Chat sessions should not be empty"
assert chat_sessions[1]["messages"], "Chat messages should not be empty"
assert (
"Example message" in chat_sessions[1]["messages"][0].content
), "Chat content mismatch"
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_loaders/test_whatsapp.py | import pathlib
from langchain_community.chat_loaders import utils, whatsapp
def test_whatsapp_chat_loader() -> None:
chat_path = pathlib.Path(__file__).parent / "data" / "whatsapp_chat.txt"
loader = whatsapp.WhatsAppChatLoader(str(chat_path))
chat_sessions = list(
utils.map_ai_messages(loader.lazy_load(), sender="Dr. Feather")
)
assert chat_sessions, "Chat sessions should not be empty"
assert chat_sessions[0]["messages"], "Chat messages should not be empty"
assert (
"I spotted a rare Hyacinth Macaw yesterday in the Amazon Rainforest."
" Such a magnificent creature!" in chat_sessions[0]["messages"][0].content
), "Chat content mismatch"
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_loaders/test_imports.py | from langchain_community.chat_loaders import __all__, _module_lookup
EXPECTED_ALL = [
"BaseChatLoader",
"FolderFacebookMessengerChatLoader",
"GMailLoader",
"IMessageChatLoader",
"LangSmithDatasetChatLoader",
"LangSmithRunChatLoader",
"SingleFileFacebookMessengerChatLoader",
"SlackChatLoader",
"TelegramChatLoader",
"WhatsAppChatLoader",
]
def test_all_imports() -> None:
"""Test that __all__ is correctly set."""
assert set(__all__) == set(EXPECTED_ALL)
assert set(__all__) == set(_module_lookup.keys())
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_loaders/test_imessage.py | import datetime
import pathlib
from langchain_community.chat_loaders import imessage, utils
def test_imessage_chat_loader_upgrade_osx11() -> None:
chat_path = (
pathlib.Path(__file__).parent / "data" / "imessage_chat_upgrade_osx_11.db"
)
loader = imessage.IMessageChatLoader(str(chat_path))
chat_sessions = list(
utils.map_ai_messages(loader.lazy_load(), sender="testemail@gmail.com")
)
assert chat_sessions, "Chat sessions should not be empty"
assert chat_sessions[0]["messages"], "Chat messages should not be empty"
first_message = chat_sessions[0]["messages"][0]
# message content in text field
assert "Yeh" in first_message.content, "Chat content mismatch"
# time parsed correctly
expected_message_time = 720845450393148160
assert (
first_message.additional_kwargs["message_time"] == expected_message_time
), "unexpected time"
expected_parsed_time = datetime.datetime(2023, 11, 5, 2, 50, 50, 393148)
assert (
first_message.additional_kwargs["message_time_as_datetime"]
== expected_parsed_time
), "date failed to parse"
# is_from_me parsed correctly
assert (
first_message.additional_kwargs["is_from_me"] is False
), "is_from_me failed to parse"
def test_imessage_chat_loader() -> None:
chat_path = pathlib.Path(__file__).parent / "data" / "imessage_chat.db"
loader = imessage.IMessageChatLoader(str(chat_path))
chat_sessions = list(
utils.map_ai_messages(loader.lazy_load(), sender="testemail@gmail.com")
)
assert chat_sessions, "Chat sessions should not be empty"
assert chat_sessions[0]["messages"], "Chat messages should not be empty"
first_message = chat_sessions[0]["messages"][0]
# message content in text field
assert "Yeh" in first_message.content, "Chat content mismatch"
# time parsed correctly
expected_message_time = 720845450393148160
assert (
first_message.additional_kwargs["message_time"] == expected_message_time
), "unexpected time"
expected_parsed_time = datetime.datetime(2023, 11, 5, 2, 50, 50, 393148)
assert (
first_message.additional_kwargs["message_time_as_datetime"]
== expected_parsed_time
), "date failed to parse"
# is_from_me parsed correctly
assert (
first_message.additional_kwargs["is_from_me"] is False
), "is_from_me failed to parse"
# short message content in attributedBody field
assert (
"John is the almighty" in chat_sessions[0]["messages"][16].content
), "Chat content mismatch"
# long message content in attributedBody field
long_msg = "aaaaabbbbbaaaaabbbbbaaaaabbbbbaaaaabbbbbaaaaabbbbbaaaaabbbbbaaaaabbbbba"
"aaaabbbbbaaaaabbbbbaaaaabbbbbaaaaabbbbbaaaaabbbbbaaaaabbb"
assert long_msg in chat_sessions[0]["messages"][18].content, "Chat content mismatch"
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_loaders/test_telegram.py | """Test the telegram chat loader."""
import pathlib
import tempfile
import zipfile
from typing import Sequence
import pytest
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_community.chat_loaders import telegram, utils
def _assert_messages_are_equal(
actual_messages: Sequence[BaseMessage],
expected_messages: Sequence[BaseMessage],
) -> None:
assert len(actual_messages) == len(expected_messages)
for actual, expected in zip(actual_messages, expected_messages):
assert actual.content == expected.content
assert (
actual.additional_kwargs["sender"] == expected.additional_kwargs["sender"]
)
def _check_telegram_chat_loader(path: str) -> None:
_data_dir = pathlib.Path(__file__).parent / "data"
source_path = _data_dir / path
# Create a zip file from the directory in a temp directory
with tempfile.TemporaryDirectory() as temp_dir_:
temp_dir = pathlib.Path(temp_dir_)
if path.endswith(".zip"):
# Make a new zip file
zip_path = temp_dir / "telegram_chat.zip"
with zipfile.ZipFile(zip_path, "w") as zip_file:
original_path = _data_dir / path.replace(".zip", "")
for file_path in original_path.iterdir():
zip_file.write(file_path, arcname=file_path.name)
source_path = zip_path
loader = telegram.TelegramChatLoader(str(source_path))
chat_sessions_ = loader.lazy_load()
chat_sessions_ = utils.merge_chat_runs(chat_sessions_)
chat_sessions = list(
utils.map_ai_messages(chat_sessions_, sender="Batman & Robin")
)
assert len(chat_sessions) == 1
session = chat_sessions[0]
assert len(session["messages"]) > 0
assert session["messages"][0].content == "i refuse to converse with you"
expected_content = [
HumanMessage(
content="i refuse to converse with you",
additional_kwargs={
"sender": "Jimmeny Marvelton",
"events": [{"message_time": "23.08.2023 13:11:23 UTC-08:00"}],
},
),
AIMessage(
content="Hi nemesis",
additional_kwargs={
"sender": "Batman & Robin",
"events": [{"message_time": "23.08.2023 13:13:20 UTC-08:00"}],
},
),
HumanMessage(
content="we meet again\n\nyou will not trick me this time",
additional_kwargs={
"sender": "Jimmeny Marvelton",
"events": [{"message_time": "23.08.2023 13:15:35 UTC-08:00"}],
},
),
]
_assert_messages_are_equal(session["messages"], expected_content)
@pytest.mark.parametrize(
"path",
[
"telegram_chat_json",
"telegram_chat_json.zip",
"telegram_chat_json/result.json",
],
)
def test_telegram_chat_loader(path: str) -> None:
_check_telegram_chat_loader(path)
@pytest.mark.skip(reason="requires bs4 but marking it as such doesn't seem to work")
@pytest.mark.parametrize(
"path",
[
"telegram_chat_json",
"telegram_chat_json.zip",
"telegram_chat_json/result.json",
],
)
def test_telegram_chat_loader_html(path: str) -> None:
_check_telegram_chat_loader(path)
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_loaders | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_loaders/data/whatsapp_chat.txt | [8/15/23, 9:12:33 AM] Dr. Feather: Messages and calls are end-to-end encrypted. No one outside of this chat, not even WhatsApp, can read or listen to them.
[8/15/23, 9:12:43 AM] Dr. Feather: I spotted a rare Hyacinth Macaw yesterday in the Amazon Rainforest. Such a magnificent creature!
[8/15/23, 9:12:48 AM] Dr. Feather: image omitted
[8/15/23, 9:13:15 AM] Jungle Jane: That's stunning! Were you able to observe its behavior?
[8/15/23, 9:13:23 AM] Dr. Feather: image omitted
[8/15/23, 9:14:02 AM] Dr. Feather: Yes, it seemed quite social with other macaws. They're known for their playful nature.
[8/15/23, 9:14:15 AM] Jungle Jane: How's the research going on parrot communication?
[8/15/23, 9:14:30 AM] Dr. Feather: image omitted
[8/15/23, 9:14:50 AM] Dr. Feather: It's progressing well. We're learning so much about how they use sound and color to communicate.
[8/15/23, 9:15:10 AM] Jungle Jane: That's fascinating! Can't wait to read your paper on it.
[8/15/23, 9:15:20 AM] Dr. Feather: Thank you! I'll send you a draft soon.
[8/15/23, 9:25:16 PM] Jungle Jane: Looking forward to it! Keep up the great work.
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_loaders/data | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_loaders/data/telegram_chat_html/messages.html | <!DOCTYPE html>
<html>
<head>
<meta charset="utf-8"/>
<title>Exported Data</title>
<meta content="width=device-width, initial-scale=1.0" name="viewport"/>
<link href="css/style.css" rel="stylesheet"/>
<script src="js/script.js" type="text/javascript">
</script>
</head>
<body onload="CheckLocation();">
<div class="page_wrap">
<div class="page_header">
<div class="content">
<div class="text bold">
Jimmeny Marvelton
</div>
</div>
</div>
<div class="page_body chat_page">
<div class="history">
<div class="message service" id="message-1">
<div class="body details">
23 August 2023
</div>
</div>
<div class="message default clearfix" id="message1">
<div class="pull_left userpic_wrap">
<div class="userpic userpic2" style="width: 42px; height: 42px">
<div class="initials" style="line-height: 42px">
bA
</div>
</div>
</div>
<div class="body">
<div class="pull_right date details" title="23.08.2023 13:11:23 UTC-08:00">
13:11
</div>
<div class="from_name">
Jimmeny Marvelton
</div>
<div class="text">
i refuse to converse with you
</div>
</div>
</div>
<div class="message default clearfix" id="message2">
<div class="pull_left userpic_wrap">
<div class="userpic userpic1" style="width: 42px; height: 42px">
<div class="initials" style="line-height: 42px">
WF
</div>
</div>
</div>
<div class="body">
<div class="pull_right date details" title="23.08.2023 13:13:20 UTC-08:00">
13:13
</div>
<div class="from_name">
Batman & Robin
</div>
<div class="text">
Hi nemesis
</div>
</div>
</div>
<div class="message default clearfix" id="message3">
<div class="pull_left userpic_wrap">
<div class="userpic userpic2" style="width: 42px; height: 42px">
<div class="initials" style="line-height: 42px">
bA
</div>
</div>
</div>
<div class="body">
<div class="pull_right date details" title="23.08.2023 13:15:35 UTC-08:00">
13:15
</div>
<div class="from_name">
Jimmeny Marvelton
</div>
<div class="text">
we meet again
</div>
</div>
</div>
<div class="message default clearfix joined" id="message4">
<div class="body">
<div class="pull_right date details" title="23.08.2023 13:15:53 UTC-08:00">
13:15
</div>
<div class="text">
you will not trick me this time
</div>
</div>
</div>
</div>
</div>
</div>
</body>
</html>
|
0 | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_loaders/data | lc_public_repos/langchain/libs/community/tests/unit_tests/chat_loaders/data/telegram_chat_json/result.json | {
"name": "Jimmeny",
"type": "personal_chat",
"id": 5965280513,
"messages": [
{
"id": 1,
"type": "message",
"date": "2023-08-23T13:11:23",
"date_unixtime": "1692821483",
"from": "Jimmeny Marvelton",
"from_id": "user123450513",
"text": "i refuse to converse with you",
"text_entities": [
{
"type": "plain",
"text": "i refuse to converse with you"
}
]
},
{
"id": 2,
"type": "message",
"date": "2023-08-23T13:13:20",
"date_unixtime": "1692821600",
"from": "Batman & Robin",
"from_id": "user6565661032",
"text": "Hi nemesis",
"text_entities": [
{
"type": "plain",
"text": "Hi nemesis"
}
]
},
{
"id": 3,
"type": "message",
"date": "2023-08-23T13:15:35",
"date_unixtime": "1692821735",
"from": "Jimmeny Marvelton",
"from_id": "user123450513",
"text": "we meet again",
"text_entities": [
{
"type": "plain",
"text": "we meet again"
}
]
},
{
"id": 4,
"type": "message",
"date": "2023-08-23T13:15:53",
"date_unixtime": "1692821753",
"from": "Jimmeny Marvelton",
"from_id": "user123450513",
"text": "you will not trick me this time",
"text_entities": [
{
"type": "plain",
"text": "you will not trick me this time"
}
]
}
]
} |
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/README.rst | Example Docs
------------
The sample docs directory contains the following files:
- ``example-10k.html`` - A 10-K SEC filing in HTML format
- ``layout-parser-paper.pdf`` - A PDF copy of the layout parser paper
- ``factbook.xml``/``factbook.xsl`` - Example XML/XSL files that you
can use to test stylesheets
These documents can be used to test out the parsers in the library. In
addition, here are instructions for pulling in some sample docs that are
too big to store in the repo.
XBRL 10-K
^^^^^^^^^
You can get an example 10-K in inline XBRL format using the following
``curl``. Note, you need to have the user agent set in the header or the
SEC site will reject your request.
.. code:: bash
curl -O \
  -A '${organization} ${email}' \
https://www.sec.gov/Archives/edgar/data/311094/000117184321001344/0001171843-21-001344.txt
You can parse this document using the HTML parser.
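As a minimal sketch (assuming the ``unstructured`` extra is installed; the
file name below is an illustrative local path for the filing downloaded
above), the ``UnstructuredHTMLLoader`` from ``langchain_community`` can load
it:

.. code:: python

   from langchain_community.document_loaders import UnstructuredHTMLLoader

   # Hypothetical local path to the filing fetched with the curl command above.
   loader = UnstructuredHTMLLoader("0001171843-21-001344.txt")
   docs = loader.load()
   print(docs[0].page_content[:500])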
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/mlb_teams_2012.csv | "Team", "Payroll (millions)", "Wins"
"Nationals", 81.34, 98
"Reds", 82.20, 97
"Yankees", 197.96, 95
"Giants", 117.62, 94
"Braves", 83.31, 94
"Athletics", 55.37, 94
"Rangers", 120.51, 93
"Orioles", 81.43, 93
"Rays", 64.17, 90
"Angels", 154.49, 89
"Tigers", 132.30, 88
"Cardinals", 110.30, 88
"Dodgers", 95.14, 86
"White Sox", 96.92, 85
"Brewers", 97.65, 83
"Phillies", 174.54, 81
"Diamondbacks", 74.28, 81
"Pirates", 63.43, 79
"Padres", 55.24, 76
"Mariners", 81.97, 75
"Mets", 93.35, 74
"Blue Jays", 75.48, 73
"Royals", 60.91, 72
"Marlins", 118.07, 69
"Red Sox", 173.18, 69
"Indians", 78.43, 68
"Twins", 94.08, 66
"Rockies", 78.06, 64
"Cubs", 88.19, 61
"Astros", 60.65, 55
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/stanley-cups.tsv | Stanley Cups
Team Location Stanley Cups
Blues STL 1
Flyers PHI 2
Maple Leafs TOR 13
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/test_nominal.csv | column1,column2,column3
value1,value2,value3
value4,value5,value6 |
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/default-encoding.py | u = "🦜🔗"
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/brandfetch-brandfetch-2.0.0-resolved.json | {
"openapi": "3.0.1",
"info": {
"title": "Brandfetch API",
"description": "Brandfetch API (v2) for retrieving brand information.\n\nSee our [documentation](https://docs.brandfetch.com/) for further details. ",
"termsOfService": "https://brandfetch.com/terms",
"contact": {
"url": "https://brandfetch.com/developers"
},
"version": "2.0.0"
},
"externalDocs": {
"description": "Documentation",
"url": "https://docs.brandfetch.com/"
},
"servers": [
{
"url": "https://api.brandfetch.io/v2"
}
],
"paths": {
"/brands/{domainOrId}": {
"get": {
"summary": "Retrieve a brand",
"description": "Fetch brand information by domain or ID\n\nFurther details here: https://docs.brandfetch.com/reference/retrieve-brand\n",
"parameters": [
{
"name": "domainOrId",
"in": "path",
"description": "Domain or ID of the brand",
"required": true,
"style": "simple",
"explode": false,
"schema": {
"type": "string"
}
}
],
"responses": {
"200": {
"description": "Brand data",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Brand"
},
"examples": {
"brandfetch.com": {
"value": "{\"name\":\"Brandfetch\",\"domain\":\"brandfetch.com\",\"claimed\":true,\"description\":\"All brands. In one place\",\"links\":[{\"name\":\"twitter\",\"url\":\"https://twitter.com/brandfetch\"},{\"name\":\"linkedin\",\"url\":\"https://linkedin.com/company/brandfetch\"}],\"logos\":[{\"type\":\"logo\",\"theme\":\"light\",\"formats\":[{\"src\":\"https://asset.brandfetch.io/idL0iThUh6/id9WE9j86h.svg\",\"background\":\"transparent\",\"format\":\"svg\",\"size\":15555}]},{\"type\":\"logo\",\"theme\":\"dark\",\"formats\":[{\"src\":\"https://asset.brandfetch.io/idL0iThUh6/idWbsK1VCy.png\",\"background\":\"transparent\",\"format\":\"png\",\"height\":215,\"width\":800,\"size\":33937},{\"src\":\"https://asset.brandfetch.io/idL0iThUh6/idtCMfbWO0.svg\",\"background\":\"transparent\",\"format\":\"svg\",\"height\":null,\"width\":null,\"size\":15567}]},{\"type\":\"symbol\",\"theme\":\"light\",\"formats\":[{\"src\":\"https://asset.brandfetch.io/idL0iThUh6/idXGq6SIu2.svg\",\"background\":\"transparent\",\"format\":\"svg\",\"size\":2215}]},{\"type\":\"symbol\",\"theme\":\"dark\",\"formats\":[{\"src\":\"https://asset.brandfetch.io/idL0iThUh6/iddCQ52AR5.svg\",\"background\":\"transparent\",\"format\":\"svg\",\"size\":2215}]},{\"type\":\"icon\",\"theme\":\"dark\",\"formats\":[{\"src\":\"https://asset.brandfetch.io/idL0iThUh6/idls3LaPPQ.png\",\"background\":null,\"format\":\"png\",\"height\":400,\"width\":400,\"size\":2565}]}],\"colors\":[{\"hex\":\"#0084ff\",\"type\":\"accent\",\"brightness\":113},{\"hex\":\"#00193E\",\"type\":\"brand\",\"brightness\":22},{\"hex\":\"#F03063\",\"type\":\"brand\",\"brightness\":93},{\"hex\":\"#7B0095\",\"type\":\"brand\",\"brightness\":37},{\"hex\":\"#76CC4B\",\"type\":\"brand\",\"brightness\":176},{\"hex\":\"#FFDA00\",\"type\":\"brand\",\"brightness\":210},{\"hex\":\"#000000\",\"type\":\"dark\",\"brightness\":0},{\"hex\":\"#ffffff\",\"type\":\"light\",\"brightness\":255}],\"fonts\":[{\"name\":\"Poppins\",\"type\":\"title\",\"origin\":\"google\",\"originId\":\"Poppins\",\"weights\":[]},{\"name\":\"Inter\",\"type\":\"body\",\"origin\":\"google\",\"originId\":\"Inter\",\"weights\":[]}],\"images\":[{\"type\":\"banner\",\"formats\":[{\"src\":\"https://asset.brandfetch.io/idL0iThUh6/idUuia5imo.png\",\"background\":\"transparent\",\"format\":\"png\",\"height\":500,\"width\":1500,\"size\":5539}]}]}"
}
}
}
}
},
"400": {
"description": "Invalid domain or ID supplied"
},
"404": {
"description": "The brand does not exist or the domain can't be resolved."
}
},
"security": [
{
"bearerAuth": []
}
]
}
}
},
"components": {
"schemas": {
"Brand": {
"required": [
"claimed",
"colors",
"description",
"domain",
"fonts",
"images",
"links",
"logos",
"name"
],
"type": "object",
"properties": {
"images": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ImageAsset"
}
},
"fonts": {
"type": "array",
"items": {
"$ref": "#/components/schemas/FontAsset"
}
},
"domain": {
"type": "string"
},
"claimed": {
"type": "boolean"
},
"name": {
"type": "string"
},
"description": {
"type": "string"
},
"links": {
"type": "array",
"items": {
"$ref": "#/components/schemas/Brand_links"
}
},
"logos": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ImageAsset"
}
},
"colors": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ColorAsset"
}
}
},
"description": "Object representing a brand"
},
"ColorAsset": {
"required": [
"brightness",
"hex",
"type"
],
"type": "object",
"properties": {
"brightness": {
"type": "integer"
},
"hex": {
"type": "string"
},
"type": {
"type": "string",
"enum": [
"accent",
"brand",
"customizable",
"dark",
"light",
"vibrant"
]
}
},
"description": "Brand color asset"
},
"FontAsset": {
"type": "object",
"properties": {
"originId": {
"type": "string"
},
"origin": {
"type": "string",
"enum": [
"adobe",
"custom",
"google",
"system"
]
},
"name": {
"type": "string"
},
"type": {
"type": "string"
},
"weights": {
"type": "array",
"items": {
"type": "number"
}
},
"items": {
"type": "string"
}
},
"description": "Brand font asset"
},
"ImageAsset": {
"required": [
"formats",
"theme",
"type"
],
"type": "object",
"properties": {
"formats": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ImageFormat"
}
},
"theme": {
"type": "string",
"enum": [
"light",
"dark"
]
},
"type": {
"type": "string",
"enum": [
"logo",
"icon",
"symbol",
"banner"
]
}
},
"description": "Brand image asset"
},
"ImageFormat": {
"required": [
"background",
"format",
"size",
"src"
],
"type": "object",
"properties": {
"size": {
"type": "integer"
},
"src": {
"type": "string"
},
"background": {
"type": "string",
"enum": [
"transparent"
]
},
"format": {
"type": "string"
},
"width": {
"type": "integer"
},
"height": {
"type": "integer"
}
},
"description": "Brand image asset image format"
},
"Brand_links": {
"required": [
"name",
"url"
],
"type": "object",
"properties": {
"name": {
"type": "string"
},
"url": {
"type": "string"
}
}
}
},
"securitySchemes": {
"bearerAuth": {
"type": "http",
"scheme": "bearer",
"bearerFormat": "API Key"
}
}
}
} |
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/whatsapp_chat.txt | [05.05.23, 15:48:11] James: Hi here
[11/8/21, 9:41:32 AM] User name: Message 123
1/23/23, 3:19 AM - User 2: Bye!
1/23/23, 3:22_AM - User 1: And let me know if anything changes
[1/24/21, 12:41:03 PM] ~ User name 2: Of course!
[2023/5/4, 16:13:23] ~ User 2: See you!
7/19/22, 11:32 PM - User 1: Hello
7/20/22, 11:32 am - User 2: Goodbye
4/20/23, 9:42 am - User 3: <Media omitted>
6/29/23, 12:16 am - User 4: This message was deleted
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/docusaurus-sitemap.xml | <?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:news="http://www.google.com/schemas/sitemap-news/0.9"
xmlns:xhtml="http://www.w3.org/1999/xhtml"
xmlns:image="http://www.google.com/schemas/sitemap-image/1.1"
xmlns:video="http://www.google.com/schemas/sitemap-video/1.1">
<url>
<loc>https://python.langchain.com/docs/integrations/document_loaders/sitemap</loc>
<changefreq>weekly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://python.langchain.com/cookbook</loc>
<changefreq>weekly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://python.langchain.com/docs/additional_resources</loc>
<changefreq>weekly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://python.langchain.com/docs/modules/chains/how_to/</loc>
<changefreq>weekly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa</loc>
<changefreq>weekly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://python.langchain.com/docs/use_cases/summarization</loc>
<changefreq>weekly</changefreq>
<priority>0.5</priority>
</url>
<url>
<loc>https://python.langchain.com/</loc>
<changefreq>weekly</changefreq>
<priority>0.5</priority>
</url>
</urlset> |
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/fake-email-attachment.eml | MIME-Version: 1.0
Date: Fri, 23 Dec 2022 12:08:48 -0600
Message-ID: <CAPgNNXSzLVJ-d1OCX_TjFgJU7ugtQrjFybPtAMmmYZzphxNFYg@mail.gmail.com>
Subject: Fake email with attachment
From: Mallori Harrell <mallori@unstructured.io>
To: Mallori Harrell <mallori@unstructured.io>
Content-Type: multipart/mixed; boundary="0000000000005d654405f082adb7"
--0000000000005d654405f082adb7
Content-Type: multipart/alternative; boundary="0000000000005d654205f082adb5"
--0000000000005d654205f082adb5
Content-Type: text/plain; charset="UTF-8"
Hello!
Here's the attachments!
It includes:
- Lots of whitespace
- Little to no content
- and is a quick read
Best,
Mallori
--0000000000005d654205f082adb5
Content-Type: text/html; charset="UTF-8"
Content-Transfer-Encoding: quoted-printable
<div dir=3D"ltr">Hello!=C2=A0<div><br></div><div>Here's the attachments=
!</div><div><br></div><div>It includes:</div><div><ul><li style=3D"margin-l=
eft:15px">Lots of whitespace</li><li style=3D"margin-left:15px">Little=C2=
=A0to no content</li><li style=3D"margin-left:15px">and is a quick read</li=
></ul><div>Best,</div></div><div><br></div><div>Mallori</div><div dir=3D"lt=
r" class=3D"gmail_signature" data-smartmail=3D"gmail_signature"><div dir=3D=
"ltr"><div><div><br></div></div></div></div></div>
--0000000000005d654205f082adb5--
--0000000000005d654405f082adb7
Content-Type: text/plain; charset="US-ASCII"; name="fake-attachment.txt"
Content-Disposition: attachment; filename="fake-attachment.txt"
Content-Transfer-Encoding: base64
X-Attachment-Id: f_lc0tto5j0
Content-ID: <f_lc0tto5j0>
SGV5IHRoaXMgaXMgYSBmYWtlIGF0dGFjaG1lbnQh
--0000000000005d654405f082adb7-- |
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/mlb_teams_2012.sql | -- Provisioning table "mlb_teams_2012".
--
-- psql postgresql://postgres@localhost < mlb_teams_2012.sql
DROP TABLE IF EXISTS mlb_teams_2012;
CREATE TABLE mlb_teams_2012 ("Team" VARCHAR, "Payroll (millions)" FLOAT, "Wins" BIGINT);
INSERT INTO mlb_teams_2012
("Team", "Payroll (millions)", "Wins")
VALUES
('Nationals', 81.34, 98),
('Reds', 82.20, 97),
('Yankees', 197.96, 95),
('Giants', 117.62, 94),
('Braves', 83.31, 94),
('Athletics', 55.37, 94),
('Rangers', 120.51, 93),
('Orioles', 81.43, 93),
('Rays', 64.17, 90),
('Angels', 154.49, 89),
('Tigers', 132.30, 88),
('Cardinals', 110.30, 88),
('Dodgers', 95.14, 86),
('White Sox', 96.92, 85),
('Brewers', 97.65, 83),
('Phillies', 174.54, 81),
('Diamondbacks', 74.28, 81),
('Pirates', 63.43, 79),
('Padres', 55.24, 76),
('Mariners', 81.97, 75),
('Mets', 93.35, 74),
('Blue Jays', 75.48, 73),
('Royals', 60.91, 72),
('Marlins', 118.07, 69),
('Red Sox', 173.18, 69),
('Indians', 78.43, 68),
('Twins', 94.08, 66),
('Rockies', 78.06, 64),
('Cubs', 88.19, 61),
('Astros', 60.65, 55)
;
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/stanley-cups.csv | Stanley Cups,,
Team,Location,Stanley Cups
Blues,STL,1
Flyers,PHI,2
Maple Leafs,TOR,13 |
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/sample_rss_feeds.opml | <?xml version="1.0" encoding="UTF-8"?>
<opml version="1.0">
<head>
<title>Sample RSS feed subscriptions</title>
</head>
<body>
<outline text="Tech" title="Tech">
<outline type="rss" text="Engadget" title="Engadget" xmlUrl="http://www.engadget.com/rss-full.xml" htmlUrl="http://www.engadget.com"/>
<outline type="rss" text="Ars Technica - All content" title="Ars Technica - All content" xmlUrl="http://feeds.arstechnica.com/arstechnica/index/" htmlUrl="https://arstechnica.com"/>
</outline>
</body>
</opml>
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/facebook_chat.json | {
"participants": [{"name": "User 1"}, {"name": "User 2"}],
"messages": [
{"sender_name": "User 2", "timestamp_ms": 1675597571851, "content": "Bye!"},
{
"sender_name": "User 1",
"timestamp_ms": 1675597435669,
"content": "Oh no worries! Bye"
},
{
"sender_name": "User 2",
"timestamp_ms": 1675596277579,
"content": "No Im sorry it was my mistake, the blue one is not for sale"
},
{
"sender_name": "User 1",
"timestamp_ms": 1675595140251,
"content": "I thought you were selling the blue one!"
},
{
"sender_name": "User 1",
"timestamp_ms": 1675595109305,
"content": "Im not interested in this bag. Im interested in the blue one!"
},
{
"sender_name": "User 2",
"timestamp_ms": 1675595068468,
"content": "Here is $129"
},
{
"sender_name": "User 2",
"timestamp_ms": 1675595060730,
"photos": [
{"uri": "url_of_some_picture.jpg", "creation_timestamp": 1675595059}
]
},
{
"sender_name": "User 2",
"timestamp_ms": 1675595045152,
"content": "Online is at least $100"
},
{
"sender_name": "User 1",
"timestamp_ms": 1675594799696,
"content": "How much do you want?"
},
{
"sender_name": "User 2",
"timestamp_ms": 1675577876645,
"content": "Goodmorning! $50 is too low."
},
{
"sender_name": "User 1",
"timestamp_ms": 1675549022673,
"content": "Hi! Im interested in your bag. Im offering $50. Let me know if you are interested. Thanks!"
}
],
"title": "User 1 and User 2 chat",
"is_still_participant": true,
"thread_path": "inbox/User 1 and User 2 chat",
"magic_words": [],
"image": {"uri": "image_of_the_chat.jpg", "creation_timestamp": 1675549016},
"joinable_mode": {"mode": 1, "link": ""}
}
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/README.org | * Example Docs
The sample docs directory contains the following files:
- ~example-10k.html~ - A 10-K SEC filing in HTML format
- ~layout-parser-paper.pdf~ - A PDF copy of the layout parser paper
- ~factbook.xml~ / ~factbook.xsl~ - Example XML/XSL files that you
can use to test stylesheets
These documents can be used to test out the parsers in the library. In
addition, here are instructions for pulling in some sample docs that are
too big to store in the repo.
** XBRL 10-K
You can get an example 10-K in inline XBRL format using the following
~curl~. Note, you need to have the user agent set in the header or the
SEC site will reject your request.
#+BEGIN_SRC bash
curl -O \
  -A '${organization} ${email}' \
https://www.sec.gov/Archives/edgar/data/311094/000117184321001344/0001171843-21-001344.txt
#+END_SRC
You can parse this document using the HTML parser.
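As a minimal sketch (assuming the ~unstructured~ extra is installed; the file
name below is an illustrative local path for the filing downloaded above):
#+BEGIN_SRC python
from langchain_community.document_loaders import UnstructuredHTMLLoader

# Hypothetical local path to the filing fetched with the curl command above.
loader = UnstructuredHTMLLoader("0001171843-21-001344.txt")
docs = loader.load()
print(docs[0].page_content[:500])
#+END_SRC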
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/example.json | {
"messages": [
{
"sender_name": "User 2",
"timestamp_ms": 1675597571851,
"content": "Bye!"
},
{
"sender_name": "User 1",
"timestamp_ms": 1675597435669,
"content": "Oh no worries! Bye"
},
{
"sender_name": "User 2",
"timestamp_ms": 1675595060730,
"photos": [
{
"uri": "url_of_some_picture.jpg",
"creation_timestamp": 1675595059
}
]
}
],
"title": "User 1 and User 2 chat"
} |
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/hello_world.js | class HelloWorld {
sayHello() {
console.log("Hello World!");
}
}
function main() {
const hello = new HelloWorld();
hello.sayHello();
}
main();
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/example-utf8.html | <html>
<head>
<title>Chew dad's slippers</title>
</head>
<body>
<h1>
Instead of drinking water from the cat bowl, make sure to steal water from
the toilet
</h1>
<h2>Chase the red dot</h2>
<p>
Munch, munch, chomp, chomp hate dogs. Spill litter box, scratch at owner,
destroy all furniture, especially couch get scared by sudden appearance of
cucumber cat is love, cat is life fat baby cat best buddy little guy for
catch eat throw up catch eat throw up bad birds jump on fridge. Purr like
a car engine oh yes, there is my human woman she does best pats ever that
all i like about her hiss meow .
</p>
<p>
Dead stare with ears cocked when “owners” are asleep, cry for no apparent
reason meow all night. Plop down in the middle where everybody walks favor
packaging over toy. Sit on the laptop kitty pounce, trip, faceplant.
</p>
</body>
</html>
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/sitemap.xml | <?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:xhtml="http://www.w3.org/1999/xhtml">
<url>
<loc>https://python.langchain.com/en/stable/</loc>
<lastmod>2023-05-04T16:15:31.377584+00:00</lastmod>
<changefreq>weekly</changefreq>
<priority>1</priority>
</url>
<url>
<loc>https://python.langchain.com/en/latest/</loc>
<lastmod>2023-05-05T07:52:19.633878+00:00</lastmod>
<changefreq>daily</changefreq>
<priority>0.9</priority>
</url>
<url>
<loc>https://python.langchain.com/en/harrison-docs-refactor-3-24/</loc>
<lastmod>2023-03-27T02:32:55.132916+00:00</lastmod>
<changefreq>monthly</changefreq>
<priority>0.8</priority>
</url>
</urlset> |
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/example.mht | From: <Saved by Blink>
Snapshot-Content-Location: https://langchain.com/
Subject:
Date: Fri, 16 Jun 2023 19:32:59 -0000
MIME-Version: 1.0
Content-Type: multipart/related;
type="text/html";
boundary="----MultipartBoundary--dYaUgeoeP18TqraaeOwkeZyu1vI09OtkFwH2rcnJMt----"
------MultipartBoundary--dYaUgeoeP18TqraaeOwkeZyu1vI09OtkFwH2rcnJMt----
Content-Type: text/html
Content-ID: <frame-2F1DB31BBD26C55A7F1EEC7561350515@mhtml.blink>
Content-Transfer-Encoding: quoted-printable
Content-Location: https://langchain.com/
<html><head><title>LangChain</title><meta http-equiv=3D"Content-Type" content=3D"text/html; charset=
=3DUTF-8"><link rel=3D"stylesheet" type=3D"text/css" href=3D"cid:css-c9ac93=
be-2ab2-46d8-8690-80da3a6d1832@mhtml.blink" /></head><body data-new-gr-c-s-=
check-loaded=3D"14.1112.0" data-gr-ext-installed=3D""><p align=3D"center">
<b><font size=3D"6">L</font><font size=3D"4">ANG </font><font size=3D"6">C=
</font><font size=3D"4">HAIN </font><font size=3D"2">=F0=9F=A6=9C=EF=B8=8F=
=F0=9F=94=97</font><br>Official Home Page</b><font size=3D"1"> </font>=
</p>
<hr>
<center>
<table border=3D"0" cellspacing=3D"0" width=3D"90%">
<tbody>
<tr>
<td height=3D"55" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://langchain.com/integrations.html">Integration=
s</a>=20
</li></ul></td>
<td height=3D"45" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://langchain.com/features.html">Features</a>=20
</li></ul></td></tr>
<tr>
<td height=3D"55" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://blog.langchain.dev/">Blog</a>=20
</li></ul></td>
<td height=3D"45" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://docs.langchain.com/docs/">Conceptual Guide</=
a>=20
</li></ul></td></tr>
<tr>
<td height=3D"45" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://github.com/langchain-ai/langchain">Python Repo<=
/a></li></ul></td>
<td height=3D"45" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://github.com/langchain-ai/langchainjs">JavaScript=
Repo</a></li></ul></td></tr>
=20
=09
<tr>
<td height=3D"45" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://python.langchain.com/en/latest/">Python Docu=
mentation</a> </li></ul></td>
<td height=3D"45" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://js.langchain.com/docs/">JavaScript Document=
ation</a>
</li></ul></td></tr>
<tr>
<td height=3D"45" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://github.com/langchain-ai/chat-langchain">Python =
ChatLangChain</a> </li></ul></td>
<td height=3D"45" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://github.com/sullivan-sean/chat-langchainjs">=
JavaScript ChatLangChain</a>
</li></ul></td></tr>
<tr>
<td height=3D"45" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://discord.gg/6adMQxSpJS">Discord</a> </li></ul=
></td>
<td height=3D"55" valign=3D"top" width=3D"50%">
<ul>
<li><a href=3D"https://twitter.com/langchainai">Twitter</a>
</li></ul></td></tr>
=09
</tbody></table></center>
<hr>
<font size=3D"2">
<p>If you have any comments about our WEB page, you can=20
write us at the address shown above. However, due to=20
the limited number of personnel in our corporate office, we are unable to=
=20
provide a direct response.</p></font>
<hr>
<p align=3D"left"><font size=3D"2">Copyright =C2=A9 2023-2023<b> LangChain =
Inc.</b></font><font size=3D"2">=20
</font></p>
</body></html>
------MultipartBoundary--dYaUgeoeP18TqraaeOwkeZyu1vI09OtkFwH2rcnJMt------
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/example.html | <html>
<head>
<title>Chew dad's slippers</title>
</head>
<body>
<h1>
Instead of drinking water from the cat bowl, make sure to steal water from
the toilet
</h1>
<h2>Chase the red dot</h2>
<p>
Munch, munch, chomp, chomp hate dogs. Spill litter box, scratch at owner,
destroy all furniture, especially couch get scared by sudden appearance of
cucumber cat is love, cat is life fat baby cat best buddy little guy for
catch eat throw up catch eat throw up bad birds jump on fridge. Purr like
a car engine oh yes, there is my human woman she does best pats ever that
all i like about her hiss meow .
</p>
<p>
Dead stare with ears cocked when owners are asleep, cry for no apparent
reason meow all night. Plop down in the middle where everybody walks favor
packaging over toy. Sit on the laptop kitty pounce, trip, faceplant.
</p>
</body>
</html>
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/factbook.xml | <?xml version="1.0" encoding="UTF-8"?>
<factbook>
<country>
<name>United States</name>
<capital>Washington, DC</capital>
<leader>Joe Biden</leader>
<sport>Baseball</sport>
</country>
<country>
<name>Canada</name>
<capital>Ottawa</capital>
<leader>Justin Trudeau</leader>
<sport>Hockey</sport>
</country>
<country>
<name>France</name>
<capital>Paris</capital>
<leader>Emmanuel Macron</leader>
<sport>Soccer</sport>
</country>
<country>
<name>Trinidad &amp; Tobago</name>
<capital>Port of Spain</capital>
<leader>Keith Rowley</leader>
<sport>Track &amp; Field</sport>
</country>
</factbook>
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/examples/hello_world.py | #!/usr/bin/env python3
import sys
def main() -> int:
    print("Hello World!")  # noqa: T201
    return 0


if __name__ == "__main__":
    sys.exit(main())
|
0 | lc_public_repos/langchain/libs/community | lc_public_repos/langchain/libs/community/scripts/lint_imports.sh | #!/bin/bash
# This script searches for invalid imports in tracked files within a Git repository.
#
# Usage: ./scripts/lint_imports.sh /path/to/repository
set -eu
# Check if a path argument is provided
if [ $# -ne 1 ]; then
echo "Usage: $0 /path/to/repository"
exit 1
fi
repository_path="$1"
# make sure not importing from langchain_experimental
result=$(git -C "$repository_path" grep -En '^import langchain_experimental|^from langchain_experimental' -- '*.py' || true)
# Check if any matching lines were found
if [ -n "$result" ]; then
echo "ERROR: The following lines need to be updated:"
echo "$result"
echo "langchain_community should import from langchain_experimental."
exit 1
fi
# make sure no one is importing from the built-in xml library;
# defusedxml should be used instead to avoid known CVEs.
# Whether the standard library actually poses a risk to users
# is very nuanced and depends on the user's environment.
# https://docs.python.org/3/library/xml.etree.elementtree.html
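# For illustration, an import written like the following would match the pattern
# below but be allowed through by the opt-in filter:
#   from xml.etree.ElementTree import parse  # OK: trusted-source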
result=$(git -C "$repository_path" grep -En '^from xml.|^import xml$|^import xml.' | grep -vE "# OK: user-must-opt-in| # OK: trusted-source" || true)
if [ -n "$result" ]; then
echo "ERROR: The following lines need to be updated:"
echo "$result"
echo "Triggering an error due to usage of the built-in xml library. "
echo "Please see https://docs.python.org/3/library/xml.html#xml-vulnerabilities."
echo "If this happens, there's likely code that's relying on the standard library "
echo "to parse xml somewhere in the code path. "
echo "Please update the code to force the user to explicitly opt-in to using the standard library or running the code. "
echo "It should be **obvious** without reading the documentation that they are being forced to use the standard library. "
echo "After this is done feel free to add a comment to the line with '# OK: user-must-opt-in', after the import. "
echo "Lacking a clear opt-in mechanism is likely a security risk, and will result in rejection of the PR."
exit 1
fi
|
0 | lc_public_repos/langchain/libs/community | lc_public_repos/langchain/libs/community/scripts/check_imports.py | import random
import string
import sys
import traceback
from importlib.machinery import SourceFileLoader
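# Usage: python scripts/check_imports.py <file1.py> [<file2.py> ...]
# Each file is imported in turn; any import failure is reported and the
# script exits with a non-zero status.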
if __name__ == "__main__":
    files = sys.argv[1:]
    has_failure = False
    for file in files:
        try:
            # Load each file under a random module name so that files sharing
            # a basename do not collide in sys.modules.
            module_name = "".join(
                random.choice(string.ascii_letters) for _ in range(20)
            )
            SourceFileLoader(module_name, file).load_module()
        except Exception:
            has_failure = True
            print(file)  # noqa: T201
            traceback.print_exc()
            print()  # noqa: T201
    sys.exit(1 if has_failure else 0)
|
0 | lc_public_repos/langchain/libs/community | lc_public_repos/langchain/libs/community/scripts/check_pickle.sh | #!/bin/bash
#
# This checks for usage of pickle in the package.
#
# Usage: ./scripts/check_pickle.sh /path/to/repository
#
# Check if a path argument is provided
if [ $# -ne 1 ]; then
echo "Usage: $0 /path/to/repository"
exit 1
fi
repository_path="$1"
# Search for lines matching the pattern within the specified repository
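# For illustration, a line written like the following would be skipped by this check:
#   obj = pickle.load(f)  # ignore[pickle]: explicit-opt-in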
result=$(git -C "$repository_path" grep -E 'pickle.load\(|pickle.loads\(' | grep -v '# ignore\[pickle\]: explicit-opt-in')
# Check if any matching lines were found
if [ -n "$result" ]; then
echo "ERROR: The following lines need to be updated:"
echo "$result"
echo "Please avoid using pickle or cloudpickle."
echo "If you must, then add:"
echo "1. A security notice (scan the code for examples)"
echo "2. Code path should be opt-in."
exit 1
fi
|
0 | lc_public_repos/langchain/libs/community | lc_public_repos/langchain/libs/community/scripts/check_pydantic.sh | #!/bin/bash
#
# This script searches for lines starting with "import pydantic" or "from pydantic"
# in tracked files within a Git repository.
#
# Usage: ./scripts/check_pydantic.sh /path/to/repository
# Check if a path argument is provided
if [ $# -ne 1 ]; then
echo "Usage: $0 /path/to/repository"
exit 1
fi
repository_path="$1"
# Check that we are not using features that cannot be captured via __init__.
# @pre_init is a custom decorator that we introduced to capture the same semantics
# as @root_validator(pre=False, skip_on_failure=False) available in pydantic 1.
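# For illustration, the preferred pydantic 2 replacement looks roughly like this
# (the method name is illustrative):
#   @model_validator(mode="before")
#   @classmethod
#   def validate_environment(cls, values: dict) -> dict: ...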
count=$(git grep -E '(@root_validator)|(@validator)|(@field_validator)|(@pre_init)' -- "*.py" | wc -l)
# PRs that increase the current count will not be accepted.
# PRs that update the code in the repository so that the
# count can be decreased are welcome!
current_count=125
if [ "$count" -gt "$current_count" ]; then
echo "The PR seems to be introducing new usage of @root_validator and/or @field_validator."
echo "git grep -E '(@root_validator)|(@validator)|(@field_validator)|(@pre_init)' | wc -l returned $count"
echo "whereas the expected count should be equal or less than $current_count"
echo "Please update the code to instead use @model_validator or __init__"
exit 1
elif [ "$count" -lt "$current_count" ]; then
echo "Please update the $current_count variable in ./scripts/check_pydantic.sh to $count"
exit 1
fi
# We do not want to be using pydantic-settings. The code base already has a
# pattern for looking up env settings, and we want to use that existing pattern
# rather than rely on an external dependency.
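# For illustration (assuming langchain_core.utils.from_env), the preferred pattern
# looks like:
#   api_key: str = Field(default_factory=from_env("EXAMPLE_API_KEY"))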
count=$(git grep -E '^import pydantic_settings|^from pydantic_settings' -- "*.py" | wc -l)
# PRs that increase the current count will not be accepted.
# PRs that update the code in the repository so that the
# count can be decreased are welcome!
current_count=8
if [ "$count" -gt "$current_count" ]; then
echo "The PR seems to be introducing new usage pydantic_settings."
echo "git grep -E '^import pydantic_settings|^from pydantic_settings' | wc -l returned $count"
echo "whereas the expected count should be equal or less than $current_count"
echo "Please update the code to use Field(default_factory=from_env(..)) or Field(default_factory=secret_from_env(..))"
exit 1
elif [ "$count" -lt "$current_count" ]; then
echo "Please update the $current_count variable in ./scripts/check_pydantic.sh to $count"
exit 1
fi
|
0 | lc_public_repos/langchain/libs/partners | lc_public_repos/langchain/libs/partners/together/README.md | This package has moved!
https://github.com/langchain-ai/langchain-together/tree/main/libs/together |
0 | lc_public_repos/langchain/libs/partners | lc_public_repos/langchain/libs/partners/anthropic/Makefile | .PHONY: all format lint test tests integration_tests docker_tests help extended_tests
# Default target executed when no arguments are given to make.
all: help

# Define a variable for the test file path.
TEST_FILE ?= tests/unit_tests/
integration_test integration_tests: TEST_FILE=tests/integration_tests/

test tests integration_test integration_tests:
	poetry run pytest -vvv --timeout 10 $(TEST_FILE)

test_watch:
	poetry run ptw --snapshot-update --now . -- -vv $(TEST_FILE)

######################
# LINTING AND FORMATTING
######################

# Define a variable for Python and notebook files.
PYTHON_FILES=.
MYPY_CACHE=.mypy_cache
lint format: PYTHON_FILES=.
lint_diff format_diff: PYTHON_FILES=$(shell git diff --relative=libs/partners/anthropic --name-only --diff-filter=d master | grep -E '\.py$$|\.ipynb$$')
lint_package: PYTHON_FILES=langchain_anthropic
lint_tests: PYTHON_FILES=tests
lint_tests: MYPY_CACHE=.mypy_cache_test

lint lint_diff lint_package lint_tests:
	[ "$(PYTHON_FILES)" = "" ] || poetry run ruff check $(PYTHON_FILES)
	[ "$(PYTHON_FILES)" = "" ] || poetry run ruff format $(PYTHON_FILES) --diff
	[ "$(PYTHON_FILES)" = "" ] || mkdir -p $(MYPY_CACHE) && poetry run mypy $(PYTHON_FILES) --cache-dir $(MYPY_CACHE)

format format_diff:
	[ "$(PYTHON_FILES)" = "" ] || poetry run ruff format $(PYTHON_FILES)
	[ "$(PYTHON_FILES)" = "" ] || poetry run ruff check --select I --fix $(PYTHON_FILES)

spell_check:
	poetry run codespell --toml pyproject.toml

spell_fix:
	poetry run codespell --toml pyproject.toml -w

check_imports: $(shell find langchain_anthropic -name '*.py')
	poetry run python ./scripts/check_imports.py $^

######################
# HELP
######################

help:
	@echo '----'
	@echo 'check_imports - check imports'
	@echo 'format - run code formatters'
	@echo 'lint - run linters'
	@echo 'test - run unit tests'
	@echo 'tests - run unit tests'
	@echo 'test TEST_FILE=<test_file> - run all tests in file'
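# Example invocation (hypothetical test path):
#   make test TEST_FILE=tests/unit_tests/test_chat_models.py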
|
0 | lc_public_repos/langchain/libs/partners | lc_public_repos/langchain/libs/partners/anthropic/LICENSE | MIT License
Copyright (c) 2023 LangChain, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
0 | lc_public_repos/langchain/libs/partners | lc_public_repos/langchain/libs/partners/anthropic/poetry.lock | # This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
[[package]]
name = "annotated-types"
version = "0.7.0"
description = "Reusable constraint types to use with typing.Annotated"
optional = false
python-versions = ">=3.8"
files = [
{file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
]
[[package]]
name = "anthropic"
version = "0.39.0"
description = "The official Python library for the anthropic API"
optional = false
python-versions = ">=3.8"
files = [
{file = "anthropic-0.39.0-py3-none-any.whl", hash = "sha256:ea17093ae0ce0e1768b0c46501d6086b5bcd74ff39d68cd2d6396374e9de7c09"},
{file = "anthropic-0.39.0.tar.gz", hash = "sha256:94671cc80765f9ce693f76d63a97ee9bef4c2d6063c044e983d21a2e262f63ba"},
]
[package.dependencies]
anyio = ">=3.5.0,<5"
distro = ">=1.7.0,<2"
httpx = ">=0.23.0,<1"
jiter = ">=0.4.0,<1"
pydantic = ">=1.9.0,<3"
sniffio = "*"
typing-extensions = ">=4.7,<5"
[package.extras]
bedrock = ["boto3 (>=1.28.57)", "botocore (>=1.31.57)"]
vertex = ["google-auth (>=2,<3)"]
[[package]]
name = "anyio"
version = "4.6.2.post1"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.9"
files = [
{file = "anyio-4.6.2.post1-py3-none-any.whl", hash = "sha256:6d170c36fba3bdd840c73d3868c1e777e33676a69c3a72cf0a0d5d6d8009b61d"},
{file = "anyio-4.6.2.post1.tar.gz", hash = "sha256:4c8bc31ccdb51c7f7bd251f51c609e038d63e34219b44aa86e47576389880b4c"},
]
[package.dependencies]
exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
idna = ">=2.8"
sniffio = ">=1.1"
typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
[package.extras]
doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"]
trio = ["trio (>=0.26.1)"]
[[package]]
name = "certifi"
version = "2024.8.30"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
{file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"},
{file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"},
]
[[package]]
name = "charset-normalizer"
version = "3.4.0"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7.0"
files = [
{file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"},
{file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"},
{file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"},
{file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"},
{file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"},
{file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"},
{file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"},
{file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"},
{file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"},
{file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"},
{file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"},
{file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"},
{file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"},
{file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"},
{file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"},
{file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"},
{file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"},
{file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"},
{file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"},
{file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"},
{file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"},
{file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"},
{file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"},
{file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"},
{file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"},
{file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"},
{file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"},
{file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"},
{file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"},
{file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"},
{file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"},
{file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"},
{file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"},
{file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"},
{file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"},
{file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"},
{file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"},
{file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"},
{file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"},
{file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"},
{file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"},
{file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"},
{file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"},
{file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"},
{file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"},
{file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"},
{file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"},
{file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"},
{file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"},
{file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"},
{file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"},
{file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"},
{file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"},
{file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"},
{file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"},
{file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"},
{file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"},
{file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"},
{file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"},
{file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"},
{file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"},
{file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"},
{file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"},
{file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"},
{file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"},
{file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"},
{file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"},
{file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"},
{file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"},
{file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"},
{file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"},
{file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"},
{file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"},
{file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"},
{file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"},
{file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"},
{file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"},
{file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"},
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"},
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"},
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"},
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"},
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"},
{file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"},
{file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"},
{file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"},
{file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"},
{file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"},
{file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"},
{file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"},
{file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"},
{file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"},
]
[[package]]
name = "codespell"
version = "2.3.0"
description = "Codespell"
optional = false
python-versions = ">=3.8"
files = [
{file = "codespell-2.3.0-py3-none-any.whl", hash = "sha256:a9c7cef2501c9cfede2110fd6d4e5e62296920efe9abfb84648df866e47f58d1"},
{file = "codespell-2.3.0.tar.gz", hash = "sha256:360c7d10f75e65f67bad720af7007e1060a5d395670ec11a7ed1fed9dd17471f"},
]
[package.extras]
dev = ["Pygments", "build", "chardet", "pre-commit", "pytest", "pytest-cov", "pytest-dependency", "ruff", "tomli", "twine"]
hard-encoding-detection = ["chardet"]
toml = ["tomli"]
types = ["chardet (>=5.1.0)", "mypy", "pytest", "pytest-cov", "pytest-dependency"]
[[package]]
name = "colorama"
version = "0.4.6"
description = "Cross-platform colored terminal text."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
files = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
[[package]]
name = "defusedxml"
version = "0.7.1"
description = "XML bomb protection for Python stdlib modules"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
files = [
{file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
{file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
]
[[package]]
name = "distro"
version = "1.9.0"
description = "Distro - an OS platform information API"
optional = false
python-versions = ">=3.6"
files = [
{file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"},
{file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"},
]
[[package]]
name = "exceptiongroup"
version = "1.2.2"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
files = [
{file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
{file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
]
[package.extras]
test = ["pytest (>=6)"]
[[package]]
name = "freezegun"
version = "1.5.1"
description = "Let your Python tests travel through time"
optional = false
python-versions = ">=3.7"
files = [
{file = "freezegun-1.5.1-py3-none-any.whl", hash = "sha256:bf111d7138a8abe55ab48a71755673dbaa4ab87f4cff5634a4442dfec34c15f1"},
{file = "freezegun-1.5.1.tar.gz", hash = "sha256:b29dedfcda6d5e8e083ce71b2b542753ad48cfec44037b3fc79702e2980a89e9"},
]
[package.dependencies]
python-dateutil = ">=2.7"
[[package]]
name = "h11"
version = "0.14.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.7"
files = [
{file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
{file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
]
[[package]]
name = "httpcore"
version = "1.0.6"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
files = [
{file = "httpcore-1.0.6-py3-none-any.whl", hash = "sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f"},
{file = "httpcore-1.0.6.tar.gz", hash = "sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f"},
]
[package.dependencies]
certifi = "*"
h11 = ">=0.13,<0.15"
[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
trio = ["trio (>=0.22.0,<1.0)"]
[[package]]
name = "httpx"
version = "0.27.2"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
files = [
{file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"},
{file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"},
]
[package.dependencies]
anyio = "*"
certifi = "*"
httpcore = "==1.*"
idna = "*"
sniffio = "*"
[package.extras]
brotli = ["brotli", "brotlicffi"]
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "idna"
version = "3.10"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.6"
files = [
{file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
{file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
]
[package.extras]
all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
[[package]]
name = "iniconfig"
version = "2.0.0"
description = "brain-dead simple config-ini parsing"
optional = false
python-versions = ">=3.7"
files = [
{file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
{file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
]
[[package]]
name = "jiter"
version = "0.7.0"
description = "Fast iterable JSON parser."
optional = false
python-versions = ">=3.8"
files = [
{file = "jiter-0.7.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:e14027f61101b3f5e173095d9ecf95c1cac03ffe45a849279bde1d97e559e314"},
{file = "jiter-0.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:979ec4711c2e37ac949561858bd42028884c9799516a923e1ff0b501ef341a4a"},
{file = "jiter-0.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:662d5d3cca58ad6af7a3c6226b641c8655de5beebcb686bfde0df0f21421aafa"},
{file = "jiter-0.7.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1d89008fb47043a469f97ad90840b97ba54e7c3d62dc7cbb6cbf938bd0caf71d"},
{file = "jiter-0.7.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8b16c35c846a323ce9067170d5ab8c31ea3dbcab59c4f7608bbbf20c2c3b43f"},
{file = "jiter-0.7.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c9e82daaa1b0a68704f9029b81e664a5a9de3e466c2cbaabcda5875f961702e7"},
{file = "jiter-0.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43a87a9f586636e1f0dd3651a91f79b491ea0d9fd7cbbf4f5c463eebdc48bda7"},
{file = "jiter-0.7.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2ec05b1615f96cc3e4901678bc863958611584072967d9962f9e571d60711d52"},
{file = "jiter-0.7.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:a5cb97e35370bde7aa0d232a7f910f5a0fbbc96bc0a7dbaa044fd5cd6bcd7ec3"},
{file = "jiter-0.7.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cb316dacaf48c8c187cea75d0d7f835f299137e6fdd13f691dff8f92914015c7"},
{file = "jiter-0.7.0-cp310-none-win32.whl", hash = "sha256:243f38eb4072763c54de95b14ad283610e0cd3bf26393870db04e520f60eebb3"},
{file = "jiter-0.7.0-cp310-none-win_amd64.whl", hash = "sha256:2221d5603c139f6764c54e37e7c6960c469cbcd76928fb10d15023ba5903f94b"},
{file = "jiter-0.7.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:91cec0ad755bd786c9f769ce8d843af955df6a8e56b17658771b2d5cb34a3ff8"},
{file = "jiter-0.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:feba70a28a27d962e353e978dbb6afd798e711c04cb0b4c5e77e9d3779033a1a"},
{file = "jiter-0.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9d866ec066c3616cacb8535dbda38bb1d470b17b25f0317c4540182bc886ce2"},
{file = "jiter-0.7.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8e7a7a00b6f9f18289dd563596f97ecaba6c777501a8ba04bf98e03087bcbc60"},
{file = "jiter-0.7.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9aaf564094c7db8687f2660605e099f3d3e6ea5e7135498486674fcb78e29165"},
{file = "jiter-0.7.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a4d27e09825c1b3c7a667adb500ce8b840e8fc9f630da8454b44cdd4fb0081bb"},
{file = "jiter-0.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ca7c287da9c1d56dda88da1d08855a787dbb09a7e2bd13c66a2e288700bd7c7"},
{file = "jiter-0.7.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:db19a6d160f093cbc8cd5ea2abad420b686f6c0e5fb4f7b41941ebc6a4f83cda"},
{file = "jiter-0.7.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6e46a63c7f877cf7441ffc821c28287cfb9f533ae6ed707bde15e7d4dfafa7ae"},
{file = "jiter-0.7.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7ba426fa7ff21cb119fa544b75dd3fbee6a70e55a5829709c0338d07ccd30e6d"},
{file = "jiter-0.7.0-cp311-none-win32.whl", hash = "sha256:c07f55a64912b0c7982377831210836d2ea92b7bd343fca67a32212dd72e38e0"},
{file = "jiter-0.7.0-cp311-none-win_amd64.whl", hash = "sha256:ed27b2c43e1b5f6c7fedc5c11d4d8bfa627de42d1143d87e39e2e83ddefd861a"},
{file = "jiter-0.7.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ac7930bcaaeb1e229e35c91c04ed2e9f39025b86ee9fc3141706bbf6fff4aeeb"},
{file = "jiter-0.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:571feae3e7c901a8eedde9fd2865b0dfc1432fb15cab8c675a8444f7d11b7c5d"},
{file = "jiter-0.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8af4df8a262fa2778b68c2a03b6e9d1cb4d43d02bea6976d46be77a3a331af1"},
{file = "jiter-0.7.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bd028d4165097a611eb0c7494d8c1f2aebd46f73ca3200f02a175a9c9a6f22f5"},
{file = "jiter-0.7.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c6b487247c7836810091e9455efe56a52ec51bfa3a222237e1587d04d3e04527"},
{file = "jiter-0.7.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6d28a92f28814e1a9f2824dc11f4e17e1df1f44dc4fdeb94c5450d34bcb2602"},
{file = "jiter-0.7.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90443994bbafe134f0b34201dad3ebe1c769f0599004084e046fb249ad912425"},
{file = "jiter-0.7.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f9abf464f9faac652542ce8360cea8e68fba2b78350e8a170248f9bcc228702a"},
{file = "jiter-0.7.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db7a8d99fc5f842f7d2852f06ccaed066532292c41723e5dff670c339b649f88"},
{file = "jiter-0.7.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:15cf691ebd8693b70c94627d6b748f01e6d697d9a6e9f2bc310934fcfb7cf25e"},
{file = "jiter-0.7.0-cp312-none-win32.whl", hash = "sha256:9dcd54fa422fb66ca398bec296fed5f58e756aa0589496011cfea2abb5be38a5"},
{file = "jiter-0.7.0-cp312-none-win_amd64.whl", hash = "sha256:cc989951f73f9375b8eacd571baaa057f3d7d11b7ce6f67b9d54642e7475bfad"},
{file = "jiter-0.7.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:24cecd18df540963cd27c08ca5ce1d0179f229ff78066d9eecbe5add29361340"},
{file = "jiter-0.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d41b46236b90b043cca73785674c23d2a67d16f226394079d0953f94e765ed76"},
{file = "jiter-0.7.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b160db0987171365c153e406a45dcab0ee613ae3508a77bfff42515cb4ce4d6e"},
{file = "jiter-0.7.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d1c8d91e0f0bd78602eaa081332e8ee4f512c000716f5bc54e9a037306d693a7"},
{file = "jiter-0.7.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:997706c683195eeff192d2e5285ce64d2a610414f37da3a3f2625dcf8517cf90"},
{file = "jiter-0.7.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7ea52a8a0ff0229ab2920284079becd2bae0688d432fca94857ece83bb49c541"},
{file = "jiter-0.7.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d77449d2738cf74752bb35d75ee431af457e741124d1db5e112890023572c7c"},
{file = "jiter-0.7.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a8203519907a1d81d6cb00902c98e27c2d0bf25ce0323c50ca594d30f5f1fbcf"},
{file = "jiter-0.7.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41d15ccc53931c822dd7f1aebf09faa3cda2d7b48a76ef304c7dbc19d1302e51"},
{file = "jiter-0.7.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:febf3179b2fabf71fbd2fd52acb8594163bb173348b388649567a548f356dbf6"},
{file = "jiter-0.7.0-cp313-none-win32.whl", hash = "sha256:4a8e2d866e7eda19f012444e01b55079d8e1c4c30346aaac4b97e80c54e2d6d3"},
{file = "jiter-0.7.0-cp313-none-win_amd64.whl", hash = "sha256:7417c2b928062c496f381fb0cb50412eee5ad1d8b53dbc0e011ce45bb2de522c"},
{file = "jiter-0.7.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9c62c737b5368e51e74960a08fe1adc807bd270227291daede78db24d5fbf556"},
{file = "jiter-0.7.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e4640722b1bef0f6e342fe4606aafaae0eb4f4be5c84355bb6867f34400f6688"},
{file = "jiter-0.7.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f367488c3b9453eab285424c61098faa1cab37bb49425e69c8dca34f2dfe7d69"},
{file = "jiter-0.7.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0cf5d42beb3514236459454e3287db53d9c4d56c4ebaa3e9d0efe81b19495129"},
{file = "jiter-0.7.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cc5190ea1113ee6f7252fa8a5fe5a6515422e378356c950a03bbde5cafbdbaab"},
{file = "jiter-0.7.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:63ee47a149d698796a87abe445fc8dee21ed880f09469700c76c8d84e0d11efd"},
{file = "jiter-0.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48592c26ea72d3e71aa4bea0a93454df907d80638c3046bb0705507b6704c0d7"},
{file = "jiter-0.7.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:79fef541199bd91cfe8a74529ecccb8eaf1aca38ad899ea582ebbd4854af1e51"},
{file = "jiter-0.7.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d1ef6bb66041f2514739240568136c81b9dcc64fd14a43691c17ea793b6535c0"},
{file = "jiter-0.7.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aca4d950863b1c238e315bf159466e064c98743eef3bd0ff9617e48ff63a4715"},
{file = "jiter-0.7.0-cp38-none-win32.whl", hash = "sha256:897745f230350dcedb8d1ebe53e33568d48ea122c25e6784402b6e4e88169be7"},
{file = "jiter-0.7.0-cp38-none-win_amd64.whl", hash = "sha256:b928c76a422ef3d0c85c5e98c498ce3421b313c5246199541e125b52953e1bc0"},
{file = "jiter-0.7.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c9b669ff6f8ba08270dee9ccf858d3b0203b42314a428a1676762f2d390fbb64"},
{file = "jiter-0.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b5be919bacd73ca93801c3042bce6e95cb9c555a45ca83617b9b6c89df03b9c2"},
{file = "jiter-0.7.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a282e1e8a396dabcea82d64f9d05acf7efcf81ecdd925b967020dcb0e671c103"},
{file = "jiter-0.7.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:17ecb1a578a56e97a043c72b463776b5ea30343125308f667fb8fce4b3796735"},
{file = "jiter-0.7.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7b6045fa0527129218cdcd8a8b839f678219686055f31ebab35f87d354d9c36e"},
{file = "jiter-0.7.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:189cc4262a92e33c19d4fd24018f5890e4e6da5b2581f0059938877943f8298c"},
{file = "jiter-0.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c138414839effbf30d185e30475c6dc8a16411a1e3681e5fd4605ab1233ac67a"},
{file = "jiter-0.7.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2791604acef33da6b72d5ecf885a32384bcaf9aa1e4be32737f3b8b9588eef6a"},
{file = "jiter-0.7.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ae60ec89037a78d60bbf3d8b127f1567769c8fa24886e0abed3f622791dea478"},
{file = "jiter-0.7.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:836f03dea312967635233d826f783309b98cfd9ccc76ac776e224cfcef577862"},
{file = "jiter-0.7.0-cp39-none-win32.whl", hash = "sha256:ebc30ae2ce4bc4986e1764c404b4ea1924f926abf02ce92516485098f8545374"},
{file = "jiter-0.7.0-cp39-none-win_amd64.whl", hash = "sha256:abf596f951370c648f37aa9899deab296c42a3829736e598b0dd10b08f77a44d"},
{file = "jiter-0.7.0.tar.gz", hash = "sha256:c061d9738535497b5509f8970584f20de1e900806b239a39a9994fc191dad630"},
]
[[package]]
name = "jsonpatch"
version = "1.33"
description = "Apply JSON-Patches (RFC 6902)"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*"
files = [
{file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"},
{file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"},
]
[package.dependencies]
jsonpointer = ">=1.9"
[[package]]
name = "jsonpointer"
version = "3.0.0"
description = "Identify specific nodes in a JSON document (RFC 6901)"
optional = false
python-versions = ">=3.7"
files = [
{file = "jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942"},
{file = "jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef"},
]
[[package]]
name = "langchain-core"
version = "0.3.19"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.9,<4.0"
files = []
develop = true
[package.dependencies]
jsonpatch = "^1.33"
langsmith = "^0.1.125"
packaging = ">=23.2,<25"
pydantic = [
{version = ">=2.5.2,<3.0.0", markers = "python_full_version < \"3.12.4\""},
{version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
]
PyYAML = ">=5.3"
tenacity = ">=8.1.0,!=8.4.0,<10.0.0"
typing-extensions = ">=4.7"
[package.source]
type = "directory"
url = "../../core"
[[package]]
name = "langchain-tests"
version = "0.3.1"
description = "Standard tests for LangChain implementations"
optional = false
python-versions = ">=3.9,<4.0"
files = []
develop = true
[package.dependencies]
httpx = "^0.27.0"
langchain-core = "^0.3.15"
pytest = ">=7,<9"
syrupy = "^4"
[package.source]
type = "directory"
url = "../../standard-tests"
[[package]]
name = "langsmith"
version = "0.1.139"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "langsmith-0.1.139-py3-none-any.whl", hash = "sha256:2a4a541bfbd0a9727255df28a60048c85bc8c4c6a276975923785c3fd82dc879"},
{file = "langsmith-0.1.139.tar.gz", hash = "sha256:2f9e4d32fef3ad7ef42c8506448cce3a31ad6b78bb4f3310db04ddaa1e9d744d"},
]
[package.dependencies]
httpx = ">=0.23.0,<1"
orjson = ">=3.9.14,<4.0.0"
pydantic = [
{version = ">=1,<3", markers = "python_full_version < \"3.12.4\""},
{version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
]
requests = ">=2,<3"
requests-toolbelt = ">=1.0.0,<2.0.0"
[[package]]
name = "mypy"
version = "1.13.0"
description = "Optional static typing for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"},
{file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"},
{file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"},
{file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"},
{file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"},
{file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"},
{file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"},
{file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"},
{file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"},
{file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"},
{file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"},
{file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"},
{file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"},
{file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"},
{file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"},
{file = "mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"},
{file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"},
{file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"},
{file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"},
{file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"},
{file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"},
{file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"},
{file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"},
{file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"},
{file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"},
{file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"},
{file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"},
{file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"},
{file = "mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"},
{file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"},
{file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"},
{file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"},
]
[package.dependencies]
mypy-extensions = ">=1.0.0"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
typing-extensions = ">=4.6.0"
[package.extras]
dmypy = ["psutil (>=4.0)"]
faster-cache = ["orjson"]
install-types = ["pip"]
mypyc = ["setuptools (>=50)"]
reports = ["lxml"]
[[package]]
name = "mypy-extensions"
version = "1.0.0"
description = "Type system extensions for programs checked with the mypy type checker."
optional = false
python-versions = ">=3.5"
files = [
{file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"},
{file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
]
[[package]]
name = "orjson"
version = "3.10.11"
description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy"
optional = false
python-versions = ">=3.8"
files = [
{file = "orjson-3.10.11-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6dade64687f2bd7c090281652fe18f1151292d567a9302b34c2dbb92a3872f1f"},
{file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82f07c550a6ccd2b9290849b22316a609023ed851a87ea888c0456485a7d196a"},
{file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bd9a187742d3ead9df2e49240234d728c67c356516cf4db018833a86f20ec18c"},
{file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:77b0fed6f209d76c1c39f032a70df2d7acf24b1812ca3e6078fd04e8972685a3"},
{file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:63fc9d5fe1d4e8868f6aae547a7b8ba0a2e592929245fff61d633f4caccdcdd6"},
{file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65cd3e3bb4fbb4eddc3c1e8dce10dc0b73e808fcb875f9fab40c81903dd9323e"},
{file = "orjson-3.10.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6f67c570602300c4befbda12d153113b8974a3340fdcf3d6de095ede86c06d92"},
{file = "orjson-3.10.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1f39728c7f7d766f1f5a769ce4d54b5aaa4c3f92d5b84817053cc9995b977acc"},
{file = "orjson-3.10.11-cp310-none-win32.whl", hash = "sha256:1789d9db7968d805f3d94aae2c25d04014aae3a2fa65b1443117cd462c6da647"},
{file = "orjson-3.10.11-cp310-none-win_amd64.whl", hash = "sha256:5576b1e5a53a5ba8f8df81872bb0878a112b3ebb1d392155f00f54dd86c83ff6"},
{file = "orjson-3.10.11-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:1444f9cb7c14055d595de1036f74ecd6ce15f04a715e73f33bb6326c9cef01b6"},
{file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdec57fe3b4bdebcc08a946db3365630332dbe575125ff3d80a3272ebd0ddafe"},
{file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4eed32f33a0ea6ef36ccc1d37f8d17f28a1d6e8eefae5928f76aff8f1df85e67"},
{file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80df27dd8697242b904f4ea54820e2d98d3f51f91e97e358fc13359721233e4b"},
{file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:705f03cee0cb797256d54de6695ef219e5bc8c8120b6654dd460848d57a9af3d"},
{file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03246774131701de8e7059b2e382597da43144a9a7400f178b2a32feafc54bd5"},
{file = "orjson-3.10.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8b5759063a6c940a69c728ea70d7c33583991c6982915a839c8da5f957e0103a"},
{file = "orjson-3.10.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:677f23e32491520eebb19c99bb34675daf5410c449c13416f7f0d93e2cf5f981"},
{file = "orjson-3.10.11-cp311-none-win32.whl", hash = "sha256:a11225d7b30468dcb099498296ffac36b4673a8398ca30fdaec1e6c20df6aa55"},
{file = "orjson-3.10.11-cp311-none-win_amd64.whl", hash = "sha256:df8c677df2f9f385fcc85ab859704045fa88d4668bc9991a527c86e710392bec"},
{file = "orjson-3.10.11-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:360a4e2c0943da7c21505e47cf6bd725588962ff1d739b99b14e2f7f3545ba51"},
{file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:496e2cb45de21c369079ef2d662670a4892c81573bcc143c4205cae98282ba97"},
{file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7dfa8db55c9792d53c5952900c6a919cfa377b4f4534c7a786484a6a4a350c19"},
{file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:51f3382415747e0dbda9dade6f1e1a01a9d37f630d8c9049a8ed0e385b7a90c0"},
{file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f35a1b9f50a219f470e0e497ca30b285c9f34948d3c8160d5ad3a755d9299433"},
{file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2f3b7c5803138e67028dde33450e054c87e0703afbe730c105f1fcd873496d5"},
{file = "orjson-3.10.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f91d9eb554310472bd09f5347950b24442600594c2edc1421403d7610a0998fd"},
{file = "orjson-3.10.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dfbb2d460a855c9744bbc8e36f9c3a997c4b27d842f3d5559ed54326e6911f9b"},
{file = "orjson-3.10.11-cp312-none-win32.whl", hash = "sha256:d4a62c49c506d4d73f59514986cadebb7e8d186ad510c518f439176cf8d5359d"},
{file = "orjson-3.10.11-cp312-none-win_amd64.whl", hash = "sha256:f1eec3421a558ff7a9b010a6c7effcfa0ade65327a71bb9b02a1c3b77a247284"},
{file = "orjson-3.10.11-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c46294faa4e4d0eb73ab68f1a794d2cbf7bab33b1dda2ac2959ffb7c61591899"},
{file = "orjson-3.10.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52e5834d7d6e58a36846e059d00559cb9ed20410664f3ad156cd2cc239a11230"},
{file = "orjson-3.10.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2fc947e5350fdce548bfc94f434e8760d5cafa97fb9c495d2fef6757aa02ec0"},
{file = "orjson-3.10.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0efabbf839388a1dab5b72b5d3baedbd6039ac83f3b55736eb9934ea5494d258"},
{file = "orjson-3.10.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a3f29634260708c200c4fe148e42b4aae97d7b9fee417fbdd74f8cfc265f15b0"},
{file = "orjson-3.10.11-cp313-none-win32.whl", hash = "sha256:1a1222ffcee8a09476bbdd5d4f6f33d06d0d6642df2a3d78b7a195ca880d669b"},
{file = "orjson-3.10.11-cp313-none-win_amd64.whl", hash = "sha256:bc274ac261cc69260913b2d1610760e55d3c0801bb3457ba7b9004420b6b4270"},
{file = "orjson-3.10.11-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:19b3763e8bbf8ad797df6b6b5e0fc7c843ec2e2fc0621398534e0c6400098f87"},
{file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1be83a13312e5e58d633580c5eb8d0495ae61f180da2722f20562974188af205"},
{file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:afacfd1ab81f46dedd7f6001b6d4e8de23396e4884cd3c3436bd05defb1a6446"},
{file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cb4d0bea56bba596723d73f074c420aec3b2e5d7d30698bc56e6048066bd560c"},
{file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96ed1de70fcb15d5fed529a656df29f768187628727ee2788344e8a51e1c1350"},
{file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4bfb30c891b530f3f80e801e3ad82ef150b964e5c38e1fb8482441c69c35c61c"},
{file = "orjson-3.10.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d496c74fc2b61341e3cefda7eec21b7854c5f672ee350bc55d9a4997a8a95204"},
{file = "orjson-3.10.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:655a493bac606655db9a47fe94d3d84fc7f3ad766d894197c94ccf0c5408e7d3"},
{file = "orjson-3.10.11-cp38-none-win32.whl", hash = "sha256:b9546b278c9fb5d45380f4809e11b4dd9844ca7aaf1134024503e134ed226161"},
{file = "orjson-3.10.11-cp38-none-win_amd64.whl", hash = "sha256:b592597fe551d518f42c5a2eb07422eb475aa8cfdc8c51e6da7054b836b26782"},
{file = "orjson-3.10.11-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c95f2ecafe709b4e5c733b5e2768ac569bed308623c85806c395d9cca00e08af"},
{file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80c00d4acded0c51c98754fe8218cb49cb854f0f7eb39ea4641b7f71732d2cb7"},
{file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:461311b693d3d0a060439aa669c74f3603264d4e7a08faa68c47ae5a863f352d"},
{file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52ca832f17d86a78cbab86cdc25f8c13756ebe182b6fc1a97d534051c18a08de"},
{file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c57ea78a753812f528178aa2f1c57da633754c91d2124cb28991dab4c79a54"},
{file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7fcfc6f7ca046383fb954ba528587e0f9336828b568282b27579c49f8e16aad"},
{file = "orjson-3.10.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:86b9dd983857970c29e4c71bb3e95ff085c07d3e83e7c46ebe959bac07ebd80b"},
{file = "orjson-3.10.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4d83f87582d223e54efb2242a79547611ba4ebae3af8bae1e80fa9a0af83bb7f"},
{file = "orjson-3.10.11-cp39-none-win32.whl", hash = "sha256:9fd0ad1c129bc9beb1154c2655f177620b5beaf9a11e0d10bac63ef3fce96950"},
{file = "orjson-3.10.11-cp39-none-win_amd64.whl", hash = "sha256:10f416b2a017c8bd17f325fb9dee1fb5cdd7a54e814284896b7c3f2763faa017"},
{file = "orjson-3.10.11.tar.gz", hash = "sha256:e35b6d730de6384d5b2dab5fd23f0d76fae8bbc8c353c2f78210aa5fa4beb3ef"},
]
[[package]]
name = "packaging"
version = "24.1"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
files = [
{file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"},
{file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
]
[[package]]
name = "pluggy"
version = "1.5.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.8"
files = [
{file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
{file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
]
[package.extras]
dev = ["pre-commit", "tox"]
testing = ["pytest", "pytest-benchmark"]
[[package]]
name = "pydantic"
version = "2.9.2"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
{file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"},
{file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"},
]
[package.dependencies]
annotated-types = ">=0.6.0"
pydantic-core = "2.23.4"
typing-extensions = [
{version = ">=4.6.1", markers = "python_version < \"3.13\""},
{version = ">=4.12.2", markers = "python_version >= \"3.13\""},
]
[package.extras]
email = ["email-validator (>=2.0.0)"]
timezone = ["tzdata"]
[[package]]
name = "pydantic-core"
version = "2.23.4"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
{file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"},
{file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"},
{file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"},
{file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"},
{file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"},
{file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"},
{file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"},
{file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"},
{file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"},
{file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"},
{file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"},
{file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"},
{file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"},
{file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"},
{file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"},
{file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"},
{file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"},
{file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"},
{file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"},
{file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"},
{file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"},
{file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"},
{file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"},
{file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"},
{file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"},
{file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"},
{file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"},
{file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"},
{file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"},
{file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"},
{file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"},
{file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"},
{file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"},
{file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"},
{file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"},
{file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"},
{file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"},
]
[package.dependencies]
typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
[[package]]
name = "pytest"
version = "7.4.4"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.7"
files = [
{file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
{file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
]
[package.dependencies]
colorama = {version = "*", markers = "sys_platform == \"win32\""}
exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
iniconfig = "*"
packaging = "*"
pluggy = ">=0.12,<2.0"
tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""}
[package.extras]
testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
[[package]]
name = "pytest-asyncio"
version = "0.21.2"
description = "Pytest support for asyncio"
optional = false
python-versions = ">=3.7"
files = [
{file = "pytest_asyncio-0.21.2-py3-none-any.whl", hash = "sha256:ab664c88bb7998f711d8039cacd4884da6430886ae8bbd4eded552ed2004f16b"},
{file = "pytest_asyncio-0.21.2.tar.gz", hash = "sha256:d67738fc232b94b326b9d060750beb16e0074210b98dd8b58a5239fa2a154f45"},
]
[package.dependencies]
pytest = ">=7.0.0"
[package.extras]
docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"]
[[package]]
name = "pytest-mock"
version = "3.14.0"
description = "Thin-wrapper around the mock package for easier use with pytest"
optional = false
python-versions = ">=3.8"
files = [
{file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"},
{file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"},
]
[package.dependencies]
pytest = ">=6.2.5"
[package.extras]
dev = ["pre-commit", "pytest-asyncio", "tox"]
[[package]]
name = "pytest-timeout"
version = "2.3.1"
description = "pytest plugin to abort hanging tests"
optional = false
python-versions = ">=3.7"
files = [
{file = "pytest-timeout-2.3.1.tar.gz", hash = "sha256:12397729125c6ecbdaca01035b9e5239d4db97352320af155b3f5de1ba5165d9"},
{file = "pytest_timeout-2.3.1-py3-none-any.whl", hash = "sha256:68188cb703edfc6a18fad98dc25a3c61e9f24d644b0b70f33af545219fc7813e"},
]
[package.dependencies]
pytest = ">=7.0.0"
[[package]]
name = "pytest-watcher"
version = "0.3.5"
description = "Automatically rerun your tests on file modifications"
optional = false
python-versions = ">=3.7.0,<4.0.0"
files = [
{file = "pytest_watcher-0.3.5-py3-none-any.whl", hash = "sha256:af00ca52c7be22dc34c0fd3d7ffef99057207a73b05dc5161fe3b2fe91f58130"},
{file = "pytest_watcher-0.3.5.tar.gz", hash = "sha256:8896152460ba2b1a8200c12117c6611008ec96c8b2d811f0a05ab8a82b043ff8"},
]
[package.dependencies]
tomli = {version = ">=2.0.1,<3.0.0", markers = "python_version < \"3.11\""}
watchdog = ">=2.0.0"
[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
description = "Extensions to the standard Python datetime module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
files = [
{file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
{file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
]
[package.dependencies]
six = ">=1.5"
[[package]]
name = "pyyaml"
version = "6.0.2"
description = "YAML parser and emitter for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
{file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
{file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"},
{file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"},
{file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"},
{file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"},
{file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"},
{file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"},
{file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"},
{file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"},
{file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"},
{file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"},
{file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"},
{file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"},
{file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"},
{file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"},
{file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"},
{file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"},
{file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"},
{file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"},
{file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"},
{file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"},
{file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"},
{file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"},
{file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"},
{file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"},
{file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"},
{file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"},
{file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"},
{file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"},
{file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"},
{file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"},
{file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"},
{file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"},
{file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"},
{file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"},
{file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"},
{file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"},
{file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"},
{file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"},
{file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"},
{file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"},
{file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"},
{file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"},
{file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"},
{file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"},
{file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"},
{file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"},
{file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"},
{file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"},
{file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"},
{file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"},
{file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"},
]
[[package]]
name = "requests"
version = "2.32.3"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
files = [
{file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
{file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
]
[package.dependencies]
certifi = ">=2017.4.17"
charset-normalizer = ">=2,<4"
idna = ">=2.5,<4"
urllib3 = ">=1.21.1,<3"
[package.extras]
socks = ["PySocks (>=1.5.6,!=1.5.7)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
[[package]]
name = "requests-toolbelt"
version = "1.0.0"
description = "A utility belt for advanced users of python-requests"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
files = [
{file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"},
{file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"},
]
[package.dependencies]
requests = ">=2.0.1,<3.0.0"
[[package]]
name = "ruff"
version = "0.5.7"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
{file = "ruff-0.5.7-py3-none-linux_armv6l.whl", hash = "sha256:548992d342fc404ee2e15a242cdbea4f8e39a52f2e7752d0e4cbe88d2d2f416a"},
{file = "ruff-0.5.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00cc8872331055ee017c4f1071a8a31ca0809ccc0657da1d154a1d2abac5c0be"},
{file = "ruff-0.5.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eaf3d86a1fdac1aec8a3417a63587d93f906c678bb9ed0b796da7b59c1114a1e"},
{file = "ruff-0.5.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a01c34400097b06cf8a6e61b35d6d456d5bd1ae6961542de18ec81eaf33b4cb8"},
{file = "ruff-0.5.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcc8054f1a717e2213500edaddcf1dbb0abad40d98e1bd9d0ad364f75c763eea"},
{file = "ruff-0.5.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f70284e73f36558ef51602254451e50dd6cc479f8b6f8413a95fcb5db4a55fc"},
{file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:a78ad870ae3c460394fc95437d43deb5c04b5c29297815a2a1de028903f19692"},
{file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ccd078c66a8e419475174bfe60a69adb36ce04f8d4e91b006f1329d5cd44bcf"},
{file = "ruff-0.5.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e31c9bad4ebf8fdb77b59cae75814440731060a09a0e0077d559a556453acbb"},
{file = "ruff-0.5.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d796327eed8e168164346b769dd9a27a70e0298d667b4ecee6877ce8095ec8e"},
{file = "ruff-0.5.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4a09ea2c3f7778cc635e7f6edf57d566a8ee8f485f3c4454db7771efb692c499"},
{file = "ruff-0.5.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a36d8dcf55b3a3bc353270d544fb170d75d2dff41eba5df57b4e0b67a95bb64e"},
{file = "ruff-0.5.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9369c218f789eefbd1b8d82a8cf25017b523ac47d96b2f531eba73770971c9e5"},
{file = "ruff-0.5.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b88ca3db7eb377eb24fb7c82840546fb7acef75af4a74bd36e9ceb37a890257e"},
{file = "ruff-0.5.7-py3-none-win32.whl", hash = "sha256:33d61fc0e902198a3e55719f4be6b375b28f860b09c281e4bdbf783c0566576a"},
{file = "ruff-0.5.7-py3-none-win_amd64.whl", hash = "sha256:083bbcbe6fadb93cd86709037acc510f86eed5a314203079df174c40bbbca6b3"},
{file = "ruff-0.5.7-py3-none-win_arm64.whl", hash = "sha256:2dca26154ff9571995107221d0aeaad0e75a77b5a682d6236cf89a58c70b76f4"},
{file = "ruff-0.5.7.tar.gz", hash = "sha256:8dfc0a458797f5d9fb622dd0efc52d796f23f0a1493a9527f4e49a550ae9a7e5"},
]
[[package]]
name = "six"
version = "1.16.0"
description = "Python 2 and 3 compatibility utilities"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
files = [
{file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
{file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
]
[[package]]
name = "sniffio"
version = "1.3.1"
description = "Sniff out which async library your code is running under"
optional = false
python-versions = ">=3.7"
files = [
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
]
[[package]]
name = "syrupy"
version = "4.7.2"
description = "Pytest Snapshot Test Utility"
optional = false
python-versions = ">=3.8.1"
files = [
{file = "syrupy-4.7.2-py3-none-any.whl", hash = "sha256:eae7ba6be5aed190237caa93be288e97ca1eec5ca58760e4818972a10c4acc64"},
{file = "syrupy-4.7.2.tar.gz", hash = "sha256:ea45e099f242de1bb53018c238f408a5bb6c82007bc687aefcbeaa0e1c2e935a"},
]
[package.dependencies]
pytest = ">=7.0.0,<9.0.0"
[[package]]
name = "tenacity"
version = "9.0.0"
description = "Retry code until it succeeds"
optional = false
python-versions = ">=3.8"
files = [
{file = "tenacity-9.0.0-py3-none-any.whl", hash = "sha256:93de0c98785b27fcf659856aa9f54bfbd399e29969b0621bc7f762bd441b4539"},
{file = "tenacity-9.0.0.tar.gz", hash = "sha256:807f37ca97d62aa361264d497b0e31e92b8027044942bfa756160d908320d73b"},
]
[package.extras]
doc = ["reno", "sphinx"]
test = ["pytest", "tornado (>=4.5)", "typeguard"]
[[package]]
name = "tomli"
version = "2.0.2"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
files = [
{file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"},
{file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"},
]
[[package]]
name = "typing-extensions"
version = "4.12.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
files = [
{file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
{file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
]
[[package]]
name = "urllib3"
version = "2.2.3"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.8"
files = [
{file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"},
{file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"},
]
[package.extras]
brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "watchdog"
version = "6.0.0"
description = "Filesystem events monitoring"
optional = false
python-versions = ">=3.9"
files = [
{file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26"},
{file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112"},
{file = "watchdog-6.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c897ac1b55c5a1461e16dae288d22bb2e412ba9807df8397a635d88f671d36c3"},
{file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6eb11feb5a0d452ee41f824e271ca311a09e250441c262ca2fd7ebcf2461a06c"},
{file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef810fbf7b781a5a593894e4f439773830bdecb885e6880d957d5b9382a960d2"},
{file = "watchdog-6.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:afd0fe1b2270917c5e23c2a65ce50c2a4abb63daafb0d419fde368e272a76b7c"},
{file = "watchdog-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdd4e6f14b8b18c334febb9c4425a878a2ac20efd1e0b231978e7b150f92a948"},
{file = "watchdog-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c7c15dda13c4eb00d6fb6fc508b3c0ed88b9d5d374056b239c4ad1611125c860"},
{file = "watchdog-6.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f10cb2d5902447c7d0da897e2c6768bca89174d0c6e1e30abec5421af97a5b0"},
{file = "watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c"},
{file = "watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134"},
{file = "watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b"},
{file = "watchdog-6.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e6f0e77c9417e7cd62af82529b10563db3423625c5fce018430b249bf977f9e8"},
{file = "watchdog-6.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:90c8e78f3b94014f7aaae121e6b909674df5b46ec24d6bebc45c44c56729af2a"},
{file = "watchdog-6.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e7631a77ffb1f7d2eefa4445ebbee491c720a5661ddf6df3498ebecae5ed375c"},
{file = "watchdog-6.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c7ac31a19f4545dd92fc25d200694098f42c9a8e391bc00bdd362c5736dbf881"},
{file = "watchdog-6.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9513f27a1a582d9808cf21a07dae516f0fab1cf2d7683a742c498b93eedabb11"},
{file = "watchdog-6.0.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7a0e56874cfbc4b9b05c60c8a1926fedf56324bb08cfbc188969777940aef3aa"},
{file = "watchdog-6.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:e6439e374fc012255b4ec786ae3c4bc838cd7309a540e5fe0952d03687d8804e"},
{file = "watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13"},
{file = "watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379"},
{file = "watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e"},
{file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f"},
{file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26"},
{file = "watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c"},
{file = "watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2"},
{file = "watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a"},
{file = "watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680"},
{file = "watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f"},
{file = "watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282"},
]
[package.extras]
watchmedo = ["PyYAML (>=3.10)"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<4.0"
content-hash = "7d24a5eb5b867fa9ad80cbc9fb80d630e7cb00a490b62502cdb57c2fe95cd125"
|
0 | lc_public_repos/langchain/libs/partners | lc_public_repos/langchain/libs/partners/anthropic/README.md | # langchain-anthropic
This package contains the LangChain integration for Anthropic's generative models.
## Installation
`pip install -U langchain-anthropic`
## Chat Models
Anthropic recommends using their chat models over text completions.
You can see their recommended models [here](https://docs.anthropic.com/claude/docs/models-overview#model-recommendations).
To use, you should have an Anthropic API key configured (for example via the `ANTHROPIC_API_KEY` environment variable). Initialize the model as:
```python
from langchain_anthropic import ChatAnthropic
from langchain_core.messages import AIMessage, HumanMessage
model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0, max_tokens=1024)
```
### Define the input message
`message = HumanMessage(content="What is the capital of France?")`
### Generate a response using the model
`response = model.invoke([message])`
For a more detailed walkthrough see [here](https://python.langchain.com/docs/integrations/chat/anthropic).
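Putting the steps above together, here is a minimal end-to-end sketch (it assumes `ANTHROPIC_API_KEY` is set in the environment; the question is illustrative):

```python
from langchain_anthropic import ChatAnthropic
from langchain_core.messages import HumanMessage

# Assumes ANTHROPIC_API_KEY is set in the environment.
model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0, max_tokens=1024)

message = HumanMessage(content="What is the capital of France?")
response = model.invoke([message])
print(response.content)
```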
## LLMs (Legacy)
You can use the Claude 2 models for text completions.
```python
from langchain_anthropic import AnthropicLLM
model = AnthropicLLM(model="claude-2.1", temperature=0, max_tokens=1024)
response = model.invoke("The best restaurant in San Francisco is: ")
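# Streaming is also supported; a minimal sketch (the prompt is illustrative):
for chunk in model.stream("Write a haiku about the ocean: "):
    print(chunk, end="", flush=True)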
``` |
0 | lc_public_repos/langchain/libs/partners | lc_public_repos/langchain/libs/partners/anthropic/pyproject.toml | [build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "langchain-anthropic"
version = "0.3.0"
description = "An integration package connecting Anthropic and LangChain"
authors = []
readme = "README.md"
repository = "https://github.com/langchain-ai/langchain"
license = "MIT"
[tool.mypy]
disallow_untyped_defs = "True"
[tool.poetry.urls]
"Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/anthropic"
"Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-anthropic%3D%3D0%22&expanded=true"
[tool.poetry.dependencies]
python = ">=3.9,<4.0"
anthropic = ">=0.39.0,<1"
langchain-core = "^0.3.17"
pydantic = "^2.7.4"
[tool.ruff.lint]
select = ["E", "F", "I", "T201"]
[tool.coverage.run]
omit = ["tests/*"]
[tool.pytest.ini_options]
addopts = "--snapshot-warn-unused --strict-markers --strict-config --durations=5"
markers = [
"requires: mark tests as requiring a specific library",
"compile: mark placeholder test used to compile integration tests without running them",
]
asyncio_mode = "auto"
[tool.poetry.dependencies.defusedxml]
version = "^0.7.1"
optional = true
[tool.poetry.group.test]
optional = true
[tool.poetry.group.codespell]
optional = true
[tool.poetry.group.lint]
optional = true
[tool.poetry.group.dev]
optional = true
[tool.poetry.group.test_integration]
optional = true
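# Illustrative note: the optional groups above are skipped by default and can
# be pulled in explicitly, e.g. `poetry install --with test,lint`.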
[tool.poetry.group.test.dependencies]
pytest = "^7.3.0"
freezegun = "^1.2.2"
pytest-mock = "^3.10.0"
syrupy = "^4.0.2"
pytest-watcher = "^0.3.4"
pytest-asyncio = "^0.21.1"
defusedxml = "^0.7.1"
pytest-timeout = "^2.3.1"
[tool.poetry.group.codespell.dependencies]
codespell = "^2.2.0"
[tool.poetry.group.lint.dependencies]
ruff = "^0.5"
[tool.poetry.group.typing.dependencies]
mypy = "^1.10"
[tool.poetry.group.test.dependencies.langchain-core]
path = "../../core"
develop = true
[tool.poetry.group.test.dependencies.langchain-tests]
path = "../../standard-tests"
develop = true
[tool.poetry.group.dev.dependencies.langchain-core]
path = "../../core"
develop = true
[tool.poetry.group.test_integration.dependencies.langchain-core]
path = "../../core"
develop = true
[tool.poetry.group.typing.dependencies.langchain-core]
path = "../../core"
develop = true
|
0 | lc_public_repos/langchain/libs/partners/anthropic | lc_public_repos/langchain/libs/partners/anthropic/langchain_anthropic/output_parsers.py | from typing import Any, List, Optional, Type, Union, cast
from langchain_core.messages import AIMessage, ToolCall
from langchain_core.messages.tool import tool_call
from langchain_core.output_parsers import BaseGenerationOutputParser
from langchain_core.outputs import ChatGeneration, Generation
from pydantic import BaseModel, ConfigDict
class ToolsOutputParser(BaseGenerationOutputParser):
"""Output parser for tool calls."""
first_tool_only: bool = False
"""Whether to return only the first tool call."""
args_only: bool = False
"""Whether to return only the arguments of the tool calls."""
pydantic_schemas: Optional[List[Type[BaseModel]]] = None
"""Pydantic schemas to parse tool calls into."""
model_config = ConfigDict(
extra="forbid",
)
def parse_result(self, result: List[Generation], *, partial: bool = False) -> Any:
"""Parse a list of candidate model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
Returns:
Structured output.
"""
if not result or not isinstance(result[0], ChatGeneration):
return None if self.first_tool_only else []
message = cast(AIMessage, result[0].message)
tool_calls: List = [
dict(tc) for tc in _extract_tool_calls_from_message(message)
]
if isinstance(message.content, list):
# Map tool call id to index
id_to_index = {
block["id"]: i
for i, block in enumerate(message.content)
if isinstance(block, dict) and block["type"] == "tool_use"
}
tool_calls = [{**tc, "index": id_to_index[tc["id"]]} for tc in tool_calls]
if self.pydantic_schemas:
tool_calls = [self._pydantic_parse(tc) for tc in tool_calls]
elif self.args_only:
tool_calls = [tc["args"] for tc in tool_calls]
        if self.first_tool_only:
            return tool_calls[0] if tool_calls else None
        return tool_calls
def _pydantic_parse(self, tool_call: dict) -> BaseModel:
cls_ = {schema.__name__: schema for schema in self.pydantic_schemas or []}[
tool_call["name"]
]
return cls_(**tool_call["args"])
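# Illustrative usage (not part of the library): as a Runnable, the parser can
# be piped after a tool-calling chat model, e.g.
#     chain = llm_with_tools | ToolsOutputParser(first_tool_only=True, args_only=True)
# where ``llm_with_tools`` stands in for a ChatAnthropic instance bound to tools.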
def _extract_tool_calls_from_message(message: AIMessage) -> List[ToolCall]:
"""Extract tool calls from a list of content blocks."""
if message.tool_calls:
return message.tool_calls
return extract_tool_calls(message.content)
def extract_tool_calls(content: Union[str, List[Union[str, dict]]]) -> List[ToolCall]:
"""Extract tool calls from a list of content blocks."""
if isinstance(content, list):
tool_calls = []
for block in content:
if isinstance(block, str):
continue
if block["type"] != "tool_use":
continue
tool_calls.append(
tool_call(name=block["name"], args=block["input"], id=block["id"])
)
return tool_calls
else:
return []
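# Illustrative usage sketch (not part of the library): how ``extract_tool_calls``
# converts Anthropic-style ``tool_use`` content blocks. The values are made up.
if __name__ == "__main__":
    blocks: List[Union[str, dict]] = [
        "Let me check the weather.",
        {
            "type": "tool_use",
            "name": "get_weather",
            "input": {"city": "Paris"},
            "id": "toolu_01",
        },
    ]
    # Each tool_use block becomes a ToolCall with its name, args, and id.
    print(extract_tool_calls(blocks))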
|
0 | lc_public_repos/langchain/libs/partners/anthropic | lc_public_repos/langchain/libs/partners/anthropic/langchain_anthropic/llms.py | import re
import warnings
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
)
import anthropic
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import BaseLanguageModel, LangSmithParams
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.prompt_values import PromptValue
from langchain_core.utils import (
get_pydantic_field_names,
)
from langchain_core.utils.utils import (
_build_model_kwargs,
from_env,
secret_from_env,
)
from pydantic import ConfigDict, Field, SecretStr, model_validator
from typing_extensions import Self
class _AnthropicCommon(BaseLanguageModel):
client: Any = None #: :meta private:
async_client: Any = None #: :meta private:
model: str = Field(default="claude-2", alias="model_name")
"""Model name to use."""
max_tokens_to_sample: int = Field(default=1024, alias="max_tokens")
"""Denotes the number of tokens to predict per generation."""
temperature: Optional[float] = None
"""A non-negative float that tunes the degree of randomness in generation."""
top_k: Optional[int] = None
"""Number of most likely tokens to consider at each step."""
top_p: Optional[float] = None
"""Total probability mass of tokens to consider at each step."""
streaming: bool = False
"""Whether to stream the results."""
default_request_timeout: Optional[float] = None
"""Timeout for requests to Anthropic Completion API. Default is 600 seconds."""
max_retries: int = 2
"""Number of retries allowed for requests sent to the Anthropic Completion API."""
anthropic_api_url: Optional[str] = Field(
alias="base_url",
default_factory=from_env(
"ANTHROPIC_API_URL",
default="https://api.anthropic.com",
),
)
"""Base URL for API requests. Only specify if using a proxy or service emulator.
If a value isn't passed in, will attempt to read the value from
ANTHROPIC_API_URL. If not set, the default value of 'https://api.anthropic.com' will
be used.
"""
anthropic_api_key: SecretStr = Field(
alias="api_key",
default_factory=secret_from_env("ANTHROPIC_API_KEY", default=""),
)
"""Automatically read from env var `ANTHROPIC_API_KEY` if not provided."""
HUMAN_PROMPT: Optional[str] = None
AI_PROMPT: Optional[str] = None
count_tokens: Optional[Callable[[str], int]] = None
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict) -> Any:
all_required_field_names = get_pydantic_field_names(cls)
values = _build_model_kwargs(values, all_required_field_names)
return values
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate that api key and python package exists in environment."""
self.client = anthropic.Anthropic(
base_url=self.anthropic_api_url,
api_key=self.anthropic_api_key.get_secret_value(),
timeout=self.default_request_timeout,
max_retries=self.max_retries,
)
self.async_client = anthropic.AsyncAnthropic(
base_url=self.anthropic_api_url,
api_key=self.anthropic_api_key.get_secret_value(),
timeout=self.default_request_timeout,
max_retries=self.max_retries,
)
self.HUMAN_PROMPT = anthropic.HUMAN_PROMPT
self.AI_PROMPT = anthropic.AI_PROMPT
return self
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling Anthropic API."""
d = {
"max_tokens_to_sample": self.max_tokens_to_sample,
"model": self.model,
}
if self.temperature is not None:
d["temperature"] = self.temperature
if self.top_k is not None:
d["top_k"] = self.top_k
if self.top_p is not None:
d["top_p"] = self.top_p
return {**d, **self.model_kwargs}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
        return {**self._default_params}
def _get_anthropic_stop(self, stop: Optional[List[str]] = None) -> List[str]:
if not self.HUMAN_PROMPT or not self.AI_PROMPT:
raise NameError("Please ensure the anthropic package is loaded")
if stop is None:
stop = []
# Never want model to invent new turns of Human / Assistant dialog.
stop.extend([self.HUMAN_PROMPT])
return stop
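    # Illustrative note (not part of the library): with the SDK's default
    # prompts this returns stop sequences like ["\n\nHuman:"], so the model
    # cannot fabricate a new human turn in its completion.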
class AnthropicLLM(LLM, _AnthropicCommon):
"""Anthropic large language model.
To use, you should have the environment variable ``ANTHROPIC_API_KEY``
set with your API key, or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_anthropic import AnthropicLLM
model = AnthropicLLM()
"""
model_config = ConfigDict(
populate_by_name=True,
arbitrary_types_allowed=True,
)
@model_validator(mode="before")
@classmethod
def raise_warning(cls, values: Dict) -> Any:
"""Raise warning that this class is deprecated."""
warnings.warn(
"This Anthropic LLM is deprecated. "
"Please use `from langchain_anthropic import ChatAnthropic` "
"instead"
)
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "anthropic-llm"
@property
def lc_secrets(self) -> Dict[str, str]:
return {"anthropic_api_key": "ANTHROPIC_API_KEY"}
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
"max_tokens": self.max_tokens_to_sample,
"temperature": self.temperature,
"top_k": self.top_k,
"top_p": self.top_p,
"model_kwargs": self.model_kwargs,
"streaming": self.streaming,
"default_request_timeout": self.default_request_timeout,
"max_retries": self.max_retries,
}
def _get_ls_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> LangSmithParams:
"""Get standard params for tracing."""
params = super()._get_ls_params(stop=stop, **kwargs)
identifying_params = self._identifying_params
if max_tokens := kwargs.get(
"max_tokens_to_sample",
identifying_params.get("max_tokens"),
):
params["ls_max_tokens"] = max_tokens
return params
def _wrap_prompt(self, prompt: str) -> str:
if not self.HUMAN_PROMPT or not self.AI_PROMPT:
raise NameError("Please ensure the anthropic package is loaded")
if prompt.startswith(self.HUMAN_PROMPT):
return prompt # Already wrapped.
# Guard against common errors in specifying wrong number of newlines.
corrected_prompt, n_subs = re.subn(r"^\n*Human:", self.HUMAN_PROMPT, prompt)
if n_subs == 1:
return corrected_prompt
# As a last resort, wrap the prompt ourselves to emulate instruct-style.
return f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT} Sure, here you go:\n"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
r"""Call out to Anthropic's completion endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "What are the biggest risks facing humanity?"
prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
response = model.invoke(prompt)
"""
if self.streaming:
completion = ""
for chunk in self._stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
completion += chunk.text
return completion
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
response = self.client.completions.create(
prompt=self._wrap_prompt(prompt),
stop_sequences=stop,
**params,
)
return response.completion
def convert_prompt(self, prompt: PromptValue) -> str:
return self._wrap_prompt(prompt.to_string())
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Anthropic's completion endpoint asynchronously."""
if self.streaming:
completion = ""
async for chunk in self._astream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
completion += chunk.text
return completion
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
response = await self.async_client.completions.create(
prompt=self._wrap_prompt(prompt),
stop_sequences=stop,
**params,
)
return response.completion
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
r"""Call Anthropic completion_stream and return the resulting generator.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens from Anthropic.
Example:
.. code-block:: python
prompt = "Write a poem about a stream."
prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
                generator = model.stream(prompt)
for token in generator:
yield token
"""
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
for token in self.client.completions.create(
prompt=self._wrap_prompt(prompt), stop_sequences=stop, stream=True, **params
):
chunk = GenerationChunk(text=token.completion)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
r"""Call Anthropic completion_stream and return the resulting generator.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens from Anthropic.
Example:
.. code-block:: python
prompt = "Write a poem about a stream."
prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
                generator = model.stream(prompt)
for token in generator:
yield token
"""
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
async for token in await self.async_client.completions.create(
prompt=self._wrap_prompt(prompt),
stop_sequences=stop,
stream=True,
**params,
):
chunk = GenerationChunk(text=token.completion)
if run_manager:
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
def get_num_tokens(self, text: str) -> int:
"""Calculate number of tokens."""
raise NotImplementedError(
"Anthropic's legacy count_tokens method was removed in anthropic 0.39.0 "
"and langchain-anthropic 0.3.0. Please use "
"ChatAnthropic.get_num_tokens_from_messages instead."
)
@deprecated(since="0.1.0", removal="1.0.0", alternative="AnthropicLLM")
class Anthropic(AnthropicLLM):
"""Anthropic large language model."""
pass
|
0 | lc_public_repos/langchain/libs/partners/anthropic | lc_public_repos/langchain/libs/partners/anthropic/langchain_anthropic/chat_models.py | import copy
import re
import warnings
from operator import itemgetter
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Literal,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypedDict,
Union,
cast,
)
import anthropic
from langchain_core._api import beta, deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import (
BaseChatModel,
LangSmithParams,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
HumanMessage,
SystemMessage,
ToolCall,
ToolMessage,
)
from langchain_core.messages.ai import InputTokenDetails, UsageMetadata
from langchain_core.messages.tool import tool_call_chunk as create_tool_call_chunk
from langchain_core.output_parsers import (
JsonOutputKeyToolsParser,
PydanticToolsParser,
)
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import (
Runnable,
RunnableMap,
RunnablePassthrough,
)
from langchain_core.tools import BaseTool
from langchain_core.utils import (
from_env,
get_pydantic_field_names,
secret_from_env,
)
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain_core.utils.pydantic import is_basemodel_subclass
from langchain_core.utils.utils import _build_model_kwargs
from pydantic import (
BaseModel,
ConfigDict,
Field,
PrivateAttr,
SecretStr,
model_validator,
)
from typing_extensions import NotRequired, Self
from langchain_anthropic.output_parsers import extract_tool_calls
_message_type_lookups = {
"human": "user",
"ai": "assistant",
"AIMessageChunk": "assistant",
"HumanMessageChunk": "user",
}
def _format_image(image_url: str) -> Dict:
"""
    Format an image URL of the form data:image/jpeg;base64,{b64_string}
    into a dict for the Anthropic API:
    {
        "type": "base64",
        "media_type": "image/jpeg",
        "data": "/9j/4AAQSkZJRg...",
    }
    Raise a ValueError if the URL is not a base64-encoded image.
"""
regex = r"^data:(?P<media_type>image/.+);base64,(?P<data>.+)$"
match = re.match(regex, image_url)
if match is None:
raise ValueError(
"Anthropic only supports base64-encoded images currently."
" Example: data:image/png;base64,'/9j/4AAQSk'..."
)
return {
"type": "base64",
"media_type": match.group("media_type"),
"data": match.group("data"),
}
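# Illustrative sketch (not part of the library): the data-URL parsing above,
# with a made-up (truncated) payload.
#
#     _format_image("data:image/png;base64,iVBORw0KGgo=")
#     # -> {"type": "base64", "media_type": "image/png", "data": "iVBORw0KGgo="}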
def _merge_messages(
messages: Sequence[BaseMessage],
) -> List[Union[SystemMessage, AIMessage, HumanMessage]]:
"""Merge runs of human/tool messages into single human messages with content blocks.""" # noqa: E501
merged: list = []
for curr in messages:
if isinstance(curr, ToolMessage):
if (
isinstance(curr.content, list)
and curr.content
and all(
isinstance(block, dict) and block.get("type") == "tool_result"
for block in curr.content
)
):
curr = HumanMessage(curr.content) # type: ignore[misc]
else:
curr = HumanMessage( # type: ignore[misc]
[
{
"type": "tool_result",
"content": curr.content,
"tool_use_id": curr.tool_call_id,
"is_error": curr.status == "error",
}
]
)
last = merged[-1] if merged else None
if any(
all(isinstance(m, c) for m in (curr, last))
for c in (SystemMessage, HumanMessage)
):
if isinstance(cast(BaseMessage, last).content, str):
new_content: List = [
{"type": "text", "text": cast(BaseMessage, last).content}
]
else:
new_content = copy.copy(cast(list, cast(BaseMessage, last).content))
if isinstance(curr.content, str):
new_content.append({"type": "text", "text": curr.content})
else:
new_content.extend(curr.content)
merged[-1] = curr.model_copy(update={"content": new_content})
else:
merged.append(curr)
return merged
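# Illustrative sketch (not part of the library): consecutive human/tool turns
# collapse into one message with content blocks, since the Anthropic API
# rejects back-to-back messages with the same role.
#
#     _merge_messages(
#         [
#             ToolMessage("result", tool_call_id="toolu_example"),
#             HumanMessage("What next?"),
#         ]
#     )
#     # -> one HumanMessage whose content is
#     #    [{"type": "tool_result", "content": "result",
#     #      "tool_use_id": "toolu_example", "is_error": False},
#     #     {"type": "text", "text": "What next?"}]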
def _format_messages(
messages: List[BaseMessage],
) -> Tuple[Union[str, List[Dict], None], List[Dict]]:
"""Format messages for anthropic."""
"""
[
{
"role": _message_type_lookups[m.type],
"content": [_AnthropicMessageContent(text=m.content).model_dump()],
}
for m in messages
]
"""
system: Union[str, List[Dict], None] = None
formatted_messages: List[Dict] = []
merged_messages = _merge_messages(messages)
for i, message in enumerate(merged_messages):
if message.type == "system":
if system is not None:
raise ValueError("Received multiple non-consecutive system messages.")
elif isinstance(message.content, list):
system = [
(
block
if isinstance(block, dict)
else {"type": "text", "text": block}
)
for block in message.content
]
else:
system = message.content
continue
role = _message_type_lookups[message.type]
content: Union[str, List]
if not isinstance(message.content, str):
# parse as dict
assert isinstance(
message.content, list
), "Anthropic message content must be str or list of dicts"
# populate content
content = []
for block in message.content:
if isinstance(block, str):
content.append({"type": "text", "text": block})
elif isinstance(block, dict):
if "type" not in block:
raise ValueError("Dict content block must have a type key")
elif block["type"] == "image_url":
# convert format
source = _format_image(block["image_url"]["url"])
content.append({"type": "image", "source": source})
elif block["type"] == "tool_use":
# If a tool_call with the same id as a tool_use content block
# exists, the tool_call is preferred.
if isinstance(message, AIMessage) and block["id"] in [
tc["id"] for tc in message.tool_calls
]:
overlapping = [
tc
for tc in message.tool_calls
if tc["id"] == block["id"]
]
content.extend(
_lc_tool_calls_to_anthropic_tool_use_blocks(overlapping)
)
else:
block.pop("text", None)
content.append(block)
elif block["type"] == "text":
text = block.get("text", "")
# Only add non-empty strings for now as empty ones are not
# accepted.
# https://github.com/anthropics/anthropic-sdk-python/issues/461
if text.strip():
content.append(
{
k: v
for k, v in block.items()
if k in ("type", "text", "cache_control")
}
)
elif block["type"] == "tool_result":
tool_content = _format_messages(
[HumanMessage(block["content"])]
)[1][0]["content"]
content.append({**block, **{"content": tool_content}})
else:
content.append(block)
else:
raise ValueError(
f"Content blocks must be str or dict, instead was: "
f"{type(block)}"
)
else:
content = message.content
# Ensure all tool_calls have a tool_use content block
if isinstance(message, AIMessage) and message.tool_calls:
content = content or []
content = (
[{"type": "text", "text": message.content}]
if isinstance(content, str) and content
else content
)
tool_use_ids = [
cast(dict, block)["id"]
for block in content
if cast(dict, block)["type"] == "tool_use"
]
missing_tool_calls = [
tc for tc in message.tool_calls if tc["id"] not in tool_use_ids
]
cast(list, content).extend(
_lc_tool_calls_to_anthropic_tool_use_blocks(missing_tool_calls)
)
formatted_messages.append({"role": role, "content": content})
return system, formatted_messages
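# Illustrative sketch (not part of the library): system messages are split out
# and returned separately, since the Anthropic API takes "system" as a
# top-level parameter rather than as a message.
#
#     _format_messages([SystemMessage("be brief"), HumanMessage("hi")])
#     # -> ("be brief", [{"role": "user", "content": "hi"}])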
class ChatAnthropic(BaseChatModel):
"""Anthropic chat models.
See https://docs.anthropic.com/en/docs/models-overview for a list of the latest models.
Setup:
Install ``langchain-anthropic`` and set environment variable ``ANTHROPIC_API_KEY``.
.. code-block:: bash
pip install -U langchain-anthropic
export ANTHROPIC_API_KEY="your-api-key"
Key init args — completion params:
model: str
Name of Anthropic model to use. E.g. "claude-3-sonnet-20240229".
temperature: float
Sampling temperature. Ranges from 0.0 to 1.0.
max_tokens: int
Max number of tokens to generate.
Key init args — client params:
timeout: Optional[float]
Timeout for requests.
max_retries: int
Max number of retries if a request fails.
api_key: Optional[str]
Anthropic API key. If not passed in will be read from env var ANTHROPIC_API_KEY.
base_url: Optional[str]
Base URL for API requests. Only specify if using a proxy or service
emulator.
See full list of supported init args and their descriptions in the params section.
Instantiate:
.. code-block:: python
from langchain_anthropic import ChatAnthropic
llm = ChatAnthropic(
model="claude-3-sonnet-20240229",
temperature=0,
max_tokens=1024,
timeout=None,
max_retries=2,
# api_key="...",
# base_url="...",
# other params...
)
**NOTE**: Any param which is not explicitly supported will be passed directly to the
    ``anthropic.Anthropic.messages.create(...)`` API every time the model is
invoked. For example:
.. code-block:: python
from langchain_anthropic import ChatAnthropic
import anthropic
ChatAnthropic(..., extra_headers={}).invoke(...)
# results in underlying API call of:
anthropic.Anthropic(..).messages.create(..., extra_headers={})
# which is also equivalent to:
ChatAnthropic(...).invoke(..., extra_headers={})
Invoke:
.. code-block:: python
messages = [
("system", "You are a helpful translator. Translate the user sentence to French."),
("human", "I love programming."),
]
llm.invoke(messages)
.. code-block:: python
AIMessage(content="J'aime la programmation.", response_metadata={'id': 'msg_01Trik66aiQ9Z1higrD5XFx3', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 25, 'output_tokens': 11}}, id='run-5886ac5f-3c2e-49f5-8a44-b1e92808c929-0', usage_metadata={'input_tokens': 25, 'output_tokens': 11, 'total_tokens': 36})
Stream:
.. code-block:: python
for chunk in llm.stream(messages):
print(chunk)
.. code-block:: python
AIMessageChunk(content='J', id='run-272ff5f9-8485-402c-b90d-eac8babc5b25')
AIMessageChunk(content="'", id='run-272ff5f9-8485-402c-b90d-eac8babc5b25')
AIMessageChunk(content='a', id='run-272ff5f9-8485-402c-b90d-eac8babc5b25')
AIMessageChunk(content='ime', id='run-272ff5f9-8485-402c-b90d-eac8babc5b25')
AIMessageChunk(content=' la', id='run-272ff5f9-8485-402c-b90d-eac8babc5b25')
AIMessageChunk(content=' programm', id='run-272ff5f9-8485-402c-b90d-eac8babc5b25')
AIMessageChunk(content='ation', id='run-272ff5f9-8485-402c-b90d-eac8babc5b25')
AIMessageChunk(content='.', id='run-272ff5f9-8485-402c-b90d-eac8babc5b25')
.. code-block:: python
stream = llm.stream(messages)
full = next(stream)
for chunk in stream:
full += chunk
full
.. code-block:: python
AIMessageChunk(content="J'aime la programmation.", id='run-b34faef0-882f-4869-a19c-ed2b856e6361')
Async:
.. code-block:: python
await llm.ainvoke(messages)
# stream:
            # async for chunk in llm.astream(messages)
# batch:
# await llm.abatch([messages])
.. code-block:: python
AIMessage(content="J'aime la programmation.", response_metadata={'id': 'msg_01Trik66aiQ9Z1higrD5XFx3', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 25, 'output_tokens': 11}}, id='run-5886ac5f-3c2e-49f5-8a44-b1e92808c929-0', usage_metadata={'input_tokens': 25, 'output_tokens': 11, 'total_tokens': 36})
Tool calling:
.. code-block:: python
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
class GetPopulation(BaseModel):
'''Get the current population in a given location'''
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
llm_with_tools = llm.bind_tools([GetWeather, GetPopulation])
ai_msg = llm_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?")
ai_msg.tool_calls
.. code-block:: python
[{'name': 'GetWeather',
'args': {'location': 'Los Angeles, CA'},
'id': 'toolu_01KzpPEAgzura7hpBqwHbWdo'},
{'name': 'GetWeather',
'args': {'location': 'New York, NY'},
'id': 'toolu_01JtgbVGVJbiSwtZk3Uycezx'},
{'name': 'GetPopulation',
'args': {'location': 'Los Angeles, CA'},
'id': 'toolu_01429aygngesudV9nTbCKGuw'},
{'name': 'GetPopulation',
'args': {'location': 'New York, NY'},
'id': 'toolu_01JPktyd44tVMeBcPPnFSEJG'}]
See ``ChatAnthropic.bind_tools()`` method for more.
Structured output:
.. code-block:: python
from typing import Optional
from pydantic import BaseModel, Field
class Joke(BaseModel):
'''Joke to tell user.'''
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")
structured_llm = llm.with_structured_output(Joke)
structured_llm.invoke("Tell me a joke about cats")
.. code-block:: python
Joke(setup='Why was the cat sitting on the computer?', punchline='To keep an eye on the mouse!', rating=None)
See ``ChatAnthropic.with_structured_output()`` for more.
Image input:
.. code-block:: python
import base64
import httpx
from langchain_core.messages import HumanMessage
image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
message = HumanMessage(
content=[
{"type": "text", "text": "describe the weather in this image"},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
},
],
)
ai_msg = llm.invoke([message])
ai_msg.content
.. code-block:: python
"The image depicts a sunny day with a partly cloudy sky. The sky is a brilliant blue color with scattered white clouds drifting across. The lighting and cloud patterns suggest pleasant, mild weather conditions. The scene shows a grassy field or meadow with a wooden boardwalk trail leading through it, indicating an outdoor setting on a nice day well-suited for enjoying nature."
Token usage:
.. code-block:: python
ai_msg = llm.invoke(messages)
ai_msg.usage_metadata
.. code-block:: python
{'input_tokens': 25, 'output_tokens': 11, 'total_tokens': 36}
Message chunks containing token usage will be included during streaming by
default:
.. code-block:: python
stream = llm.stream(messages)
full = next(stream)
for chunk in stream:
full += chunk
full.usage_metadata
.. code-block:: python
{'input_tokens': 25, 'output_tokens': 11, 'total_tokens': 36}
These can be disabled by setting ``stream_usage=False`` in the stream method,
or by setting ``stream_usage=False`` when initializing ChatAnthropic.
    Response metadata:
.. code-block:: python
ai_msg = llm.invoke(messages)
ai_msg.response_metadata
.. code-block:: python
{'id': 'msg_013xU6FHEGEq76aP4RgFerVT',
'model': 'claude-3-sonnet-20240229',
'stop_reason': 'end_turn',
'stop_sequence': None,
'usage': {'input_tokens': 25, 'output_tokens': 11}}
""" # noqa: E501
model_config = ConfigDict(
populate_by_name=True,
)
_client: anthropic.Client = PrivateAttr(default=None)
_async_client: anthropic.AsyncClient = PrivateAttr(default=None)
model: str = Field(alias="model_name")
"""Model name to use."""
max_tokens: int = Field(default=1024, alias="max_tokens_to_sample")
"""Denotes the number of tokens to predict per generation."""
temperature: Optional[float] = None
"""A non-negative float that tunes the degree of randomness in generation."""
top_k: Optional[int] = None
"""Number of most likely tokens to consider at each step."""
top_p: Optional[float] = None
"""Total probability mass of tokens to consider at each step."""
default_request_timeout: Optional[float] = Field(None, alias="timeout")
"""Timeout for requests to Anthropic Completion API."""
# sdk default = 2: https://github.com/anthropics/anthropic-sdk-python?tab=readme-ov-file#retries
max_retries: int = 2
"""Number of retries allowed for requests sent to the Anthropic Completion API."""
stop_sequences: Optional[List[str]] = Field(None, alias="stop")
"""Default stop sequences."""
anthropic_api_url: Optional[str] = Field(
alias="base_url",
default_factory=from_env(
["ANTHROPIC_API_URL", "ANTHROPIC_BASE_URL"],
default="https://api.anthropic.com",
),
)
"""Base URL for API requests. Only specify if using a proxy or service emulator.
If a value isn't passed in, will attempt to read the value first from
ANTHROPIC_API_URL and if that is not set, ANTHROPIC_BASE_URL.
If neither are set, the default value of 'https://api.anthropic.com' will
be used.
"""
anthropic_api_key: SecretStr = Field(
alias="api_key",
default_factory=secret_from_env("ANTHROPIC_API_KEY", default=""),
)
"""Automatically read from env var `ANTHROPIC_API_KEY` if not provided."""
default_headers: Optional[Mapping[str, str]] = None
"""Headers to pass to the Anthropic clients, will be used for every API call."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
streaming: bool = False
"""Whether to use streaming or not."""
stream_usage: bool = True
"""Whether to include usage metadata in streaming output. If True, additional
message chunks will be generated during the stream including usage metadata.
"""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "anthropic-chat"
@property
def lc_secrets(self) -> Dict[str, str]:
return {"anthropic_api_key": "ANTHROPIC_API_KEY"}
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "chat_models", "anthropic"]
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"top_k": self.top_k,
"top_p": self.top_p,
"model_kwargs": self.model_kwargs,
"streaming": self.streaming,
"max_retries": self.max_retries,
"default_request_timeout": self.default_request_timeout,
}
def _get_ls_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> LangSmithParams:
"""Get the parameters used to invoke the model."""
params = self._get_invocation_params(stop=stop, **kwargs)
ls_params = LangSmithParams(
ls_provider="anthropic",
ls_model_name=self.model,
ls_model_type="chat",
ls_temperature=params.get("temperature", self.temperature),
)
if ls_max_tokens := params.get("max_tokens", self.max_tokens):
ls_params["ls_max_tokens"] = ls_max_tokens
if ls_stop := stop or params.get("stop", None):
ls_params["ls_stop"] = ls_stop
return ls_params
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict) -> Any:
all_required_field_names = get_pydantic_field_names(cls)
values = _build_model_kwargs(values, all_required_field_names)
return values
@model_validator(mode="after")
def post_init(self) -> Self:
api_key = self.anthropic_api_key.get_secret_value()
api_url = self.anthropic_api_url
client_params: Dict[str, Any] = {
"api_key": api_key,
"base_url": api_url,
"max_retries": self.max_retries,
"default_headers": (self.default_headers or None),
}
# value <= 0 indicates the param should be ignored. None is a meaningful value
# for Anthropic client and treated differently than not specifying the param at
# all.
if self.default_request_timeout is None or self.default_request_timeout > 0:
client_params["timeout"] = self.default_request_timeout
self._client = anthropic.Client(**client_params)
self._async_client = anthropic.AsyncClient(**client_params)
return self
def _get_request_payload(
self,
input_: LanguageModelInput,
*,
stop: Optional[List[str]] = None,
**kwargs: Dict,
) -> Dict:
messages = self._convert_input(input_).to_messages()
system, formatted_messages = _format_messages(messages)
payload = {
"model": self.model,
"max_tokens": self.max_tokens,
"messages": formatted_messages,
"temperature": self.temperature,
"top_k": self.top_k,
"top_p": self.top_p,
"stop_sequences": stop or self.stop_sequences,
"system": system,
**self.model_kwargs,
**kwargs,
}
return {k: v for k, v in payload.items() if v is not None}
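    # Illustrative sketch (not part of the library): a minimal payload
    # produced by the method above with default init args; None-valued
    # entries (temperature, top_k, top_p, system, ...) are dropped.
    #
    #     llm = ChatAnthropic(model="claude-3-5-sonnet-20240620")
    #     llm._get_request_payload([HumanMessage("hi")])
    #     # -> {"model": "claude-3-5-sonnet-20240620", "max_tokens": 1024,
    #     #     "messages": [{"role": "user", "content": "hi"}]}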
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
*,
stream_usage: Optional[bool] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
if stream_usage is None:
stream_usage = self.stream_usage
kwargs["stream"] = True
payload = self._get_request_payload(messages, stop=stop, **kwargs)
stream = self._client.messages.create(**payload)
coerce_content_to_string = not _tools_in_params(payload)
for event in stream:
msg = _make_message_chunk_from_anthropic_event(
event,
stream_usage=stream_usage,
coerce_content_to_string=coerce_content_to_string,
)
if msg is not None:
chunk = ChatGenerationChunk(message=msg)
if run_manager and isinstance(msg.content, str):
run_manager.on_llm_new_token(msg.content, chunk=chunk)
yield chunk
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
*,
stream_usage: Optional[bool] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
if stream_usage is None:
stream_usage = self.stream_usage
kwargs["stream"] = True
payload = self._get_request_payload(messages, stop=stop, **kwargs)
stream = await self._async_client.messages.create(**payload)
coerce_content_to_string = not _tools_in_params(payload)
async for event in stream:
msg = _make_message_chunk_from_anthropic_event(
event,
stream_usage=stream_usage,
coerce_content_to_string=coerce_content_to_string,
)
if msg is not None:
chunk = ChatGenerationChunk(message=msg)
if run_manager and isinstance(msg.content, str):
await run_manager.on_llm_new_token(msg.content, chunk=chunk)
yield chunk
def _format_output(self, data: Any, **kwargs: Any) -> ChatResult:
data_dict = data.model_dump()
content = data_dict["content"]
llm_output = {
k: v for k, v in data_dict.items() if k not in ("content", "role", "type")
}
if len(content) == 1 and content[0]["type"] == "text":
msg = AIMessage(content=content[0]["text"])
elif any(block["type"] == "tool_use" for block in content):
tool_calls = extract_tool_calls(content)
msg = AIMessage(
content=content,
tool_calls=tool_calls,
)
else:
msg = AIMessage(content=content)
msg.usage_metadata = _create_usage_metadata(data.usage)
return ChatResult(
generations=[ChatGeneration(message=msg)],
llm_output=llm_output,
)
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
payload = self._get_request_payload(messages, stop=stop, **kwargs)
data = self._client.messages.create(**payload)
return self._format_output(data, **kwargs)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
payload = self._get_request_payload(messages, stop=stop, **kwargs)
data = await self._async_client.messages.create(**payload)
return self._format_output(data, **kwargs)
def bind_tools(
self,
tools: Sequence[Union[Dict[str, Any], Type, Callable, BaseTool]],
*,
tool_choice: Optional[
Union[Dict[str, str], Literal["any", "auto"], str]
] = None,
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
r"""Bind tool-like objects to this chat model.
Args:
tools: A list of tool definitions to bind to this chat model.
Supports Anthropic format tool schemas and any tool definition handled
by :meth:`~langchain_core.utils.function_calling.convert_to_openai_tool`.
tool_choice: Which tool to require the model to call. Options are:
- name of the tool as a string or as dict ``{"type": "tool", "name": "<<tool_name>>"}``: calls corresponding tool;
- ``"auto"``, ``{"type: "auto"}``, or None: automatically selects a tool (including no tool);
- ``"any"`` or ``{"type: "any"}``: force at least one tool to be called;
kwargs: Any additional parameters are passed directly to
:meth:`~langchain_anthropic.chat_models.ChatAnthropic.bind`.
Example:
.. code-block:: python
from langchain_anthropic import ChatAnthropic
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
class GetPrice(BaseModel):
'''Get the price of a specific product.'''
product: str = Field(..., description="The product to look up.")
llm = ChatAnthropic(model="claude-3-5-sonnet-20240620", temperature=0)
llm_with_tools = llm.bind_tools([GetWeather, GetPrice])
llm_with_tools.invoke("what is the weather like in San Francisco",)
# -> AIMessage(
# content=[
# {'text': '<thinking>\nBased on the user\'s question, the relevant function to call is GetWeather, which requires the "location" parameter.\n\nThe user has directly specified the location as "San Francisco". Since San Francisco is a well known city, I can reasonably infer they mean San Francisco, CA without needing the state specified.\n\nAll the required parameters are provided, so I can proceed with the API call.\n</thinking>', 'type': 'text'},
# {'text': None, 'type': 'tool_use', 'id': 'toolu_01SCgExKzQ7eqSkMHfygvYuu', 'name': 'GetWeather', 'input': {'location': 'San Francisco, CA'}}
# ],
# response_metadata={'id': 'msg_01GM3zQtoFv8jGQMW7abLnhi', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 487, 'output_tokens': 145}},
# id='run-87b1331e-9251-4a68-acef-f0a018b639cc-0'
# )
Example — force tool call with tool_choice 'any':
.. code-block:: python
from langchain_anthropic import ChatAnthropic
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
class GetPrice(BaseModel):
'''Get the price of a specific product.'''
product: str = Field(..., description="The product to look up.")
llm = ChatAnthropic(model="claude-3-5-sonnet-20240620", temperature=0)
llm_with_tools = llm.bind_tools([GetWeather, GetPrice], tool_choice="any")
llm_with_tools.invoke("what is the weather like in San Francisco",)
Example — force specific tool call with tool_choice '<name_of_tool>':
.. code-block:: python
from langchain_anthropic import ChatAnthropic
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
class GetPrice(BaseModel):
'''Get the price of a specific product.'''
product: str = Field(..., description="The product to look up.")
llm = ChatAnthropic(model="claude-3-5-sonnet-20240620", temperature=0)
llm_with_tools = llm.bind_tools([GetWeather, GetPrice], tool_choice="GetWeather")
llm_with_tools.invoke("what is the weather like in San Francisco",)
Example — cache specific tools:
.. code-block:: python
from langchain_anthropic import ChatAnthropic, convert_to_anthropic_tool
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
class GetPrice(BaseModel):
'''Get the price of a specific product.'''
product: str = Field(..., description="The product to look up.")
# We'll convert our pydantic class to the anthropic tool format
# before passing to bind_tools so that we can set the 'cache_control'
# field on our tool.
cached_price_tool = convert_to_anthropic_tool(GetPrice)
# Currently the only supported "cache_control" value is
# {"type": "ephemeral"}.
cached_price_tool["cache_control"] = {"type": "ephemeral"}
# We need to pass in extra headers to enable use of the beta cache
# control API.
llm = ChatAnthropic(
model="claude-3-5-sonnet-20240620",
temperature=0,
extra_headers={"anthropic-beta": "prompt-caching-2024-07-31"}
)
llm_with_tools = llm.bind_tools([GetWeather, cached_price_tool])
llm_with_tools.invoke("what is the weather like in San Francisco",)
This outputs:
.. code-block:: python
AIMessage(content=[{'text': "Certainly! I can help you find out the current weather in San Francisco. To get this information, I'll use the GetWeather function. Let me fetch that data for you right away.", 'type': 'text'}, {'id': 'toolu_01TS5h8LNo7p5imcG7yRiaUM', 'input': {'location': 'San Francisco, CA'}, 'name': 'GetWeather', 'type': 'tool_use'}], response_metadata={'id': 'msg_01Xg7Wr5inFWgBxE5jH9rpRo', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 171, 'output_tokens': 96, 'cache_creation_input_tokens': 1470, 'cache_read_input_tokens': 0}}, id='run-b36a5b54-5d69-470e-a1b0-b932d00b089e-0', tool_calls=[{'name': 'GetWeather', 'args': {'location': 'San Francisco, CA'}, 'id': 'toolu_01TS5h8LNo7p5imcG7yRiaUM', 'type': 'tool_call'}], usage_metadata={'input_tokens': 171, 'output_tokens': 96, 'total_tokens': 267})
If we invoke the tool again, we can see that the "usage" information in the AIMessage.response_metadata shows that we had a cache hit:
.. code-block:: python
AIMessage(content=[{'text': 'To get the current weather in San Francisco, I can use the GetWeather function. Let me check that for you.', 'type': 'text'}, {'id': 'toolu_01HtVtY1qhMFdPprx42qU2eA', 'input': {'location': 'San Francisco, CA'}, 'name': 'GetWeather', 'type': 'tool_use'}], response_metadata={'id': 'msg_016RfWHrRvW6DAGCdwB6Ac64', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 171, 'output_tokens': 82, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 1470}}, id='run-88b1f825-dcb7-4277-ac27-53df55d22001-0', tool_calls=[{'name': 'GetWeather', 'args': {'location': 'San Francisco, CA'}, 'id': 'toolu_01HtVtY1qhMFdPprx42qU2eA', 'type': 'tool_call'}], usage_metadata={'input_tokens': 171, 'output_tokens': 82, 'total_tokens': 253})
""" # noqa: E501
formatted_tools = [convert_to_anthropic_tool(tool) for tool in tools]
if not tool_choice:
pass
elif isinstance(tool_choice, dict):
kwargs["tool_choice"] = tool_choice
elif isinstance(tool_choice, str) and tool_choice in ("any", "auto"):
kwargs["tool_choice"] = {"type": tool_choice}
elif isinstance(tool_choice, str):
kwargs["tool_choice"] = {"type": "tool", "name": tool_choice}
else:
raise ValueError(
f"Unrecognized 'tool_choice' type {tool_choice=}. Expected dict, "
f"str, or None."
)
return self.bind(tools=formatted_tools, **kwargs)
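    # Illustrative sketch (not part of the library): the tool_choice
    # normalization above maps shorthand values onto the API's dict format.
    #
    #     "auto"       -> {"type": "auto"}
    #     "any"        -> {"type": "any"}
    #     "GetWeather" -> {"type": "tool", "name": "GetWeather"}
    #     dict values are passed through unchanged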
def with_structured_output(
self,
schema: Union[Dict, Type[BaseModel]],
*,
include_raw: bool = False,
**kwargs: Any,
) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
"""Model wrapper that returns outputs formatted to match the given schema.
Args:
schema: The output schema. Can be passed in as:
- an Anthropic tool schema,
- an OpenAI function/tool schema,
- a JSON Schema,
- a TypedDict class,
- or a Pydantic class.
If ``schema`` is a Pydantic class then the model output will be a
Pydantic instance of that class, and the model-generated fields will be
validated by the Pydantic class. Otherwise the model output will be a
dict and will not be validated. See :meth:`~langchain_core.utils.function_calling.convert_to_openai_tool`
for more on how to properly specify types and descriptions of
schema fields when specifying a Pydantic or TypedDict class.
include_raw:
If False then only the parsed structured output is returned. If
an error occurs during model output parsing it will be raised. If True
then both the raw model response (a BaseMessage) and the parsed model
response will be returned. If an error occurs during output parsing it
will be caught and returned as well. The final output is always a dict
with keys "raw", "parsed", and "parsing_error".
kwargs: Additional keyword arguments are ignored.
Returns:
            A Runnable that takes the same inputs as a :class:`~langchain_core.language_models.chat_models.BaseChatModel`.
If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs
an instance of ``schema`` (i.e., a Pydantic object).
Otherwise, if ``include_raw`` is False then Runnable outputs a dict.
If ``include_raw`` is True, then Runnable outputs a dict with keys:
- ``"raw"``: BaseMessage
- ``"parsed"``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
- ``"parsing_error"``: Optional[BaseException]
Example: Pydantic schema (include_raw=False):
.. code-block:: python
from langchain_anthropic import ChatAnthropic
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
llm = ChatAnthropic(model="claude-3-5-sonnet-20240620", temperature=0)
structured_llm = llm.with_structured_output(AnswerWithJustification)
structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> AnswerWithJustification(
# answer='They weigh the same',
# justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
# )
Example: Pydantic schema (include_raw=True):
.. code-block:: python
from langchain_anthropic import ChatAnthropic
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
llm = ChatAnthropic(model="claude-3-5-sonnet-20240620", temperature=0)
structured_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True)
structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> {
# 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
# 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
# 'parsing_error': None
# }
Example: Dict schema (include_raw=False):
.. code-block:: python
from langchain_anthropic import ChatAnthropic
schema = {
"name": "AnswerWithJustification",
"description": "An answer to the user question along with justification for the answer.",
"input_schema": {
"type": "object",
"properties": {
"answer": {"type": "string"},
"justification": {"type": "string"},
},
"required": ["answer", "justification"]
}
}
llm = ChatAnthropic(model="claude-3-5-sonnet-20240620", temperature=0)
structured_llm = llm.with_structured_output(schema)
structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> {
# 'answer': 'They weigh the same',
# 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
# }
.. versionchanged:: 0.1.22
Added support for TypedDict class as `schema`.
""" # noqa: E501
tool_name = convert_to_anthropic_tool(schema)["name"]
llm = self.bind_tools([schema], tool_choice=tool_name)
if isinstance(schema, type) and is_basemodel_subclass(schema):
output_parser: OutputParserLike = PydanticToolsParser(
tools=[schema], first_tool_only=True
)
else:
output_parser = JsonOutputKeyToolsParser(
key_name=tool_name, first_tool_only=True
)
if include_raw:
parser_assign = RunnablePassthrough.assign(
parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
)
parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
parser_with_fallback = parser_assign.with_fallbacks(
[parser_none], exception_key="parsing_error"
)
return RunnableMap(raw=llm) | parser_with_fallback
else:
return llm | output_parser
@beta()
def get_num_tokens_from_messages(
self,
messages: List[BaseMessage],
tools: Optional[
Sequence[Union[Dict[str, Any], Type, Callable, BaseTool]]
] = None,
) -> int:
"""Count tokens in a sequence of input messages.
Args:
messages: The message inputs to tokenize.
tools: If provided, sequence of dict, BaseModel, function, or BaseTools
to be converted to tool schemas.
Basic usage:
.. code-block:: python
from langchain_anthropic import ChatAnthropic
from langchain_core.messages import HumanMessage, SystemMessage
llm = ChatAnthropic(model="claude-3-5-sonnet-20241022")
messages = [
SystemMessage(content="You are a scientist"),
HumanMessage(content="Hello, Claude"),
]
llm.get_num_tokens_from_messages(messages)
.. code-block:: none
14
Pass tool schemas:
.. code-block:: python
from langchain_anthropic import ChatAnthropic
from langchain_core.messages import HumanMessage
from langchain_core.tools import tool
llm = ChatAnthropic(model="claude-3-5-sonnet-20241022")
@tool(parse_docstring=True)
def get_weather(location: str) -> str:
\"\"\"Get the current weather in a given location
Args:
location: The city and state, e.g. San Francisco, CA
\"\"\"
return "Sunny"
messages = [
HumanMessage(content="What's the weather like in San Francisco?"),
]
llm.get_num_tokens_from_messages(messages, tools=[get_weather])
.. code-block:: none
403
.. versionchanged:: 0.3.0
Uses Anthropic's token counting API to count tokens in messages. See:
https://docs.anthropic.com/en/docs/build-with-claude/token-counting
"""
formatted_system, formatted_messages = _format_messages(messages)
kwargs: Dict[str, Any] = {}
if isinstance(formatted_system, str):
kwargs["system"] = formatted_system
if tools:
kwargs["tools"] = [convert_to_anthropic_tool(tool) for tool in tools]
response = self._client.beta.messages.count_tokens(
betas=["token-counting-2024-11-01"],
model=self.model,
messages=formatted_messages, # type: ignore[arg-type]
**kwargs,
)
return response.input_tokens
class AnthropicTool(TypedDict):
"""Anthropic tool definition."""
name: str
description: str
input_schema: Dict[str, Any]
cache_control: NotRequired[Dict[str, str]]
def convert_to_anthropic_tool(
tool: Union[Dict[str, Any], Type, Callable, BaseTool],
) -> AnthropicTool:
"""Convert a tool-like object to an Anthropic tool definition."""
# already in Anthropic tool format
if isinstance(tool, dict) and all(
k in tool for k in ("name", "description", "input_schema")
):
anthropic_formatted = AnthropicTool(tool) # type: ignore
else:
oai_formatted = convert_to_openai_tool(tool)["function"]
anthropic_formatted = AnthropicTool(
name=oai_formatted["name"],
description=oai_formatted["description"],
input_schema=oai_formatted["parameters"],
)
return anthropic_formatted
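# Illustrative sketch (not part of the library): converting a plain function
# goes through the OpenAI-format path above; the docstring and type hints
# drive the generated schema, roughly:
#
#     def get_weather(location: str) -> str:
#         """Get the current weather in a given location."""
#         return "Sunny"
#
#     convert_to_anthropic_tool(get_weather)
#     # -> {"name": "get_weather",
#     #     "description": "Get the current weather in a given location.",
#     #     "input_schema": {"type": "object",
#     #                      "properties": {"location": {"type": "string"}},
#     #                      "required": ["location"]}}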
def _tools_in_params(params: dict) -> bool:
return "tools" in params or (
"extra_body" in params and params["extra_body"].get("tools")
)
class _AnthropicToolUse(TypedDict):
type: Literal["tool_use"]
name: str
input: dict
id: str
def _lc_tool_calls_to_anthropic_tool_use_blocks(
tool_calls: List[ToolCall],
) -> List[_AnthropicToolUse]:
blocks = []
for tool_call in tool_calls:
blocks.append(
_AnthropicToolUse(
type="tool_use",
name=tool_call["name"],
input=tool_call["args"],
id=cast(str, tool_call["id"]),
)
)
return blocks
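# Illustrative sketch (not part of the library): a LangChain tool call
# converted back into an Anthropic tool_use content block.
#
#     _lc_tool_calls_to_anthropic_tool_use_blocks(
#         [{"name": "get_weather", "args": {"location": "SF"},
#           "id": "toolu_example", "type": "tool_call"}]
#     )
#     # -> [{"type": "tool_use", "name": "get_weather",
#     #      "input": {"location": "SF"}, "id": "toolu_example"}]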
def _make_message_chunk_from_anthropic_event(
event: anthropic.types.RawMessageStreamEvent,
*,
stream_usage: bool = True,
coerce_content_to_string: bool,
) -> Optional[AIMessageChunk]:
"""Convert Anthropic event to AIMessageChunk.
Note that not all events will result in a message chunk. In these cases
we return None.
"""
message_chunk: Optional[AIMessageChunk] = None
# See https://github.com/anthropics/anthropic-sdk-python/blob/main/src/anthropic/lib/streaming/_messages.py # noqa: E501
if event.type == "message_start" and stream_usage:
usage_metadata = _create_usage_metadata(event.message.usage)
message_chunk = AIMessageChunk(
content="" if coerce_content_to_string else [],
usage_metadata=usage_metadata,
)
elif (
event.type == "content_block_start"
and event.content_block is not None
and event.content_block.type == "tool_use"
):
if coerce_content_to_string:
warnings.warn("Received unexpected tool content block.")
content_block = event.content_block.model_dump()
content_block["index"] = event.index
tool_call_chunk = create_tool_call_chunk(
index=event.index,
id=event.content_block.id,
name=event.content_block.name,
args="",
)
message_chunk = AIMessageChunk(
content=[content_block],
tool_call_chunks=[tool_call_chunk], # type: ignore
)
elif event.type == "content_block_delta":
if event.delta.type == "text_delta":
if coerce_content_to_string:
text = event.delta.text
message_chunk = AIMessageChunk(content=text)
else:
content_block = event.delta.model_dump()
content_block["index"] = event.index
content_block["type"] = "text"
message_chunk = AIMessageChunk(content=[content_block])
elif event.delta.type == "input_json_delta":
content_block = event.delta.model_dump()
content_block["index"] = event.index
content_block["type"] = "tool_use"
tool_call_chunk = create_tool_call_chunk(
index=event.index,
id=None,
name=None,
args=event.delta.partial_json,
)
message_chunk = AIMessageChunk(
content=[content_block],
tool_call_chunks=[tool_call_chunk], # type: ignore
)
elif event.type == "message_delta" and stream_usage:
usage_metadata = _create_usage_metadata(event.usage)
message_chunk = AIMessageChunk(
content="",
usage_metadata=usage_metadata,
response_metadata={
"stop_reason": event.delta.stop_reason,
"stop_sequence": event.delta.stop_sequence,
},
)
else:
pass
return message_chunk
@deprecated(since="0.1.0", removal="1.0.0", alternative="ChatAnthropic")
class ChatAnthropicMessages(ChatAnthropic):
pass
def _create_usage_metadata(anthropic_usage: BaseModel) -> UsageMetadata:
input_token_details: Dict = {
"cache_read": getattr(anthropic_usage, "cache_read_input_tokens", None),
"cache_creation": getattr(anthropic_usage, "cache_creation_input_tokens", None),
}
# Anthropic input_tokens exclude cached token counts.
input_tokens = (
getattr(anthropic_usage, "input_tokens", 0)
+ (input_token_details["cache_read"] or 0)
+ (input_token_details["cache_creation"] or 0)
)
output_tokens = getattr(anthropic_usage, "output_tokens", 0)
return UsageMetadata(
input_tokens=input_tokens,
output_tokens=output_tokens,
total_tokens=input_tokens + output_tokens,
input_token_details=InputTokenDetails(
**{k: v for k, v in input_token_details.items() if v is not None}
),
)
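# Illustrative sketch (not part of the library): the token arithmetic above.
# For a usage object reporting input_tokens=10, cache_read_input_tokens=100,
# and output_tokens=5, the result is
#
#     {"input_tokens": 110, "output_tokens": 5, "total_tokens": 115,
#      "input_token_details": {"cache_read": 100}}
#
# because Anthropic's input_tokens figure excludes cached tokens, which are
# added back in here.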
|
0 | lc_public_repos/langchain/libs/partners/anthropic | lc_public_repos/langchain/libs/partners/anthropic/langchain_anthropic/experimental.py | import json
from typing import (
Any,
Dict,
List,
Union,
)
from langchain_core._api import deprecated
from pydantic import PrivateAttr
from langchain_anthropic.chat_models import ChatAnthropic
SYSTEM_PROMPT_FORMAT = """In this environment you have access to a set of tools you can use to answer the user's question.
You may call them like this:
<function_calls>
<invoke>
<tool_name>$TOOL_NAME</tool_name>
<parameters>
<$PARAMETER_NAME>$PARAMETER_VALUE</$PARAMETER_NAME>
...
</parameters>
</invoke>
</function_calls>
Here are the tools available:
<tools>
{formatted_tools}
</tools>""" # noqa: E501
TOOL_FORMAT = """<tool_description>
<tool_name>{tool_name}</tool_name>
<description>{tool_description}</description>
<parameters>
{formatted_parameters}
</parameters>
</tool_description>"""
TOOL_PARAMETER_FORMAT = """<parameter>
<name>{parameter_name}</name>
<type>{parameter_type}</type>
<description>{parameter_description}</description>
</parameter>"""
def _get_type(parameter: Dict[str, Any]) -> str:
if "type" in parameter:
return parameter["type"]
if "anyOf" in parameter:
return json.dumps({"anyOf": parameter["anyOf"]})
if "allOf" in parameter:
return json.dumps({"allOf": parameter["allOf"]})
return json.dumps(parameter)
def get_system_message(tools: List[Dict]) -> str:
"""Generate a system message that describes the available tools."""
tools_data: List[Dict] = [
{
"tool_name": tool["name"],
"tool_description": tool["description"],
"formatted_parameters": "\n".join(
[
TOOL_PARAMETER_FORMAT.format(
parameter_name=name,
parameter_type=_get_type(parameter),
parameter_description=parameter.get("description"),
)
for name, parameter in tool["parameters"]["properties"].items()
]
),
}
for tool in tools
]
tools_formatted = "\n".join(
[
TOOL_FORMAT.format(
tool_name=tool["tool_name"],
tool_description=tool["tool_description"],
formatted_parameters=tool["formatted_parameters"],
)
for tool in tools_data
]
)
return SYSTEM_PROMPT_FORMAT.format(formatted_tools=tools_formatted)
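# Illustrative sketch (not part of the library): for a single tool with one
# string parameter, the generated system prompt embeds XML shaped like
#
#     <tool_description>
#     <tool_name>get_weather</tool_name>
#     <description>Get the current weather.</description>
#     <parameters>
#     <parameter>
#     <name>location</name>
#     <type>string</type>
#     <description>A city name, e.g. "Paris"</description>
#     </parameter>
#     </parameters>
#     </tool_description>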
def _xml_to_dict(t: Any) -> Union[str, Dict[str, Any]]:
# Base case: If the element has no children, return its text or an empty string.
if len(t) == 0:
return t.text or ""
# Recursive case: The element has children. Convert them into a dictionary.
d: Dict[str, Any] = {}
for child in t:
if child.tag not in d:
d[child.tag] = _xml_to_dict(child)
else:
# Handle multiple children with the same tag
if not isinstance(d[child.tag], list):
d[child.tag] = [d[child.tag]] # Convert existing entry into a list
d[child.tag].append(_xml_to_dict(child))
return d
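# Illustrative sketch (not part of the library): repeated child tags collapse
# into a list, which is why array-valued parameters round-trip.
#
#     import xml.etree.ElementTree as ET
#     _xml_to_dict(ET.fromstring("<p><x>1</x><x>2</x><y>3</y></p>"))
#     # -> {"x": ["1", "2"], "y": "3"}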
def _xml_to_function_call(invoke: Any, tools: List[Dict]) -> Dict[str, Any]:
name = invoke.find("tool_name").text
arguments = _xml_to_dict(invoke.find("parameters"))
    # Coerce argument values to the tool's declared parameter types
    # (e.g. wrap scalars declared as arrays in lists).
filtered_tools = [tool for tool in tools if tool["name"] == name]
if len(filtered_tools) > 0 and not isinstance(arguments, str):
tool = filtered_tools[0]
for key, value in arguments.items():
if key in tool["parameters"]["properties"]:
if "type" in tool["parameters"]["properties"][key]:
if tool["parameters"]["properties"][key][
"type"
] == "array" and not isinstance(value, list):
arguments[key] = [value]
if (
tool["parameters"]["properties"][key]["type"] != "object"
and isinstance(value, dict)
and len(value.keys()) == 1
):
arguments[key] = list(value.values())[0]
return {
"function": {
"name": name,
"arguments": json.dumps(arguments),
},
"type": "function",
}
def _xml_to_tool_calls(elem: Any, tools: List[Dict]) -> List[Dict[str, Any]]:
"""
Convert an XML element and its children into a dictionary of dictionaries.
"""
invokes = elem.findall("invoke")
return [_xml_to_function_call(invoke, tools) for invoke in invokes]
@deprecated(
"0.1.5",
removal="1.0.0",
alternative="ChatAnthropic",
message=(
"Tool-calling is now officially supported by the Anthropic API so this "
"workaround is no longer needed."
),
)
class ChatAnthropicTools(ChatAnthropic):
"""Chat model for interacting with Anthropic functions."""
_xmllib: Any = PrivateAttr(default=None)
|
0 | lc_public_repos/langchain/libs/partners/anthropic | lc_public_repos/langchain/libs/partners/anthropic/langchain_anthropic/__init__.py | from langchain_anthropic.chat_models import ChatAnthropic, ChatAnthropicMessages
from langchain_anthropic.llms import Anthropic, AnthropicLLM
__all__ = ["ChatAnthropicMessages", "ChatAnthropic", "Anthropic", "AnthropicLLM"]
|
0 | lc_public_repos/langchain/libs/partners/anthropic/tests | lc_public_repos/langchain/libs/partners/anthropic/tests/integration_tests/test_standard.py | """Standard LangChain interface tests"""
from pathlib import Path
from typing import Dict, List, Literal, Type, cast
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_anthropic import ChatAnthropic
REPO_ROOT_DIR = Path(__file__).parents[5]
class TestAnthropicStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatAnthropic
@property
def chat_model_params(self) -> dict:
return {"model": "claude-3-haiku-20240307"}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_image_tool_message(self) -> bool:
return True
@property
def supports_anthropic_inputs(self) -> bool:
return True
@property
def supported_usage_metadata_details(
self,
) -> Dict[
Literal["invoke", "stream"],
List[
Literal[
"audio_input",
"audio_output",
"reasoning_output",
"cache_read_input",
"cache_creation_input",
]
],
]:
return {
"invoke": ["cache_read_input", "cache_creation_input"],
"stream": ["cache_read_input", "cache_creation_input"],
}
def invoke_with_cache_creation_input(self, *, stream: bool = False) -> AIMessage:
llm = ChatAnthropic(
model="claude-3-5-sonnet-20240620", # type: ignore[call-arg]
extra_headers={"anthropic-beta": "prompt-caching-2024-07-31"}, # type: ignore[call-arg]
)
with open(REPO_ROOT_DIR / "README.md", "r") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
return _invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
}
],
}
],
stream,
)
def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
llm = ChatAnthropic(
model="claude-3-5-sonnet-20240620", # type: ignore[call-arg]
extra_headers={"anthropic-beta": "prompt-caching-2024-07-31"}, # type: ignore[call-arg]
)
with open(REPO_ROOT_DIR / "README.md", "r") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
# invoke twice so first invocation is cached
_invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
}
],
}
],
stream,
)
return _invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
}
],
}
],
stream,
)
def _invoke(llm: ChatAnthropic, input_: list, stream: bool) -> AIMessage:
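    # AIMessageChunk implements __add__, so summing streamed chunks rebuilds
    # the complete message, including aggregated usage_metadata.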
if stream:
full = None
for chunk in llm.stream(input_):
full = full + chunk if full else chunk # type: ignore[operator]
return cast(AIMessage, full)
else:
return cast(AIMessage, llm.invoke(input_))
|
0 | lc_public_repos/langchain/libs/partners/anthropic/tests | lc_public_repos/langchain/libs/partners/anthropic/tests/integration_tests/test_chat_models.py | """Test ChatAnthropic chat model."""
import json
from typing import List, Optional
import pytest
from langchain_core.callbacks import CallbackManager
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
HumanMessage,
SystemMessage,
ToolMessage,
)
from langchain_core.outputs import ChatGeneration, LLMResult
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import tool
from pydantic import BaseModel, Field
from langchain_anthropic import ChatAnthropic, ChatAnthropicMessages
from tests.unit_tests._utils import FakeCallbackHandler
MODEL_NAME = "claude-3-5-sonnet-20240620"
def test_stream() -> None:
"""Test streaming tokens from Anthropic."""
llm = ChatAnthropicMessages(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg]
full: Optional[BaseMessageChunk] = None
chunks_with_input_token_counts = 0
chunks_with_output_token_counts = 0
for token in llm.stream("I'm Pickle Rick"):
assert isinstance(token.content, str)
full = token if full is None else full + token
assert isinstance(token, AIMessageChunk)
if token.usage_metadata is not None:
if token.usage_metadata.get("input_tokens"):
chunks_with_input_token_counts += 1
elif token.usage_metadata.get("output_tokens"):
chunks_with_output_token_counts += 1
if chunks_with_input_token_counts != 1 or chunks_with_output_token_counts != 1:
raise AssertionError(
"Expected exactly one chunk with input or output token counts. "
"AIMessageChunk aggregation adds counts. Check that "
"this is behaving properly."
)
# check token usage is populated
assert isinstance(full, AIMessageChunk)
assert full.usage_metadata is not None
assert full.usage_metadata["input_tokens"] > 0
assert full.usage_metadata["output_tokens"] > 0
assert full.usage_metadata["total_tokens"] > 0
assert (
full.usage_metadata["input_tokens"] + full.usage_metadata["output_tokens"]
== full.usage_metadata["total_tokens"]
)
assert "stop_reason" in full.response_metadata
assert "stop_sequence" in full.response_metadata
async def test_astream() -> None:
"""Test streaming tokens from Anthropic."""
llm = ChatAnthropicMessages(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg]
full: Optional[BaseMessageChunk] = None
chunks_with_input_token_counts = 0
chunks_with_output_token_counts = 0
async for token in llm.astream("I'm Pickle Rick"):
assert isinstance(token.content, str)
full = token if full is None else full + token
assert isinstance(token, AIMessageChunk)
if token.usage_metadata is not None:
if token.usage_metadata.get("input_tokens"):
chunks_with_input_token_counts += 1
elif token.usage_metadata.get("output_tokens"):
chunks_with_output_token_counts += 1
if chunks_with_input_token_counts != 1 or chunks_with_output_token_counts != 1:
raise AssertionError(
"Expected exactly one chunk with input or output token counts. "
"AIMessageChunk aggregation adds counts. Check that "
"this is behaving properly."
)
# check token usage is populated
assert isinstance(full, AIMessageChunk)
assert full.usage_metadata is not None
assert full.usage_metadata["input_tokens"] > 0
assert full.usage_metadata["output_tokens"] > 0
assert full.usage_metadata["total_tokens"] > 0
assert (
full.usage_metadata["input_tokens"] + full.usage_metadata["output_tokens"]
== full.usage_metadata["total_tokens"]
)
assert "stop_reason" in full.response_metadata
assert "stop_sequence" in full.response_metadata
# test usage metadata can be excluded
model = ChatAnthropic(model_name=MODEL_NAME, stream_usage=False) # type: ignore[call-arg]
async for token in model.astream("hi"):
assert isinstance(token, AIMessageChunk)
assert token.usage_metadata is None
# check we override with kwarg
model = ChatAnthropic(model_name=MODEL_NAME) # type: ignore[call-arg]
assert model.stream_usage
async for token in model.astream("hi", stream_usage=False):
assert isinstance(token, AIMessageChunk)
assert token.usage_metadata is None
# Check expected raw API output
async_client = model._async_client
params: dict = {
"model": "claude-3-haiku-20240307",
"max_tokens": 1024,
"messages": [{"role": "user", "content": "hi"}],
"temperature": 0.0,
}
stream = await async_client.messages.create(**params, stream=True)
async for event in stream:
if event.type == "message_start":
assert event.message.usage.input_tokens > 1
            # Note: the single output token reported in the message_start event
            # does not contribute to the overall output token count.
assert event.message.usage.output_tokens == 1
elif event.type == "message_delta":
assert event.usage.output_tokens > 1
else:
pass
async def test_abatch() -> None:
"""Test streaming tokens from ChatAnthropicMessages."""
llm = ChatAnthropicMessages(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg]
result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token.content, str)
async def test_abatch_tags() -> None:
"""Test batch tokens from ChatAnthropicMessages."""
llm = ChatAnthropicMessages(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg]
result = await llm.abatch(
["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
)
for token in result:
assert isinstance(token.content, str)
async def test_async_tool_use() -> None:
llm = ChatAnthropic( # type: ignore[call-arg]
model=MODEL_NAME,
)
llm_with_tools = llm.bind_tools(
[
{
"name": "get_weather",
"description": "Get weather report for a city",
"input_schema": {
"type": "object",
"properties": {"location": {"type": "string"}},
},
}
]
)
response = await llm_with_tools.ainvoke("what's the weather in san francisco, ca")
assert isinstance(response, AIMessage)
assert isinstance(response.content, list)
assert isinstance(response.tool_calls, list)
assert len(response.tool_calls) == 1
tool_call = response.tool_calls[0]
assert tool_call["name"] == "get_weather"
assert isinstance(tool_call["args"], dict)
assert "location" in tool_call["args"]
# Test streaming
first = True
chunks = [] # type: ignore
async for chunk in llm_with_tools.astream(
"what's the weather in san francisco, ca"
):
chunks = chunks + [chunk]
if first:
gathered = chunk
first = False
else:
gathered = gathered + chunk # type: ignore
assert len(chunks) > 1
assert isinstance(gathered, AIMessageChunk)
assert isinstance(gathered.tool_call_chunks, list)
assert len(gathered.tool_call_chunks) == 1
tool_call_chunk = gathered.tool_call_chunks[0]
assert tool_call_chunk["name"] == "get_weather"
assert isinstance(tool_call_chunk["args"], str)
assert "location" in json.loads(tool_call_chunk["args"])
def test_batch() -> None:
"""Test batch tokens from ChatAnthropicMessages."""
llm = ChatAnthropicMessages(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg]
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token.content, str)
async def test_ainvoke() -> None:
"""Test invoke tokens from ChatAnthropicMessages."""
llm = ChatAnthropicMessages(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg]
result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
assert isinstance(result.content, str)
def test_invoke() -> None:
"""Test invoke tokens from ChatAnthropicMessages."""
llm = ChatAnthropicMessages(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg]
result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
assert isinstance(result.content, str)
def test_system_invoke() -> None:
"""Test invoke tokens with a system message"""
llm = ChatAnthropicMessages(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg]
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are an expert cartographer. If asked, you are a cartographer. "
"STAY IN CHARACTER",
),
("human", "Are you a mathematician?"),
]
)
chain = prompt | llm
result = chain.invoke({})
assert isinstance(result.content, str)
def test_anthropic_call() -> None:
"""Test valid call to anthropic."""
chat = ChatAnthropic(model=MODEL_NAME) # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = chat.invoke([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_anthropic_generate() -> None:
"""Test generate method of anthropic."""
chat = ChatAnthropic(model=MODEL_NAME) # type: ignore[call-arg]
chat_messages: List[List[BaseMessage]] = [
[HumanMessage(content="How many toes do dogs have?")]
]
messages_copy = [messages.copy() for messages in chat_messages]
result: LLMResult = chat.generate(chat_messages)
assert isinstance(result, LLMResult)
for response in result.generations[0]:
assert isinstance(response, ChatGeneration)
assert isinstance(response.text, str)
assert response.text == response.message.content
assert chat_messages == messages_copy
def test_anthropic_streaming() -> None:
"""Test streaming tokens from anthropic."""
chat = ChatAnthropic(model=MODEL_NAME) # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = chat.stream([message])
for token in response:
assert isinstance(token, AIMessageChunk)
assert isinstance(token.content, str)
def test_anthropic_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = ChatAnthropic( # type: ignore[call-arg]
model=MODEL_NAME,
callback_manager=callback_manager,
verbose=True,
)
message = HumanMessage(content="Write me a sentence with 10 words.")
for token in chat.stream([message]):
assert isinstance(token, AIMessageChunk)
assert isinstance(token.content, str)
assert callback_handler.llm_streams > 1
async def test_anthropic_async_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = ChatAnthropic( # type: ignore[call-arg]
model=MODEL_NAME,
callback_manager=callback_manager,
verbose=True,
)
chat_messages: List[BaseMessage] = [
HumanMessage(content="How many toes do dogs have?")
]
async for token in chat.astream(chat_messages):
assert isinstance(token, AIMessageChunk)
assert isinstance(token.content, str)
assert callback_handler.llm_streams > 1
def test_anthropic_multimodal() -> None:
"""Test that multimodal inputs are handled correctly."""
chat = ChatAnthropic(model=MODEL_NAME) # type: ignore[call-arg]
messages: list[BaseMessage] = [
HumanMessage(
content=[
{
"type": "image_url",
"image_url": {
# langchain logo
"url": "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAMCAggHCQgGCQgICAcICAgICAgICAYICAgHDAgHCAgICAgIBggICAgICAgICBYICAgICwkKCAgNDQoIDggICQgBAwQEBgUGCgYGCBALCg0QCg0NEA0KCg8LDQoKCgoLDgoQDQoLDQoKCg4NDQ0NDgsQDw0OCg4NDQ4NDQoJDg8OCP/AABEIALAAsAMBEQACEQEDEQH/xAAdAAEAAgEFAQAAAAAAAAAAAAAABwgJAQIEBQYD/8QANBAAAgIBAwIDBwQCAgIDAAAAAQIAAwQFERIIEwYhMQcUFyJVldQjQVGBcZEJMzJiFRYk/8QAGwEBAAMAAwEAAAAAAAAAAAAAAAQFBgEDBwL/xAA5EQACAQIDBQQJBAIBBQAAAAAAAQIDEQQhMQVBUWGREhRxgRMVIjJSU8HR8CNyobFCguEGJGKi4v/aAAwDAQACEQMRAD8ApfJplBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBANl16qOTEKB6kkAD+z5Tkcj0On+z7Ub1FlOmanejeavj6dqV6kfsQ1OK4IP8AIM6pVYR1kuqJdLCV6qvCnJ/6v66nL+Ems/RNc+y63+BOvvFL411O/wBW4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6HE1D2e6lQpsu0zU6EXzZ8jTtSoUD9yWuxUAA/kmdkasJaSXVHRVwlekrzpyX+r+mh56m9WHJSGU+hUgg/wBjynaRORvnAEAQBAEAQBAEAQCbennpVzfER95LHE0tX4tlsnJr2B2srw6yQLCpBQ3Me1W+4/VZLKlh4jFRo5ay4cPH7f0XWA2XUxft37MONs34ffRcy/Xsu6bdG0UK2Nh1tkAbHMyAt+Wx2HIi11/SDcQe3jrTXv6IJRVcRUqe88uC0Nxhdn0MMv0458XnJ+e7wVlyJPJkYsTSAIAgCAIAgCAIBqDAIx9qHTbo2tBmycOtcgjYZmOBRlqdjxJtQDuhdye3ette/qhkmliKlP3XlwehXYrZ9DEr9SOfFZS6rXwd1yKCdQ3Srm+HT7yGOXpbPxXLVOLUMTtXXmVgkVliQgvU9qx9h+kz11Ne4fFRrZaS4cfD7f2YfH7LqYT279qHHevH76PlvhKTClEAQBAEAQBAJp6WOn0+I80i7mumYnF8x1LIbSSe3iV2DYq13ElnQ8q6gdijWUuIeKxHoY5e89PuXWy8D3qp7S9iOvN/D9+XiZRNN06uiuvHqrSqmpFrqqrVUrrrUBUREUBVVVAAUAAATNNtu7PR4xUUoxVkskloktxyCZwfRj26jetHPtzrMXSM4Uabj7Vrfj10O2ZdsDbb3bqrCKEYmpeyED8Hs53LZVwvsPg4qN6kbt+OS8t5hdobYqOo44edorK6SzfmtFpz14H16f8Arkz6cmrD1e9crBvsFZy3ropvxC2yo7NTXXXbjhtuXcTmisz91hX2yr4KLjemrNbuPXeMDtuoqihiGnF/5ZJx55ZNceF76GQSUJuhAEAQBAEAhb239WWl+H391s7mXnbAnExu2WqUjdWyLHda6Qw2IXdrCCGFZX5pMo4WdXNZLiyoxm1KOFfZl7UuCtdeN2kvzcRB4d/5JMV7OOVpWRRSWAFmPk1ZTKN9uT1PRi+QHnsj2H12DHYGXLZzS9mV3zVvuVFL/qGDlapSaXFST6qyfS/3tb4M8a4up49WoYlyZGLcCUsTf1B2ZGVgHrsRgVNbqrIwIYAjaVc4Sg+zJWZqaVWFWCnB3T0/PodnqOnV312Y9taW02o1dtViq9dlbAq6OjAqyspIKkEEGfKbTuj7lFSTjJXTyaejXAxd9U/T6fDmYBTzbTMvm+G7FnNRBHcxLLDuWankCrueVlRG5dq7nOlwuI9NHP3lr9zzjamA7rU9n3Jacn8P25eBC0mFKIAgCAIBtdwASfQDc/4nIbsZXulr2ZDR9HwsYpxybqxmZe4Xl71cquyMR69hO3jg+fy0r5n1OWxNX0lRvdovBflz1DZuG7vh4xtZtXl+55vpp5EsyKWZ5X2seH783TdRwsZgmVk4OVRQzMUUXPRYle7gEoCxA5gEqDvsdp2U5KM03omv7I+Ig6lKUIuzaaXmigPtb6HNQ0bEytTGXjZeLiKlhWuu6rINPMLbY1bFqkXHQ908b7CyK+wUqFe+pY2FSSjZpvnl+MwmJ2JVw9OVTtqUYq+Sadt+WaVtd9+W+uLLv5HzB8j/AIlgZ8yRdGfUXXq2JXpGTZtquFUE+cnfMxU2Wu9CzEvaicEsG+/MdzYLbsmexmHdOXaS9l/w+H2PQ9kY9V6apyftxVtdUtJc3x58iykrjQCAIAgFdurzqbPh+lMHFKHVspC6FuLLh427Icp0O4d2ZWREb5WZLGbktJrssMJhvSu8vdX8vh9zP7X2i8LBRp27b46Rj8Vt73JebyVnCfSz0jNqh/8AsGsrZZRcxuoxrms7ua7HmcvLYkOaXJ5Ctjvkb8n/AE+K3TcVi+x+nS6rdyX33eJTbL2S636+JTaeaTveTf8AlLlwjv35ZFmfHnSnoWo47Yo0/FxLOBWnJw8ejHuobb5GVqkUOqnY9qwOjDyI9CKyGKqwd+03ybdjS19mYarHs+jSe5pJNdP6KudBPiTIwNYz/D1jA1WJk91AWKLqGJctDWVg+QFlfdQtsGcVY+//AFgSzx0VKmqi5dJK/wCeZm9iVJ0sRPDye6WWdu1BpXWeV78M8uGd/wCURuCJuqX2YjWNHzMYJyyaKzmYm3Hl71SrOqKW8h307mOT5fLc3mPUSsNV9HUT3aPwf5crNpYbvGHlG2azj+5Zrrp5mKFHBAI9CNx/iak8vTubpwBAEAQDtPCekLk5WHiON0yczFx3H8pbkVVMP7VyJ8zfZi3wTfRHdRh26kI8ZRXk5IzREf6mPPXTSAIB1/iPQa8yjIwrVD05NFuPYrAFWrsrat1YHyIKsRsf2nMXZpo+ZR7UXF77rqYW2xHrJqsHG2smu1T6rapKWKf8OCP6mxvfNHj1nH2XqsnfW6yOVpGr241teVRY9ORS4sqtrPF67B6Mp/2NiCGBIIYMQeGlJWaujsp1JU5KcHZrQyZdK/U3X4ipONdwq1fGQNkVL5JkVbhfe8cE/wDgWKq1e5NFjKD8
ttLPm8ThnSd17r0+35qej7N2hHFQs8prVfVcv6J4kIuBAKtdWnV8uj89I090fVeP/wCi8hXq05CvIcg26PmMpDCpgVqUrZaCGqrussLhPSe3P3f7/wCOf4s9tTaXd16On77/APXn48EU58OYl+RremrrRyHbJzdPbI9+LvZZjW21vUlgs5FMe4OqmshVrrscca9jtcSaVKXotydrcVr58zH04znioLFXd3G/a17L08E3u5vJEveGeobX/Cuq2YmttbbjX3NflUu7ZC1VW2OTlaZZuzDHrIbbGXZOFbV9qmwfLElh6Venelqsl4rc+fP6FtT2hicHiHDEu8W7u+ii8lKObtHL3fH/AC1tn1AdReJ4exVvJW/MyEJwcVWG9x2G1zkb8MVNwTbt83kqhmYCVVDDyqytot7/ADeanG46GFh2nm37q4/8c/qVr/4/fZ9k5Obm+J7+Xa430V2soVcrNuuW3LtT+RQUNZKjj3L2QHlRYqWOPqJRVJcvJJWRnth4epKpLE1FqnZ8XJ3b8MuG/LQvdKQ2ZqB/qAYXfFmkLjZWZiINkxszKx0H8JVkW1KP6VAJsIPtRT4pPqjyKtDsVJx4SkvJSdjq59HSIAgCAdp4T1dcbKw8tzsmNmYuQ5/hKsiq1j/SoTPma7UWuKa6o7qM+xUhLhKL8lJXM0RP+pjz100gCAIBjA6x/Y9ZpGq35KofcdSssy8ewA8Vvcl8rHJ3OzrazXAeQNVq8d+3Zx0mDrKpTS3rLy3P6HnG18I6FdzS9mWa/c9V9fPkQTJxRnf+AfHeRpOXj6pjHa/GsDhd+K2p6W0WHY/p31lqidiVDchsyqR8VIKpFxlo/wAv5EjD15UKiqw1X8revMy++DfFtOo4uNqNDcsfKprvrJ8iFZQeLD1Dod0KnzVlI/aZKcXCTi9UerUqkasFOLumk14M8T1L+0uzRdHzdRp8skKlGO2wPC+6xKUt2PkezzN3E7g8NtjvO7D01UqKL03+CzIe0MQ8Ph5VI66Lxbsv7Ks9D3ThTqG/iXOBvSvJsGHTae4L8lWDXZ2QzMzXMt7MoWzzNyW2PzPaYWeNxDj+nDLLPw4dPsZ7Y+CVb/ua3tO7tfitZPzyS5XJS6zOlu3XAmrYSh9Rpq7N2OzKozMYF3RUZyEXIqZ325lVtVyrMOFUjYPEql7MtP6f2J+1tmvE2qU/fWWusfo1/P8AVWfbjruoWabpFGrl/wD5Wq/UOyMhO3mV6QFxaU98BCuzW5dNxW2wcraqeZawku1pQjFVJOn7uWmna1y8uhmMdUqOhSjiPfTlr73o0rXfi1k96V7nq/YP0n6lr99OdqgysfS6qqKw2QbK8rKx6kWrHxcdG2toxlrUA3lU+Q71c3ta+rpr4qFJONOzlnpom9/N8vpkTMBsyriZKeITUEla+rSyUbapLyvzeZkT0fR6saqvFprSmilFrqqrUJXXWo2VEUABVUDbYSgbbd3qbyMVFWSskcucH0ag/wCoBhd8WauuTlZmWh3TIzMrIQ/yluRbap/tXBmwguzFLgkuiPIq0+3UnLjKT8nJ2Orn0dIgCAIBtdAQQfQjY/4nIauZXulr2nDWNHw8kvyyaKxh5e/Hl71SqozsF8h307eQB5fLcvkPQZbE0vR1Gt2q8H+WPUNm4nvGHjK92spfuWT66+ZLMilmIAgHm/aL4ExtVxL9PyaVvptRtkb1WwA9uyths1dqNsRYhDKf39Z905uElKLszor0YVoOE1dP86mH7R/DORdi5OeKz2sI4iZZIKtU+Q11dPJSvl+rS1ZBIKsyDY7krrXJKSjxvbyzPKY0ZuMprSNlLim21p4rPh1t6fA9ieq34Ka1RhW5OA7XKbMcC6ypq7DU/doT9cLyBPNK7ECglmT0nW60FLsN2fPnnroSI4KvKl6aMLxz0zeTavbW3hfy3Wq/4+fbVQKbPDd9wW7vWZGnK2wW2l17l9FTehsS0W5PA/M62uV5CqzhV4+i7+kS5Px4/T8z02wcXHsvDyed24+DzaXg7u3PLLSderP2f3arombi0KXyEFWVVWBu1jU2pc1SD93sqWxAP3dlkHC1FCqm9NOuRd7ToOvhpwjrk14xadv4K7dEPU5gYOI2iZ+RXiql1l2Hk2fJjtVae5ZVbaSUrsW42WB7O2jpYqg8k+exxuGnKXbgr8eOWXmUGxtpUqdP0FV9m12m9Gm72/8AFp8dfEmb22dZmlaXjv7nk42pag4K0U49q3U1t5fqZV1LFErTfl2g4st/8VCjnZXDo4Oc37ScVvv9L/iLXG7Xo0IfpyU57kndeLa0X8vRcq59OnsAzPFWY3iTVmezBa3uMbQOWo2qdhSibcUwa+IrPEBSq9pB/wBjV2GIrxoR9HT1/r/6M/s7A1MbU7ziHeN75/5tbuUF/Oml28h0oDfCAIBE/VL7TRo+j5uSr8cm6s4eJtx5e9XKyK6hvJuwncyCPP5aW8j6GVhqXpKiW7V+C/LFZtLE93w8pXzeUf3PJdNfIxQIgAAHoBsP8TUnl6VjdOAIAgCAIBNPSx1BHw5mE3c20zL4JmIoZjUQT28uusblmp5EMiDlZUTsHaulDDxWH9NHL3lp9i62Xj+61Pa9yWvJ/F9+XgZRNN1Ku+uvIqsS2m1FsqtrZXrsrYBkdHUlWVlIIYEggzNNNOzPR4yUkpRd081bRp7zkTg+jUQCH9Q8FeJjnNdVrmImmPx/QfTKXuqAVOXa2ZeTO5tAe29hWq1bpeS8lKdLs2cH2v3Zfn5kVjpYr0t1VXY4djNaaZ+OumWpGh9j2vaVi6pp+NVpep4+ouxQXY9ZzMnKybbGy8rVbNsHENdKMdiot2Raa0pbtjud/pac5RlK6a4PJJaJasivD4inCcIdmSle11m3JttyeStn/RJ/sG8A6no2LgaTaultiY+MwuuxmzUyDlFue4rek1XGxmd3yWspLvuwoTnskevONSTkr58bafm7dxJuDpVaNONOXZsln2b6+evjv4I6jVejTRLMp9TqTLw8xrRkV24eVZT7vkcuZtorKvUjM25KMj1+Z2RdzOxYuoo9l2a5rVcOJGnsnDubqxTjLVOMmrPilnG/k1yJxrXYAbkkADkdtyf5OwA3Pr5AD+APSQi5K7e1zod0nVrnzanu07KtZnuOMK3x7rWO7WPjuNlsY7sWoenmzMzB2YtLCljZ012XmuevUoMVsWhXk5puEnra1m+Nnl0tffmeY8Df8dum49iXZmZkZ4Q79gImJjv/AALQj23Mv/qt6BvRuQJU9lTaE5K0Vb+X9iNQ2BRg71JOfKyUemb/AJ/gtXhYSVIlNaLXVWqpXWiqqIigBURVACqoAAUAAASrbvmzTpJKy0PtByIBx9R1KuiuzItsSqmpGsttsZUrrrUFnd3YhVVVBJYkAATlJt2R8ykopyk7JZtvRJbzF31T9QR8R5gNPNdMxOSYaMGQ2kkdzLsrOxVruICo45V1AbhGsuQaXC4f0Mc/eev2PONqY7vVT2fcjpzfxfbl4kLSYUogCAIAgCAIBNvTz1VZvh0+7FTl6Wz8mxGfi1D
E72WYdhBFZYkuaGHasfc/os9lrQ8RhY1s9JcePj9/7LrAbUnhPYt2ocN68Pto+W+/fsv6ktG1oKuNmVrkEbnDyCKMtTsOQFTkd0LuB3KGtr39HMoquHqU/eWXFaG4wu0KGJX6cs+DykvJ6+KuuZJxEjFiaQBAEAQBAEAQBANQIBGHtR6ktG0UMuTmVtkAbjDxyt+Wx2PEGpG/SDcSO5kNTXv6uJJpYepV91ZcXoV2K2hQwy/UlnwWcn5bvF2XMoL1DdVWb4iPuwU4mlq/JcRX5NewO9dmZYABYVIDilR2q32P6rJXat7h8LGjnrLjw8Pv/Rh8ftSpi/Yt2YcL5vx+2i5kJSYUogCAIAgCAIAgCAbLqFYcWAZT6hgCD/R8pyOZ6HT/AGg6lQorp1PU6EXyVMfUdSoUD9gFpykAA/gCdUqUJaxXREuli69JWhUkv9n9Tl/FvWfreufetb/PnX3el8C6Hf6yxXzX1Hxb1n63rn3rW/z47vS+BdB6yxXzX1Hxb1n63rn3rW/z47vS+BdB6yxXzX1Hxb1n63rn3rW/z47vS+BdB6yxXzX1Hxb1n63rn3rW/wA+O70vgXQessV819R8W9Z+t65961v8+O70vgXQessV819R8W9Z+t65961v8+O70vgXQessV819R8W9Z+t65961v8+O70vgXQessV819Tiah7QdRvU13anqd6N5MmRqOpXqR+4K3ZTgg/wROyNKEdIrojoqYuvVVp1JP/Z/TU89TQqjioCgegAAA/oeU7SJzN84AgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgH/9k=", # noqa: E501
},
},
{"type": "text", "text": "What is this a logo for?"},
]
)
]
response = chat.invoke(messages)
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
num_tokens = chat.get_num_tokens_from_messages(messages)
assert num_tokens > 0
def test_streaming() -> None:
"""Test streaming tokens from Anthropic."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = ChatAnthropicMessages( # type: ignore[call-arg, call-arg]
model_name=MODEL_NAME, streaming=True, callback_manager=callback_manager
)
response = llm.generate([[HumanMessage(content="I'm Pickle Rick")]])
assert callback_handler.llm_streams > 0
assert isinstance(response, LLMResult)
async def test_astreaming() -> None:
"""Test streaming tokens from Anthropic."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = ChatAnthropicMessages( # type: ignore[call-arg, call-arg]
model_name=MODEL_NAME, streaming=True, callback_manager=callback_manager
)
response = await llm.agenerate([[HumanMessage(content="I'm Pickle Rick")]])
assert callback_handler.llm_streams > 0
assert isinstance(response, LLMResult)
def test_tool_use() -> None:
llm = ChatAnthropic(model=MODEL_NAME) # type: ignore[call-arg]
llm_with_tools = llm.bind_tools(
[
{
"name": "get_weather",
"description": "Get weather report for a city",
"input_schema": {
"type": "object",
"properties": {"location": {"type": "string"}},
},
}
]
)
response = llm_with_tools.invoke("what's the weather in san francisco, ca")
assert isinstance(response, AIMessage)
assert isinstance(response.content, list)
assert isinstance(response.tool_calls, list)
assert len(response.tool_calls) == 1
tool_call = response.tool_calls[0]
assert tool_call["name"] == "get_weather"
assert isinstance(tool_call["args"], dict)
assert "location" in tool_call["args"]
# Test streaming
input = "how are you? what's the weather in san francisco, ca"
first = True
chunks = [] # type: ignore
for chunk in llm_with_tools.stream(input):
chunks = chunks + [chunk]
if first:
gathered = chunk
first = False
else:
gathered = gathered + chunk # type: ignore
assert len(chunks) > 1
assert isinstance(gathered.content, list)
assert len(gathered.content) == 2
tool_use_block = None
for content_block in gathered.content:
assert isinstance(content_block, dict)
if content_block["type"] == "tool_use":
tool_use_block = content_block
break
assert tool_use_block is not None
assert tool_use_block["name"] == "get_weather"
assert "location" in json.loads(tool_use_block["partial_json"])
assert isinstance(gathered, AIMessageChunk)
assert isinstance(gathered.tool_calls, list)
assert len(gathered.tool_calls) == 1
tool_call = gathered.tool_calls[0]
assert tool_call["name"] == "get_weather"
assert isinstance(tool_call["args"], dict)
assert "location" in tool_call["args"]
assert tool_call["id"] is not None
# Test passing response back to model
stream = llm_with_tools.stream(
[
input,
gathered,
ToolMessage(content="sunny and warm", tool_call_id=tool_call["id"]),
]
)
chunks = [] # type: ignore
first = True
for chunk in stream:
chunks = chunks + [chunk]
if first:
gathered = chunk
first = False
else:
gathered = gathered + chunk # type: ignore
assert len(chunks) > 1
def test_anthropic_with_empty_text_block() -> None:
"""Anthropic SDK can return an empty text block."""
@tool
def type_letter(letter: str) -> str:
"""Type the given letter."""
return "OK"
model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0).bind_tools( # type: ignore[call-arg]
[type_letter]
)
messages = [
SystemMessage(
content="Repeat the given string using the provided tools. Do not write "
"anything else or provide any explanations. For example, "
"if the string is 'abc', you must print the "
"letters 'a', 'b', and 'c' one at a time and in that order. "
),
HumanMessage(content="dog"),
AIMessage(
content=[
{"text": "", "type": "text"},
{
"id": "toolu_01V6d6W32QGGSmQm4BT98EKk",
"input": {"letter": "d"},
"name": "type_letter",
"type": "tool_use",
},
],
tool_calls=[
{
"name": "type_letter",
"args": {"letter": "d"},
"id": "toolu_01V6d6W32QGGSmQm4BT98EKk",
"type": "tool_call",
},
],
),
ToolMessage(content="OK", tool_call_id="toolu_01V6d6W32QGGSmQm4BT98EKk"),
]
model.invoke(messages)
def test_with_structured_output() -> None:
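    # With a dict schema, with_structured_output forces a tool call for the
    # schema and returns the parsed tool arguments as a plain dict.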
llm = ChatAnthropic( # type: ignore[call-arg]
model="claude-3-opus-20240229",
)
structured_llm = llm.with_structured_output(
{
"name": "get_weather",
"description": "Get weather report for a city",
"input_schema": {
"type": "object",
"properties": {"location": {"type": "string"}},
},
}
)
response = structured_llm.invoke("what's the weather in san francisco, ca")
assert isinstance(response, dict)
assert response["location"]
def test_get_num_tokens_from_messages() -> None:
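    # For newer Claude models, token counting is delegated to Anthropic's
    # server-side count-tokens endpoint, hence this lives in integration tests.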
llm = ChatAnthropic(model="claude-3-5-sonnet-20241022") # type: ignore[call-arg]
# Test simple case
messages = [
SystemMessage(content="You are a scientist"),
HumanMessage(content="Hello, Claude"),
]
num_tokens = llm.get_num_tokens_from_messages(messages)
assert num_tokens > 0
# Test tool use
@tool(parse_docstring=True)
def get_weather(location: str) -> str:
"""Get the current weather in a given location
Args:
location: The city and state, e.g. San Francisco, CA
"""
return "Sunny"
messages = [
HumanMessage(content="What's the weather like in San Francisco?"),
]
num_tokens = llm.get_num_tokens_from_messages(messages, tools=[get_weather])
assert num_tokens > 0
messages = [
HumanMessage(content="What's the weather like in San Francisco?"),
AIMessage(
content=[
{"text": "Let's see.", "type": "text"},
{
"id": "toolu_01V6d6W32QGGSmQm4BT98EKk",
"input": {"location": "SF"},
"name": "get_weather",
"type": "tool_use",
},
],
tool_calls=[
{
"name": "get_weather",
"args": {"location": "SF"},
"id": "toolu_01V6d6W32QGGSmQm4BT98EKk",
"type": "tool_call",
},
],
),
ToolMessage(content="Sunny", tool_call_id="toolu_01V6d6W32QGGSmQm4BT98EKk"),
]
num_tokens = llm.get_num_tokens_from_messages(messages, tools=[get_weather])
assert num_tokens > 0
class GetWeather(BaseModel):
"""Get the current weather in a given location"""
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
@pytest.mark.parametrize("tool_choice", ["GetWeather", "auto", "any"])
def test_anthropic_bind_tools_tool_choice(tool_choice: str) -> None:
chat_model = ChatAnthropic( # type: ignore[call-arg]
model=MODEL_NAME,
)
chat_model_with_tools = chat_model.bind_tools([GetWeather], tool_choice=tool_choice)
response = chat_model_with_tools.invoke("what's the weather in ny and la")
assert isinstance(response, AIMessage)
|
0 | lc_public_repos/langchain/libs/partners/anthropic/tests | lc_public_repos/langchain/libs/partners/anthropic/tests/integration_tests/test_llms.py | """Test Anthropic API wrapper."""
from typing import Generator
import pytest
from langchain_core.callbacks import CallbackManager
from langchain_core.outputs import LLMResult
from langchain_anthropic import Anthropic
from tests.unit_tests._utils import FakeCallbackHandler
@pytest.mark.requires("anthropic")
def test_anthropic_model_name_param() -> None:
llm = Anthropic(model_name="foo")
assert llm.model == "foo"
@pytest.mark.requires("anthropic")
def test_anthropic_model_param() -> None:
llm = Anthropic(model="foo") # type: ignore[call-arg]
assert llm.model == "foo"
def test_anthropic_call() -> None:
"""Test valid call to anthropic."""
llm = Anthropic(model="claude-2.1") # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_anthropic_streaming() -> None:
"""Test streaming tokens from anthropic."""
llm = Anthropic(model="claude-2.1") # type: ignore[call-arg]
generator = llm.stream("I'm Pickle Rick")
assert isinstance(generator, Generator)
for token in generator:
assert isinstance(token, str)
def test_anthropic_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = Anthropic(
streaming=True,
callback_manager=callback_manager,
verbose=True,
)
llm.invoke("Write me a sentence with 100 words.")
assert callback_handler.llm_streams > 1
async def test_anthropic_async_generate() -> None:
"""Test async generate."""
llm = Anthropic()
output = await llm.agenerate(["How many toes do dogs have?"])
assert isinstance(output, LLMResult)
async def test_anthropic_async_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = Anthropic(
streaming=True,
callback_manager=callback_manager,
verbose=True,
)
result = await llm.agenerate(["How many toes do dogs have?"])
assert callback_handler.llm_streams > 1
assert isinstance(result, LLMResult)
|
0 | lc_public_repos/langchain/libs/partners/anthropic/tests | lc_public_repos/langchain/libs/partners/anthropic/tests/integration_tests/test_compile.py | import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
pass
|
0 | lc_public_repos/langchain/libs/partners/anthropic/tests | lc_public_repos/langchain/libs/partners/anthropic/tests/integration_tests/test_experimental.py | """Test ChatAnthropic chat model."""
from enum import Enum
from typing import List, Optional
from langchain_core.prompts import ChatPromptTemplate
from pydantic import BaseModel, Field
from langchain_anthropic.experimental import ChatAnthropicTools
MODEL_NAME = "claude-3-sonnet-20240229"
BIG_MODEL_NAME = "claude-3-opus-20240229"
#####################################
### Test Basic features, no tools ###
#####################################
def test_stream() -> None:
"""Test streaming tokens from Anthropic."""
llm = ChatAnthropicTools(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg]
for token in llm.stream("I'm Pickle Rick"):
assert isinstance(token.content, str)
async def test_astream() -> None:
"""Test streaming tokens from Anthropic."""
llm = ChatAnthropicTools(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg]
async for token in llm.astream("I'm Pickle Rick"):
assert isinstance(token.content, str)
async def test_abatch() -> None:
"""Test streaming tokens from ChatAnthropicTools."""
llm = ChatAnthropicTools(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg]
result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token.content, str)
async def test_abatch_tags() -> None:
"""Test batch tokens from ChatAnthropicTools."""
llm = ChatAnthropicTools(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg]
result = await llm.abatch(
["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
)
for token in result:
assert isinstance(token.content, str)
def test_batch() -> None:
"""Test batch tokens from ChatAnthropicTools."""
llm = ChatAnthropicTools(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg]
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token.content, str)
async def test_ainvoke() -> None:
"""Test invoke tokens from ChatAnthropicTools."""
llm = ChatAnthropicTools(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg]
result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
assert isinstance(result.content, str)
def test_invoke() -> None:
"""Test invoke tokens from ChatAnthropicTools."""
llm = ChatAnthropicTools(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg]
result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
assert isinstance(result.content, str)
def test_system_invoke() -> None:
"""Test invoke tokens with a system message"""
llm = ChatAnthropicTools(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg]
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are an expert cartographer. If asked, you are a cartographer. "
"STAY IN CHARACTER",
),
("human", "Are you a mathematician?"),
]
)
chain = prompt | llm
result = chain.invoke({})
assert isinstance(result.content, str)
##################
### Test Tools ###
##################
def test_with_structured_output() -> None:
class Person(BaseModel):
name: str
age: int
chain = ChatAnthropicTools( # type: ignore[call-arg, call-arg]
model_name=BIG_MODEL_NAME,
temperature=0,
default_headers={"anthropic-beta": "tools-2024-04-04"},
).with_structured_output(Person)
result = chain.invoke("Erick is 27 years old")
assert isinstance(result, Person)
assert result.name == "Erick"
assert result.age == 27
def test_anthropic_complex_structured_output() -> None:
class ToneEnum(str, Enum):
positive = "positive"
negative = "negative"
class Email(BaseModel):
"""Relevant information about an email."""
sender: Optional[str] = Field(
None, description="The sender's name, if available"
)
sender_phone_number: Optional[str] = Field(
None, description="The sender's phone number, if available"
)
sender_address: Optional[str] = Field(
None, description="The sender's address, if available"
)
action_items: List[str] = Field(
..., description="A list of action items requested by the email"
)
topic: str = Field(
..., description="High level description of what the email is about"
)
tone: ToneEnum = Field(..., description="The tone of the email.")
prompt = ChatPromptTemplate.from_messages(
[
(
"human",
"What can you tell me about the following email? Make sure to answer in the correct format: {email}", # noqa: E501
),
]
)
llm = ChatAnthropicTools( # type: ignore[call-arg, call-arg]
temperature=0,
model_name=BIG_MODEL_NAME,
default_headers={"anthropic-beta": "tools-2024-04-04"},
)
extraction_chain = prompt | llm.with_structured_output(Email)
response = extraction_chain.invoke(
{
"email": "From: Erick. The email is about the new project. The tone is positive. The action items are to send the report and to schedule a meeting." # noqa: E501
}
)
assert isinstance(response, Email)
|
0 | lc_public_repos/langchain/libs/partners/anthropic/tests | lc_public_repos/langchain/libs/partners/anthropic/tests/unit_tests/_utils.py | """A fake callback handler for testing purposes."""
from typing import Any, Union
from langchain_core.callbacks import BaseCallbackHandler
from pydantic import BaseModel
class BaseFakeCallbackHandler(BaseModel):
"""Base fake callback handler for testing."""
starts: int = 0
ends: int = 0
errors: int = 0
text: int = 0
ignore_llm_: bool = False
ignore_chain_: bool = False
ignore_agent_: bool = False
ignore_retriever_: bool = False
ignore_chat_model_: bool = False
# to allow for similar callback handlers that are not technically equal
fake_id: Union[str, None] = None
# add finer-grained counters for easier debugging of failing tests
chain_starts: int = 0
chain_ends: int = 0
llm_starts: int = 0
llm_ends: int = 0
llm_streams: int = 0
tool_starts: int = 0
tool_ends: int = 0
agent_actions: int = 0
agent_ends: int = 0
chat_model_starts: int = 0
retriever_starts: int = 0
retriever_ends: int = 0
retriever_errors: int = 0
retries: int = 0
class BaseFakeCallbackHandlerMixin(BaseFakeCallbackHandler):
"""Base fake callback handler mixin for testing."""
def on_llm_start_common(self) -> None:
self.llm_starts += 1
self.starts += 1
def on_llm_end_common(self) -> None:
self.llm_ends += 1
self.ends += 1
def on_llm_error_common(self) -> None:
self.errors += 1
def on_llm_new_token_common(self) -> None:
self.llm_streams += 1
def on_retry_common(self) -> None:
self.retries += 1
def on_chain_start_common(self) -> None:
self.chain_starts += 1
self.starts += 1
def on_chain_end_common(self) -> None:
self.chain_ends += 1
self.ends += 1
def on_chain_error_common(self) -> None:
self.errors += 1
def on_tool_start_common(self) -> None:
self.tool_starts += 1
self.starts += 1
def on_tool_end_common(self) -> None:
self.tool_ends += 1
self.ends += 1
def on_tool_error_common(self) -> None:
self.errors += 1
def on_agent_action_common(self) -> None:
self.agent_actions += 1
self.starts += 1
def on_agent_finish_common(self) -> None:
self.agent_ends += 1
self.ends += 1
def on_chat_model_start_common(self) -> None:
self.chat_model_starts += 1
self.starts += 1
def on_text_common(self) -> None:
self.text += 1
def on_retriever_start_common(self) -> None:
self.starts += 1
self.retriever_starts += 1
def on_retriever_end_common(self) -> None:
self.ends += 1
self.retriever_ends += 1
def on_retriever_error_common(self) -> None:
self.errors += 1
self.retriever_errors += 1
class FakeCallbackHandler(BaseCallbackHandler, BaseFakeCallbackHandlerMixin):
"""Fake callback handler for testing."""
@property
def ignore_llm(self) -> bool:
"""Whether to ignore LLM callbacks."""
return self.ignore_llm_
@property
def ignore_chain(self) -> bool:
"""Whether to ignore chain callbacks."""
return self.ignore_chain_
@property
def ignore_agent(self) -> bool:
"""Whether to ignore agent callbacks."""
return self.ignore_agent_
@property
def ignore_retriever(self) -> bool:
"""Whether to ignore retriever callbacks."""
return self.ignore_retriever_
def on_llm_start(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_llm_start_common()
def on_llm_new_token(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_llm_new_token_common()
def on_llm_end(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_llm_end_common()
def on_llm_error(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_llm_error_common()
def on_retry(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_retry_common()
def on_chain_start(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_chain_start_common()
def on_chain_end(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_chain_end_common()
def on_chain_error(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_chain_error_common()
def on_tool_start(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_tool_start_common()
def on_tool_end(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_tool_end_common()
def on_tool_error(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_tool_error_common()
def on_agent_action(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_agent_action_common()
def on_agent_finish(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_agent_finish_common()
def on_text(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_text_common()
def on_retriever_start(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_retriever_start_common()
def on_retriever_end(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_retriever_end_common()
def on_retriever_error(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_retriever_error_common()
# Overriding since BaseModel has __deepcopy__ method as well
def __deepcopy__(self, memo: dict) -> "FakeCallbackHandler": # type: ignore
return self
|
0 | lc_public_repos/langchain/libs/partners/anthropic/tests | lc_public_repos/langchain/libs/partners/anthropic/tests/unit_tests/test_output_parsers.py | from typing import Any, List, Literal
from langchain_core.messages import AIMessage
from langchain_core.outputs import ChatGeneration
from pydantic import BaseModel
from langchain_anthropic.output_parsers import ToolsOutputParser
_CONTENT: List = [
{
"type": "text",
"text": "thought",
},
{"type": "tool_use", "input": {"bar": 0}, "id": "1", "name": "_Foo1"},
{
"type": "text",
"text": "thought",
},
{"type": "tool_use", "input": {"baz": "a"}, "id": "2", "name": "_Foo2"},
]
_RESULT: List = [ChatGeneration(message=AIMessage(_CONTENT))] # type: ignore[misc]
class _Foo1(BaseModel):
bar: int
class _Foo2(BaseModel):
baz: Literal["a", "b"]
def test_tools_output_parser() -> None:
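    # `index` records each tool_use block's position within the raw content
    # list above (positions 1 and 3), not its rank among the tool calls.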
output_parser = ToolsOutputParser()
expected = [
{
"name": "_Foo1",
"args": {"bar": 0},
"id": "1",
"index": 1,
"type": "tool_call",
},
{
"name": "_Foo2",
"args": {"baz": "a"},
"id": "2",
"index": 3,
"type": "tool_call",
},
]
actual = output_parser.parse_result(_RESULT)
assert expected == actual
def test_tools_output_parser_args_only() -> None:
output_parser = ToolsOutputParser(args_only=True)
expected = [
{"bar": 0},
{"baz": "a"},
]
actual = output_parser.parse_result(_RESULT)
assert expected == actual
expected = []
actual = output_parser.parse_result([ChatGeneration(message=AIMessage(""))]) # type: ignore[misc]
assert expected == actual
def test_tools_output_parser_first_tool_only() -> None:
output_parser = ToolsOutputParser(first_tool_only=True)
expected: Any = {
"name": "_Foo1",
"args": {"bar": 0},
"id": "1",
"index": 1,
"type": "tool_call",
}
actual = output_parser.parse_result(_RESULT)
assert expected == actual
expected = None
actual = output_parser.parse_result([ChatGeneration(message=AIMessage(""))]) # type: ignore[misc]
assert expected == actual
def test_tools_output_parser_pydantic() -> None:
output_parser = ToolsOutputParser(pydantic_schemas=[_Foo1, _Foo2])
expected = [_Foo1(bar=0), _Foo2(baz="a")]
actual = output_parser.parse_result(_RESULT)
assert expected == actual
def test_tools_output_parser_empty_content() -> None:
class ChartType(BaseModel):
chart_type: Literal["pie", "line", "bar"]
output_parser = ToolsOutputParser(
first_tool_only=True, pydantic_schemas=[ChartType]
)
message = AIMessage(
"",
tool_calls=[
{
"name": "ChartType",
"args": {"chart_type": "pie"},
"id": "foo",
"type": "tool_call",
}
],
)
actual = output_parser.invoke(message)
expected = ChartType(chart_type="pie")
assert expected == actual
|
0 | lc_public_repos/langchain/libs/partners/anthropic/tests | lc_public_repos/langchain/libs/partners/anthropic/tests/unit_tests/test_standard.py | """Standard LangChain interface tests"""
from typing import Type
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ChatModelUnitTests
from langchain_anthropic import ChatAnthropic
class TestAnthropicStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatAnthropic
@property
def chat_model_params(self) -> dict:
return {"model": "claude-3-haiku-20240307"}
|
0 | lc_public_repos/langchain/libs/partners/anthropic/tests | lc_public_repos/langchain/libs/partners/anthropic/tests/unit_tests/test_chat_models.py | """Test chat model integration."""
import os
from typing import Any, Callable, Dict, Literal, Type, cast
import pytest
from anthropic.types import Message, TextBlock, Usage
from anthropic.types.beta.prompt_caching import (
PromptCachingBetaMessage,
PromptCachingBetaUsage,
)
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage
from langchain_core.runnables import RunnableBinding
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field, SecretStr
from pytest import CaptureFixture, MonkeyPatch
from langchain_anthropic import ChatAnthropic
from langchain_anthropic.chat_models import (
_format_messages,
_merge_messages,
convert_to_anthropic_tool,
)
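# A dummy key lets ChatAnthropic be constructed in unit tests without a real
# secret; no network calls are made here.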
os.environ["ANTHROPIC_API_KEY"] = "foo"
def test_initialization() -> None:
"""Test chat model initialization."""
for model in [
ChatAnthropic(model_name="claude-instant-1.2", api_key="xyz", timeout=2), # type: ignore[arg-type, call-arg]
ChatAnthropic( # type: ignore[call-arg, call-arg, call-arg]
model="claude-instant-1.2",
anthropic_api_key="xyz",
default_request_timeout=2,
base_url="https://api.anthropic.com",
),
]:
assert model.model == "claude-instant-1.2"
assert cast(SecretStr, model.anthropic_api_key).get_secret_value() == "xyz"
assert model.default_request_timeout == 2.0
assert model.anthropic_api_url == "https://api.anthropic.com"
@pytest.mark.requires("anthropic")
def test_anthropic_model_name_param() -> None:
llm = ChatAnthropic(model_name="foo") # type: ignore[call-arg, call-arg]
assert llm.model == "foo"
@pytest.mark.requires("anthropic")
def test_anthropic_model_param() -> None:
llm = ChatAnthropic(model="foo") # type: ignore[call-arg]
assert llm.model == "foo"
@pytest.mark.requires("anthropic")
def test_anthropic_model_kwargs() -> None:
llm = ChatAnthropic(model_name="foo", model_kwargs={"foo": "bar"}) # type: ignore[call-arg, call-arg]
assert llm.model_kwargs == {"foo": "bar"}
@pytest.mark.requires("anthropic")
def test_anthropic_fields_in_model_kwargs() -> None:
"""Test that for backwards compatibility fields can be passed in as model_kwargs."""
llm = ChatAnthropic(model="foo", model_kwargs={"max_tokens_to_sample": 5}) # type: ignore[call-arg]
assert llm.max_tokens == 5
llm = ChatAnthropic(model="foo", model_kwargs={"max_tokens": 5}) # type: ignore[call-arg]
assert llm.max_tokens == 5
@pytest.mark.requires("anthropic")
def test_anthropic_incorrect_field() -> None:
with pytest.warns(match="not default parameter"):
llm = ChatAnthropic(model="foo", foo="bar") # type: ignore[call-arg, call-arg]
assert llm.model_kwargs == {"foo": "bar"}
@pytest.mark.requires("anthropic")
def test_anthropic_initialization() -> None:
"""Test anthropic initialization."""
# Verify that chat anthropic can be initialized using a secret key provided
# as a parameter rather than an environment variable.
ChatAnthropic(model="test", anthropic_api_key="test") # type: ignore[call-arg, call-arg]
def test__format_output() -> None:
anthropic_msg = Message(
id="foo",
content=[TextBlock(type="text", text="bar")],
model="baz",
role="assistant",
stop_reason=None,
stop_sequence=None,
usage=Usage(input_tokens=2, output_tokens=1),
type="message",
)
expected = AIMessage( # type: ignore[misc]
"bar",
usage_metadata={
"input_tokens": 2,
"output_tokens": 1,
"total_tokens": 3,
"input_token_details": {},
},
)
llm = ChatAnthropic(model="test", anthropic_api_key="test") # type: ignore[call-arg, call-arg]
actual = llm._format_output(anthropic_msg)
assert actual.generations[0].message == expected
def test__format_output_cached() -> None:
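    # Cache read/creation tokens are folded into input_tokens (2 + 3 + 4 = 9)
    # and broken out separately under input_token_details.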
anthropic_msg = PromptCachingBetaMessage(
id="foo",
content=[TextBlock(type="text", text="bar")],
model="baz",
role="assistant",
stop_reason=None,
stop_sequence=None,
usage=PromptCachingBetaUsage(
input_tokens=2,
output_tokens=1,
cache_creation_input_tokens=3,
cache_read_input_tokens=4,
),
type="message",
)
expected = AIMessage( # type: ignore[misc]
"bar",
usage_metadata={
"input_tokens": 9,
"output_tokens": 1,
"total_tokens": 10,
"input_token_details": {"cache_creation": 3, "cache_read": 4},
},
)
llm = ChatAnthropic(model="test", anthropic_api_key="test") # type: ignore[call-arg, call-arg]
actual = llm._format_output(anthropic_msg)
assert actual.generations[0].message == expected
def test__merge_messages() -> None:
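    # Anthropic's Messages API expects tool results as tool_result blocks
    # inside a user-role turn, so consecutive ToolMessages (plus any adjacent
    # HumanMessage) are merged into a single HumanMessage.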
messages = [
SystemMessage("foo"), # type: ignore[misc]
HumanMessage("bar"), # type: ignore[misc]
AIMessage( # type: ignore[misc]
[
{"text": "baz", "type": "text"},
{
"tool_input": {"a": "b"},
"type": "tool_use",
"id": "1",
"text": None,
"name": "buz",
},
{"text": "baz", "type": "text"},
{
"tool_input": {"a": "c"},
"type": "tool_use",
"id": "2",
"text": None,
"name": "blah",
},
{
"tool_input": {"a": "c"},
"type": "tool_use",
"id": "3",
"text": None,
"name": "blah",
},
]
),
ToolMessage("buz output", tool_call_id="1", status="error"), # type: ignore[misc]
ToolMessage(
content=[
{
"type": "image",
"source": {
"type": "base64",
"media_type": "image/jpeg",
"data": "fake_image_data",
},
},
],
tool_call_id="2",
), # type: ignore[misc]
ToolMessage([], tool_call_id="3"), # type: ignore[misc]
HumanMessage("next thing"), # type: ignore[misc]
]
expected = [
SystemMessage("foo"), # type: ignore[misc]
HumanMessage("bar"), # type: ignore[misc]
AIMessage( # type: ignore[misc]
[
{"text": "baz", "type": "text"},
{
"tool_input": {"a": "b"},
"type": "tool_use",
"id": "1",
"text": None,
"name": "buz",
},
{"text": "baz", "type": "text"},
{
"tool_input": {"a": "c"},
"type": "tool_use",
"id": "2",
"text": None,
"name": "blah",
},
{
"tool_input": {"a": "c"},
"type": "tool_use",
"id": "3",
"text": None,
"name": "blah",
},
]
),
HumanMessage( # type: ignore[misc]
[
{
"type": "tool_result",
"content": "buz output",
"tool_use_id": "1",
"is_error": True,
},
{
"type": "tool_result",
"content": [
{
"type": "image",
"source": {
"type": "base64",
"media_type": "image/jpeg",
"data": "fake_image_data",
},
},
],
"tool_use_id": "2",
"is_error": False,
},
{
"type": "tool_result",
"content": [],
"tool_use_id": "3",
"is_error": False,
},
{"type": "text", "text": "next thing"},
]
),
]
actual = _merge_messages(messages)
assert expected == actual
# Test tool message case
messages = [
ToolMessage("buz output", tool_call_id="1"), # type: ignore[misc]
ToolMessage( # type: ignore[misc]
content=[
{"type": "tool_result", "content": "blah output", "tool_use_id": "2"}
],
tool_call_id="2",
),
]
expected = [
HumanMessage( # type: ignore[misc]
[
{
"type": "tool_result",
"content": "buz output",
"tool_use_id": "1",
"is_error": False,
},
{"type": "tool_result", "content": "blah output", "tool_use_id": "2"},
]
)
]
actual = _merge_messages(messages)
assert expected == actual
def test__merge_messages_mutation() -> None:
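    # Guard against _merge_messages mutating the caller's message list.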
original_messages = [
HumanMessage([{"type": "text", "text": "bar"}]), # type: ignore[misc]
HumanMessage("next thing"), # type: ignore[misc]
]
messages = [
HumanMessage([{"type": "text", "text": "bar"}]), # type: ignore[misc]
HumanMessage("next thing"), # type: ignore[misc]
]
expected = [
HumanMessage( # type: ignore[misc]
[{"type": "text", "text": "bar"}, {"type": "text", "text": "next thing"}]
),
]
actual = _merge_messages(messages)
assert expected == actual
assert messages == original_messages
@pytest.fixture()
def pydantic() -> Type[BaseModel]:
class dummy_function(BaseModel):
"""dummy function"""
arg1: int = Field(..., description="foo")
arg2: Literal["bar", "baz"] = Field(..., description="one of 'bar', 'baz'")
return dummy_function
@pytest.fixture()
def function() -> Callable:
def dummy_function(arg1: int, arg2: Literal["bar", "baz"]) -> None:
"""dummy function
Args:
arg1: foo
arg2: one of 'bar', 'baz'
"""
pass
return dummy_function
@pytest.fixture()
def dummy_tool() -> BaseTool:
class Schema(BaseModel):
arg1: int = Field(..., description="foo")
arg2: Literal["bar", "baz"] = Field(..., description="one of 'bar', 'baz'")
class DummyFunction(BaseTool): # type: ignore[override]
args_schema: Type[BaseModel] = Schema
name: str = "dummy_function"
description: str = "dummy function"
def _run(self, *args: Any, **kwargs: Any) -> Any:
pass
return DummyFunction()
@pytest.fixture()
def json_schema() -> Dict:
return {
"title": "dummy_function",
"description": "dummy function",
"type": "object",
"properties": {
"arg1": {"description": "foo", "type": "integer"},
"arg2": {
"description": "one of 'bar', 'baz'",
"enum": ["bar", "baz"],
"type": "string",
},
},
"required": ["arg1", "arg2"],
}
@pytest.fixture()
def openai_function() -> Dict:
return {
"name": "dummy_function",
"description": "dummy function",
"parameters": {
"type": "object",
"properties": {
"arg1": {"description": "foo", "type": "integer"},
"arg2": {
"description": "one of 'bar', 'baz'",
"enum": ["bar", "baz"],
"type": "string",
},
},
"required": ["arg1", "arg2"],
},
}
def test_convert_to_anthropic_tool(
pydantic: Type[BaseModel],
function: Callable,
dummy_tool: BaseTool,
json_schema: Dict,
openai_function: Dict,
) -> None:
expected = {
"name": "dummy_function",
"description": "dummy function",
"input_schema": {
"type": "object",
"properties": {
"arg1": {"description": "foo", "type": "integer"},
"arg2": {
"description": "one of 'bar', 'baz'",
"enum": ["bar", "baz"],
"type": "string",
},
},
"required": ["arg1", "arg2"],
},
}
for fn in (pydantic, function, dummy_tool, json_schema, expected, openai_function):
actual = convert_to_anthropic_tool(fn) # type: ignore
assert actual == expected
def test__format_messages_with_tool_calls() -> None:
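    # AIMessages with empty content but populated tool_calls should render as
    # assistant turns containing only tool_use blocks, and image_url tool
    # outputs should be converted to Anthropic's base64 image source format.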
system = SystemMessage("fuzz") # type: ignore[misc]
human = HumanMessage("foo") # type: ignore[misc]
ai = AIMessage(
"", # with empty string
tool_calls=[{"name": "bar", "id": "1", "args": {"baz": "buzz"}}],
)
ai2 = AIMessage(
[], # with empty list
tool_calls=[{"name": "bar", "id": "2", "args": {"baz": "buzz"}}],
)
tool = ToolMessage(
"blurb",
tool_call_id="1",
)
tool_image_url = ToolMessage(
[{"type": "image_url", "image_url": {"url": "data:image/jpeg;base64,...."}}],
tool_call_id="2",
)
tool_image = ToolMessage(
[
{
"type": "image",
"source": {
"data": "....",
"type": "base64",
"media_type": "image/jpeg",
},
}
],
tool_call_id="3",
)
messages = [system, human, ai, tool, ai2, tool_image_url, tool_image]
expected = (
"fuzz",
[
{"role": "user", "content": "foo"},
{
"role": "assistant",
"content": [
{
"type": "tool_use",
"name": "bar",
"id": "1",
"input": {"baz": "buzz"},
}
],
},
{
"role": "user",
"content": [
{
"type": "tool_result",
"content": "blurb",
"tool_use_id": "1",
"is_error": False,
}
],
},
{
"role": "assistant",
"content": [
{
"type": "tool_use",
"name": "bar",
"id": "2",
"input": {"baz": "buzz"},
}
],
},
{
"role": "user",
"content": [
{
"type": "tool_result",
"content": [
{
"type": "image",
"source": {
"data": "....",
"type": "base64",
"media_type": "image/jpeg",
},
}
],
"tool_use_id": "2",
"is_error": False,
},
{
"type": "tool_result",
"content": [
{
"type": "image",
"source": {
"data": "....",
"type": "base64",
"media_type": "image/jpeg",
},
}
],
"tool_use_id": "3",
"is_error": False,
},
],
},
],
)
actual = _format_messages(messages)
assert expected == actual
def test__format_messages_with_str_content_and_tool_calls() -> None:
system = SystemMessage("fuzz") # type: ignore[misc]
human = HumanMessage("foo") # type: ignore[misc]
# If content and tool_calls are specified and content is a string, then both are
# included with content first.
ai = AIMessage( # type: ignore[misc]
"thought",
tool_calls=[{"name": "bar", "id": "1", "args": {"baz": "buzz"}}],
)
tool = ToolMessage("blurb", tool_call_id="1") # type: ignore[misc]
messages = [system, human, ai, tool]
expected = (
"fuzz",
[
{"role": "user", "content": "foo"},
{
"role": "assistant",
"content": [
{"type": "text", "text": "thought"},
{
"type": "tool_use",
"name": "bar",
"id": "1",
"input": {"baz": "buzz"},
},
],
},
{
"role": "user",
"content": [
{
"type": "tool_result",
"content": "blurb",
"tool_use_id": "1",
"is_error": False,
}
],
},
],
)
actual = _format_messages(messages)
assert expected == actual
def test__format_messages_with_list_content_and_tool_calls() -> None:
system = SystemMessage("fuzz") # type: ignore[misc]
human = HumanMessage("foo") # type: ignore[misc]
ai = AIMessage( # type: ignore[misc]
[{"type": "text", "text": "thought"}],
tool_calls=[{"name": "bar", "id": "1", "args": {"baz": "buzz"}}],
)
tool = ToolMessage( # type: ignore[misc]
"blurb",
tool_call_id="1",
)
messages = [system, human, ai, tool]
expected = (
"fuzz",
[
{"role": "user", "content": "foo"},
{
"role": "assistant",
"content": [
{"type": "text", "text": "thought"},
{
"type": "tool_use",
"name": "bar",
"id": "1",
"input": {"baz": "buzz"},
},
],
},
{
"role": "user",
"content": [
{
"type": "tool_result",
"content": "blurb",
"tool_use_id": "1",
"is_error": False,
}
],
},
],
)
actual = _format_messages(messages)
assert expected == actual
def test__format_messages_with_tool_use_blocks_and_tool_calls() -> None:
"""Show that tool_calls are preferred to tool_use blocks when both have same id."""
system = SystemMessage("fuzz") # type: ignore[misc]
human = HumanMessage("foo") # type: ignore[misc]
# NOTE: tool_use block in contents and tool_calls have different arguments.
ai = AIMessage( # type: ignore[misc]
[
{"type": "text", "text": "thought"},
{
"type": "tool_use",
"name": "bar",
"id": "1",
"input": {"baz": "NOT_BUZZ"},
},
],
tool_calls=[{"name": "bar", "id": "1", "args": {"baz": "BUZZ"}}],
)
tool = ToolMessage("blurb", tool_call_id="1") # type: ignore[misc]
messages = [system, human, ai, tool]
expected = (
"fuzz",
[
{"role": "user", "content": "foo"},
{
"role": "assistant",
"content": [
{"type": "text", "text": "thought"},
{
"type": "tool_use",
"name": "bar",
"id": "1",
"input": {"baz": "BUZZ"}, # tool_calls value preferred.
},
],
},
{
"role": "user",
"content": [
{
"type": "tool_result",
"content": "blurb",
"tool_use_id": "1",
"is_error": False,
}
],
},
],
)
actual = _format_messages(messages)
assert expected == actual
def test__format_messages_with_cache_control() -> None:
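    # cache_control annotations must pass through formatting unchanged so the
    # API's prompt caching can key off the marked blocks.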
messages = [
SystemMessage(
[
{"type": "text", "text": "foo", "cache_control": {"type": "ephemeral"}},
]
),
HumanMessage(
[
{"type": "text", "text": "foo", "cache_control": {"type": "ephemeral"}},
{
"type": "text",
"text": "foo",
},
]
),
]
expected_system = [
{"type": "text", "text": "foo", "cache_control": {"type": "ephemeral"}}
]
expected_messages = [
{
"role": "user",
"content": [
{"type": "text", "text": "foo", "cache_control": {"type": "ephemeral"}},
{"type": "text", "text": "foo"},
],
}
]
actual_system, actual_messages = _format_messages(messages)
assert expected_system == actual_system
assert expected_messages == actual_messages
def test__format_messages_with_multiple_system() -> None:
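    # SystemMessages are hoisted out of the turn list (wherever they appear)
    # and concatenated, in order, into the top-level system parameter.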
messages = [
HumanMessage("baz"),
SystemMessage("bar"),
SystemMessage("baz"),
SystemMessage(
[
{"type": "text", "text": "foo", "cache_control": {"type": "ephemeral"}},
]
),
]
expected_system = [
{"type": "text", "text": "bar"},
{"type": "text", "text": "baz"},
{"type": "text", "text": "foo", "cache_control": {"type": "ephemeral"}},
]
expected_messages = [{"role": "user", "content": "baz"}]
actual_system, actual_messages = _format_messages(messages)
assert expected_system == actual_system
assert expected_messages == actual_messages
def test_anthropic_api_key_is_secret_string() -> None:
"""Test that the API key is stored as a SecretStr."""
chat_model = ChatAnthropic( # type: ignore[call-arg, call-arg]
model="claude-3-opus-20240229",
anthropic_api_key="secret-api-key",
)
assert isinstance(chat_model.anthropic_api_key, SecretStr)
def test_anthropic_api_key_masked_when_passed_from_env(
monkeypatch: MonkeyPatch, capsys: CaptureFixture
) -> None:
"""Test that the API key is masked when passed from an environment variable."""
monkeypatch.setenv("ANTHROPIC_API_KEY ", "secret-api-key")
chat_model = ChatAnthropic( # type: ignore[call-arg]
model="claude-3-opus-20240229",
)
print(chat_model.anthropic_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
def test_anthropic_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
"""Test that the API key is masked when passed via the constructor."""
chat_model = ChatAnthropic( # type: ignore[call-arg, call-arg]
model="claude-3-opus-20240229",
anthropic_api_key="secret-api-key",
)
print(chat_model.anthropic_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
def test_anthropic_uses_actual_secret_value_from_secretstr() -> None:
"""Test that the actual secret value is correctly retrieved."""
chat_model = ChatAnthropic( # type: ignore[call-arg, call-arg]
model="claude-3-opus-20240229",
anthropic_api_key="secret-api-key",
)
assert (
cast(SecretStr, chat_model.anthropic_api_key).get_secret_value()
== "secret-api-key"
)
class GetWeather(BaseModel):
"""Get the current weather in a given location"""
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
def test_anthropic_bind_tools_tool_choice() -> None:
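    # bind_tools normalizes tool_choice: a bare tool name becomes
    # {"type": "tool", "name": ...}, while "auto" and "any" map to dict forms.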
chat_model = ChatAnthropic( # type: ignore[call-arg, call-arg]
model="claude-3-opus-20240229",
anthropic_api_key="secret-api-key",
)
chat_model_with_tools = chat_model.bind_tools(
[GetWeather], tool_choice={"type": "tool", "name": "GetWeather"}
)
assert cast(RunnableBinding, chat_model_with_tools).kwargs["tool_choice"] == {
"type": "tool",
"name": "GetWeather",
}
chat_model_with_tools = chat_model.bind_tools(
[GetWeather], tool_choice="GetWeather"
)
assert cast(RunnableBinding, chat_model_with_tools).kwargs["tool_choice"] == {
"type": "tool",
"name": "GetWeather",
}
chat_model_with_tools = chat_model.bind_tools([GetWeather], tool_choice="auto")
assert cast(RunnableBinding, chat_model_with_tools).kwargs["tool_choice"] == {
"type": "auto"
}
chat_model_with_tools = chat_model.bind_tools([GetWeather], tool_choice="any")
assert cast(RunnableBinding, chat_model_with_tools).kwargs["tool_choice"] == {
"type": "any"
}
|
0 | lc_public_repos/langchain/libs/partners/anthropic/tests | lc_public_repos/langchain/libs/partners/anthropic/tests/unit_tests/test_imports.py | from langchain_anthropic import __all__
EXPECTED_ALL = ["ChatAnthropicMessages", "ChatAnthropic", "Anthropic", "AnthropicLLM"]
def test_all_imports() -> None:
assert sorted(EXPECTED_ALL) == sorted(__all__)
|