index | repo_id | file_path | content |
|---|---|---|---|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/docstore/__init__.py | """**Docstores** are classes to store and load Documents.
The **Docstore** is a simplified version of the Document Loader.
**Class hierarchy:**
.. code-block::
Docstore --> <name> # Examples: InMemoryDocstore, Wikipedia
**Main helpers:**
.. code-block::
Document, AddableMixin
"""
import importlib
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from langchain_community.docstore.arbitrary_fn import (
DocstoreFn,
)
from langchain_community.docstore.in_memory import (
InMemoryDocstore,
)
from langchain_community.docstore.wikipedia import (
Wikipedia,
)
_module_lookup = {
"DocstoreFn": "langchain_community.docstore.arbitrary_fn",
"InMemoryDocstore": "langchain_community.docstore.in_memory",
"Wikipedia": "langchain_community.docstore.wikipedia",
}
def __getattr__(name: str) -> Any:
if name in _module_lookup:
module = importlib.import_module(_module_lookup[name])
return getattr(module, name)
raise AttributeError(f"module {__name__} has no attribute {name}")
__all__ = ["DocstoreFn", "InMemoryDocstore", "Wikipedia"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chains/__init__.py | """
Chains module for langchain_community
This module contains the community chains.
"""
import importlib
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from langchain_community.chains.pebblo_retrieval.base import PebbloRetrievalQA
__all__ = ["PebbloRetrievalQA"]
_module_lookup = {
"PebbloRetrievalQA": "langchain_community.chains.pebblo_retrieval.base"
}
def __getattr__(name: str) -> Any:
if name in _module_lookup:
module = importlib.import_module(_module_lookup[name])
return getattr(module, name)
raise AttributeError(f"module {__name__} has no attribute {name}")
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chains/llm_requests.py | """Chain that hits a URL and then uses an LLM to parse results."""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from langchain.chains import LLMChain
from langchain.chains.base import Chain
from langchain_core.callbacks import CallbackManagerForChainRun
from pydantic import ConfigDict, Field, model_validator
from langchain_community.utilities.requests import TextRequestsWrapper
DEFAULT_HEADERS = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36" # noqa: E501
}
class LLMRequestsChain(Chain):
"""Chain that requests a URL and then uses an LLM to parse results.
**Security Note**: This chain can make GET requests to arbitrary URLs,
including internal URLs.
Control access to who can run this chain and what network access
this chain has.
See https://python.langchain.com/docs/security for more information.
"""
llm_chain: LLMChain # type: ignore[valid-type]
requests_wrapper: TextRequestsWrapper = Field(
default_factory=lambda: TextRequestsWrapper(headers=DEFAULT_HEADERS),
exclude=True,
)
text_length: int = 8000
requests_key: str = "requests_result" #: :meta private:
input_key: str = "url" #: :meta private:
output_key: str = "output" #: :meta private:
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> List[str]:
"""Will be whatever keys the prompt expects.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Will always return text key.
:meta private:
"""
return [self.output_key]
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key and python package exists in environment."""
try:
from bs4 import BeautifulSoup # noqa: F401
except ImportError:
raise ImportError(
"Could not import bs4 python package. "
"Please install it with `pip install bs4`."
)
return values
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
from bs4 import BeautifulSoup
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
# Other keys are assumed to be needed for LLM prediction
other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
url = inputs[self.input_key]
res = self.requests_wrapper.get(url)
# extract the text from the html
soup = BeautifulSoup(res, "html.parser")
other_keys[self.requests_key] = soup.get_text()[: self.text_length]
result = self.llm_chain.predict( # type: ignore[attr-defined]
callbacks=_run_manager.get_child(), **other_keys
)
return {self.output_key: result}
@property
def _chain_type(self) -> str:
return "llm_requests_chain"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/ernie_functions/base.py | """Methods for creating chains that use Ernie function-calling APIs."""
import inspect
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from langchain.chains import LLMChain
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import (
BaseGenerationOutputParser,
BaseLLMOutputParser,
BaseOutputParser,
)
from langchain_core.prompts import BasePromptTemplate
from langchain_core.runnables import Runnable
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel
from langchain_community.output_parsers.ernie_functions import (
JsonOutputFunctionsParser,
PydanticAttrOutputFunctionsParser,
PydanticOutputFunctionsParser,
)
from langchain_community.utils.ernie_functions import convert_pydantic_to_ernie_function
PYTHON_TO_JSON_TYPES = {
"str": "string",
"int": "number",
"float": "number",
"bool": "boolean",
}
def _get_python_function_name(function: Callable) -> str:
"""Get the name of a Python function."""
return function.__name__
def _parse_python_function_docstring(function: Callable) -> Tuple[str, dict]:
"""Parse the function and argument descriptions from the docstring of a function.
Assumes the function docstring follows Google Python style guide.
"""
docstring = inspect.getdoc(function)
if docstring:
docstring_blocks = docstring.split("\n\n")
descriptors = []
args_block = None
past_descriptors = False
for block in docstring_blocks:
if block.startswith("Args:"):
args_block = block
break
elif block.startswith("Returns:") or block.startswith("Example:"):
# Don't break in case Args come after
past_descriptors = True
elif not past_descriptors:
descriptors.append(block)
else:
continue
description = " ".join(descriptors)
else:
description = ""
args_block = None
arg_descriptions = {}
if args_block:
arg = None
for line in args_block.split("\n")[1:]:
if ":" in line:
arg, desc = line.split(":", maxsplit=1)
arg_descriptions[arg.strip()] = desc.strip()
elif arg:
arg_descriptions[arg.strip()] += " " + line.strip()
return description, arg_descriptions
def _get_python_function_arguments(function: Callable, arg_descriptions: dict) -> dict:
"""Get JsonSchema describing a Python functions arguments.
Assumes all function arguments are of primitive types (int, float, str, bool) or
are subclasses of pydantic.BaseModel.
"""
properties = {}
annotations = inspect.getfullargspec(function).annotations
for arg, arg_type in annotations.items():
if arg == "return":
continue
if isinstance(arg_type, type) and is_basemodel_subclass(arg_type):
# Mypy error:
# "type" has no attribute "schema"
properties[arg] = arg_type.schema() # type: ignore[attr-defined]
elif arg_type.__name__ in PYTHON_TO_JSON_TYPES:
properties[arg] = {"type": PYTHON_TO_JSON_TYPES[arg_type.__name__]}
if arg in arg_descriptions:
if arg not in properties:
properties[arg] = {}
properties[arg]["description"] = arg_descriptions[arg]
return properties
def _get_python_function_required_args(function: Callable) -> List[str]:
"""Get the required arguments for a Python function."""
spec = inspect.getfullargspec(function)
required = spec.args[: -len(spec.defaults)] if spec.defaults else spec.args
required += [k for k in spec.kwonlyargs if k not in (spec.kwonlydefaults or {})]
is_class = type(function) is type
if is_class and required[0] == "self":
required = required[1:]
return required
def convert_python_function_to_ernie_function(
function: Callable,
) -> Dict[str, Any]:
"""Convert a Python function to an Ernie function-calling API compatible dict.
Assumes the Python function has type hints and a docstring with a description. If
the docstring has Google Python style argument descriptions, these will be
included as well.
"""
description, arg_descriptions = _parse_python_function_docstring(function)
return {
"name": _get_python_function_name(function),
"description": description,
"parameters": {
"type": "object",
"properties": _get_python_function_arguments(function, arg_descriptions),
"required": _get_python_function_required_args(function),
},
}
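# Illustrative sketch (not part of the upstream module): how the introspection
# helpers above turn a plain Python function into an Ernie function definition.
# ``multiply`` is a made-up example function, defined only for this demo.
if __name__ == "__main__":
    import json

    def multiply(a: int, b: int = 2) -> int:
        """Multiply two integers.

        Args:
            a: The first factor.
            b: The second factor.
        """
        return a * b

    print(json.dumps(convert_python_function_to_ernie_function(multiply), indent=2))
    # The description comes from the docstring, the per-argument descriptions
    # from the Google-style "Args:" block, and only ``a`` ends up in "required"
    # because ``b`` has a default value.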
def convert_to_ernie_function(
function: Union[Dict[str, Any], Type[BaseModel], Callable],
) -> Dict[str, Any]:
"""Convert a raw function/class to an Ernie function.
Args:
function: Either a dictionary, a pydantic.BaseModel class, or a Python function.
If a dictionary is passed in, it is assumed to already be a valid Ernie
function.
Returns:
A dict version of the passed in function which is compatible with the
Ernie function-calling API.
"""
if isinstance(function, dict):
return function
elif isinstance(function, type) and is_basemodel_subclass(function):
return cast(Dict, convert_pydantic_to_ernie_function(function))
elif callable(function):
return convert_python_function_to_ernie_function(function)
else:
raise ValueError(
f"Unsupported function type {type(function)}. Functions must be passed in"
f" as Dict, pydantic.BaseModel, or Callable."
)
def get_ernie_output_parser(
functions: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable]],
) -> Union[BaseOutputParser, BaseGenerationOutputParser]:
"""Get the appropriate function output parser given the user functions.
Args:
functions: Sequence where element is a dictionary, a pydantic.BaseModel class,
or a Python function. If a dictionary is passed in, it is assumed to
already be a valid Ernie function.
Returns:
A PydanticOutputFunctionsParser if functions are Pydantic classes, otherwise
a JsonOutputFunctionsParser. If there's only one function and it is
not a Pydantic class, then the output parser will automatically extract
only the function arguments and not the function name.
"""
function_names = [convert_to_ernie_function(f)["name"] for f in functions]
if isinstance(functions[0], type) and is_basemodel_subclass(functions[0]):
if len(functions) > 1:
pydantic_schema: Union[Dict, Type[BaseModel]] = {
name: fn for name, fn in zip(function_names, functions)
}
else:
pydantic_schema = functions[0]
output_parser: Union[BaseOutputParser, BaseGenerationOutputParser] = (
PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema)
)
else:
output_parser = JsonOutputFunctionsParser(args_only=len(functions) <= 1)
return output_parser
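# Illustrative sketch (not part of the upstream module): which parser
# ``get_ernie_output_parser`` picks. Pydantic models get a
# PydanticOutputFunctionsParser; anything else falls back to JSON parsing,
# args-only when a single function is passed. ``City`` and
# ``lookup_population`` are made up for this demo.
if __name__ == "__main__":
    class City(BaseModel):
        """A city and the country it belongs to."""

        name: str
        country: str

    def lookup_population(city: str) -> int:
        """Look up the population of a city."""
        return 0

    print(type(get_ernie_output_parser([City])).__name__)
    # -> PydanticOutputFunctionsParser
    print(type(get_ernie_output_parser([lookup_population])).__name__)
    # -> JsonOutputFunctionsParser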
def create_ernie_fn_runnable(
functions: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable]],
llm: Runnable,
prompt: BasePromptTemplate,
*,
output_parser: Optional[Union[BaseOutputParser, BaseGenerationOutputParser]] = None,
**kwargs: Any,
) -> Runnable:
"""Create a runnable sequence that uses Ernie functions.
Args:
functions: A sequence of either dictionaries, pydantic.BaseModel classes, or
Python functions. If dictionaries are passed in, they are assumed to
already be valid Ernie functions. If only a single
function is passed in, then it will be enforced that the model use that
function. pydantic.BaseModels and Python functions should have docstrings
describing what the function does. For best results, pydantic.BaseModels
should have descriptions of the parameters and Python functions should have
Google Python style args descriptions in the docstring. Additionally,
Python functions should only use primitive types (str, int, float, bool) or
pydantic.BaseModels for arguments.
llm: Language model to use, assumed to support the Ernie function-calling API.
prompt: BasePromptTemplate to pass to the model.
output_parser: BaseLLMOutputParser to use for parsing model outputs. By default
will be inferred from the function types. If pydantic.BaseModels are passed
in, then the OutputParser will try to parse outputs using those. Otherwise
model outputs will simply be parsed as JSON. If multiple functions are
passed in and they are not pydantic.BaseModels, the chain output will
include both the name of the function that was returned and the arguments
to pass to the function.
Returns:
A runnable sequence that will pass in the given functions to the model when run.
Example:
.. code-block:: python
from typing import Optional
from langchain.chains.ernie_functions import create_ernie_fn_chain
from langchain_community.chat_models import ErnieBotChat
from langchain_core.prompts import ChatPromptTemplate
from pydantic import BaseModel, Field
class RecordPerson(BaseModel):
\"\"\"Record some identifying information about a person.\"\"\"
name: str = Field(..., description="The person's name")
age: int = Field(..., description="The person's age")
fav_food: Optional[str] = Field(None, description="The person's favorite food")
class RecordDog(BaseModel):
\"\"\"Record some identifying information about a dog.\"\"\"
name: str = Field(..., description="The dog's name")
color: str = Field(..., description="The dog's color")
fav_food: Optional[str] = Field(None, description="The dog's favorite food")
llm = ErnieBotChat(model_name="ERNIE-Bot-4")
prompt = ChatPromptTemplate.from_messages(
[
("user", "Make calls to the relevant function to record the entities in the following input: {input}"),
("assistant", "OK!"),
("user", "Tip: Make sure to answer in the correct format"),
]
)
chain = create_ernie_fn_runnable([RecordPerson, RecordDog], llm, prompt)
chain.invoke({"input": "Harry was a chubby brown beagle who loved chicken"})
# -> RecordDog(name="Harry", color="brown", fav_food="chicken")
""" # noqa: E501
if not functions:
raise ValueError("Need to pass in at least one function. Received zero.")
ernie_functions = [convert_to_ernie_function(f) for f in functions]
llm_kwargs: Dict[str, Any] = {"functions": ernie_functions, **kwargs}
if len(ernie_functions) == 1:
llm_kwargs["function_call"] = {"name": ernie_functions[0]["name"]}
output_parser = output_parser or get_ernie_output_parser(functions)
return prompt | llm.bind(**llm_kwargs) | output_parser
def create_structured_output_runnable(
output_schema: Union[Dict[str, Any], Type[BaseModel]],
llm: Runnable,
prompt: BasePromptTemplate,
*,
output_parser: Optional[Union[BaseOutputParser, BaseGenerationOutputParser]] = None,
**kwargs: Any,
) -> Runnable:
"""Create a runnable that uses an Ernie function to get a structured output.
Args:
output_schema: Either a dictionary or pydantic.BaseModel class. If a dictionary
is passed in, it's assumed to already be a valid JsonSchema.
For best results, pydantic.BaseModels should have docstrings describing what
the schema represents and descriptions for the parameters.
llm: Language model to use, assumed to support the Ernie function-calling API.
prompt: BasePromptTemplate to pass to the model.
output_parser: BaseLLMOutputParser to use for parsing model outputs. By default
will be inferred from the function types. If pydantic.BaseModels are passed
in, then the OutputParser will try to parse outputs using those. Otherwise
model outputs will simply be parsed as JSON.
Returns:
A runnable sequence that will pass the given function to the model when run.
Example:
.. code-block:: python
from typing import Optional
from langchain.chains.ernie_functions import create_structured_output_chain
from langchain_community.chat_models import ErnieBotChat
from langchain_core.prompts import ChatPromptTemplate
from pydantic import BaseModel, Field
class Dog(BaseModel):
\"\"\"Identifying information about a dog.\"\"\"
name: str = Field(..., description="The dog's name")
color: str = Field(..., description="The dog's color")
fav_food: Optional[str] = Field(None, description="The dog's favorite food")
llm = ErnieBotChat(model_name="ERNIE-Bot-4")
prompt = ChatPromptTemplate.from_messages(
[
("user", "Use the given format to extract information from the following input: {input}"),
("assistant", "OK!"),
("user", "Tip: Make sure to answer in the correct format"),
]
)
chain = create_structured_output_chain(Dog, llm, prompt)
chain.invoke({"input": "Harry was a chubby brown beagle who loved chicken"})
# -> Dog(name="Harry", color="brown", fav_food="chicken")
""" # noqa: E501
if isinstance(output_schema, dict):
function: Any = {
"name": "output_formatter",
"description": (
"Output formatter. Should always be used to format your response to the"
" user."
),
"parameters": output_schema,
}
else:
class _OutputFormatter(BaseModel):
"""Output formatter. Should always be used to format your response to the user.""" # noqa: E501
output: output_schema # type: ignore
function = _OutputFormatter
output_parser = output_parser or PydanticAttrOutputFunctionsParser(
pydantic_schema=_OutputFormatter, attr_name="output"
)
return create_ernie_fn_runnable(
[function],
llm,
prompt,
output_parser=output_parser,
**kwargs,
)
""" --- Legacy --- """
def create_ernie_fn_chain(
functions: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable]],
llm: BaseLanguageModel,
prompt: BasePromptTemplate,
*,
output_key: str = "function",
output_parser: Optional[BaseLLMOutputParser] = None,
**kwargs: Any,
) -> LLMChain: # type: ignore[valid-type]
"""[Legacy] Create an LLM chain that uses Ernie functions.
Args:
functions: A sequence of either dictionaries, pydantic.BaseModel classes, or
Python functions. If dictionaries are passed in, they are assumed to
already be valid Ernie functions. If only a single
function is passed in, then it will be enforced that the model use that
function. pydantic.BaseModels and Python functions should have docstrings
describing what the function does. For best results, pydantic.BaseModels
should have descriptions of the parameters and Python functions should have
Google Python style args descriptions in the docstring. Additionally,
Python functions should only use primitive types (str, int, float, bool) or
pydantic.BaseModels for arguments.
llm: Language model to use, assumed to support the Ernie function-calling API.
prompt: BasePromptTemplate to pass to the model.
output_key: The key to use when returning the output in LLMChain.__call__.
output_parser: BaseLLMOutputParser to use for parsing model outputs. By default
will be inferred from the function types. If pydantic.BaseModels are passed
in, then the OutputParser will try to parse outputs using those. Otherwise
model outputs will simply be parsed as JSON. If multiple functions are
passed in and they are not pydantic.BaseModels, the chain output will
include both the name of the function that was returned and the arguments
to pass to the function.
Returns:
An LLMChain that will pass in the given functions to the model when run.
Example:
.. code-block:: python
from typing import Optional
from langchain.chains.ernie_functions import create_ernie_fn_chain
from langchain_community.chat_models import ErnieBotChat
from langchain_core.prompts import ChatPromptTemplate
from pydantic import BaseModel, Field
class RecordPerson(BaseModel):
\"\"\"Record some identifying information about a person.\"\"\"
name: str = Field(..., description="The person's name")
age: int = Field(..., description="The person's age")
fav_food: Optional[str] = Field(None, description="The person's favorite food")
class RecordDog(BaseModel):
\"\"\"Record some identifying information about a dog.\"\"\"
name: str = Field(..., description="The dog's name")
color: str = Field(..., description="The dog's color")
fav_food: Optional[str] = Field(None, description="The dog's favorite food")
llm = ErnieBotChat(model_name="ERNIE-Bot-4")
prompt = ChatPromptTemplate.from_messages(
[
("user", "Make calls to the relevant function to record the entities in the following input: {input}"),
("assistant", "OK!"),
("user", "Tip: Make sure to answer in the correct format"),
]
)
chain = create_ernie_fn_chain([RecordPerson, RecordDog], llm, prompt)
chain.run("Harry was a chubby brown beagle who loved chicken")
# -> RecordDog(name="Harry", color="brown", fav_food="chicken")
""" # noqa: E501
if not functions:
raise ValueError("Need to pass in at least one function. Received zero.")
ernie_functions = [convert_to_ernie_function(f) for f in functions]
output_parser = output_parser or get_ernie_output_parser(functions)
llm_kwargs: Dict[str, Any] = {
"functions": ernie_functions,
}
if len(ernie_functions) == 1:
llm_kwargs["function_call"] = {"name": ernie_functions[0]["name"]}
llm_chain = LLMChain( # type: ignore[misc]
llm=llm,
prompt=prompt,
output_parser=output_parser,
llm_kwargs=llm_kwargs,
output_key=output_key,
**kwargs,
)
return llm_chain
def create_structured_output_chain(
output_schema: Union[Dict[str, Any], Type[BaseModel]],
llm: BaseLanguageModel,
prompt: BasePromptTemplate,
*,
output_key: str = "function",
output_parser: Optional[BaseLLMOutputParser] = None,
**kwargs: Any,
) -> LLMChain: # type: ignore[valid-type]
"""[Legacy] Create an LLMChain that uses an Ernie function to get a structured output.
Args:
output_schema: Either a dictionary or pydantic.BaseModel class. If a dictionary
is passed in, it's assumed to already be a valid JsonSchema.
For best results, pydantic.BaseModels should have docstrings describing what
the schema represents and descriptions for the parameters.
llm: Language model to use, assumed to support the Ernie function-calling API.
prompt: BasePromptTemplate to pass to the model.
output_key: The key to use when returning the output in LLMChain.__call__.
output_parser: BaseLLMOutputParser to use for parsing model outputs. By default
will be inferred from the function types. If pydantic.BaseModels are passed
in, then the OutputParser will try to parse outputs using those. Otherwise
model outputs will simply be parsed as JSON.
Returns:
An LLMChain that will pass the given function to the model.
Example:
.. code-block:: python
from typing import Optional
from langchain.chains.ernie_functions import create_structured_output_chain
from langchain_community.chat_models import ErnieBotChat
from langchain_core.prompts import ChatPromptTemplate
from pydantic import BaseModel, Field
class Dog(BaseModel):
\"\"\"Identifying information about a dog.\"\"\"
name: str = Field(..., description="The dog's name")
color: str = Field(..., description="The dog's color")
fav_food: Optional[str] = Field(None, description="The dog's favorite food")
llm = ErnieBotChat(model_name="ERNIE-Bot-4")
prompt = ChatPromptTemplate.from_messages(
[
("user", "Use the given format to extract information from the following input: {input}"),
("assistant", "OK!"),
("user", "Tip: Make sure to answer in the correct format"),
]
)
chain = create_structured_output_chain(Dog, llm, prompt)
chain.run("Harry was a chubby brown beagle who loved chicken")
# -> Dog(name="Harry", color="brown", fav_food="chicken")
""" # noqa: E501
if isinstance(output_schema, dict):
function: Any = {
"name": "output_formatter",
"description": (
"Output formatter. Should always be used to format your response to the"
" user."
),
"parameters": output_schema,
}
else:
class _OutputFormatter(BaseModel):
"""Output formatter. Should always be used to format your response to the user.""" # noqa: E501
output: output_schema # type: ignore
function = _OutputFormatter
output_parser = output_parser or PydanticAttrOutputFunctionsParser(
pydantic_schema=_OutputFormatter, attr_name="output"
)
return create_ernie_fn_chain(
[function],
llm,
prompt,
output_key=output_key,
output_parser=output_parser,
**kwargs,
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/ernie_functions/__init__.py | from langchain.chains.ernie_functions.base import (
convert_to_ernie_function,
create_ernie_fn_chain,
create_ernie_fn_runnable,
create_structured_output_chain,
create_structured_output_runnable,
get_ernie_output_parser,
)
__all__ = [
"convert_to_ernie_function",
"create_structured_output_chain",
"create_ernie_fn_chain",
"create_structured_output_runnable",
"create_ernie_fn_runnable",
"get_ernie_output_parser",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/graph_qa/arangodb.py | """Question answering over a graph."""
from __future__ import annotations
import re
from typing import Any, Dict, List, Optional
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from pydantic import Field
from langchain_community.chains.graph_qa.prompts import (
AQL_FIX_PROMPT,
AQL_GENERATION_PROMPT,
AQL_QA_PROMPT,
)
from langchain_community.graphs.arangodb_graph import ArangoGraph
class ArangoGraphQAChain(Chain):
"""Chain for question-answering against a graph by generating AQL statements.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
graph: ArangoGraph = Field(exclude=True)
aql_generation_chain: LLMChain
aql_fix_chain: LLMChain
qa_chain: LLMChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
# Specifies the maximum number of AQL Query Results to return
top_k: int = 10
# Specifies the set of AQL Query Examples that promote few-shot-learning
aql_examples: str = ""
# Specify whether to return the AQL Query in the output dictionary
return_aql_query: bool = False
# Specify whether to return the AQL JSON Result in the output dictionary
return_aql_result: bool = False
# Specify the maximum amount of AQL Generation attempts that should be made
max_aql_generation_attempts: int = 3
allow_dangerous_requests: bool = False
"""Forced user opt-in to acknowledge that the chain can make dangerous requests.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the chain."""
super().__init__(**kwargs)
if self.allow_dangerous_requests is not True:
raise ValueError(
"In order to use this chain, you must acknowledge that it can make "
"dangerous requests by setting `allow_dangerous_requests` to `True`."
"You must narrowly scope the permissions of the database connection "
"to only include necessary permissions. Failure to do so may result "
"in data corruption or loss or reading sensitive data if such data is "
"present in the database."
"Only use this chain if you understand the risks and have taken the "
"necessary precautions. "
"See https://python.langchain.com/docs/security for more information."
)
@property
def input_keys(self) -> List[str]:
return [self.input_key]
@property
def output_keys(self) -> List[str]:
return [self.output_key]
@property
def _chain_type(self) -> str:
return "graph_aql_chain"
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
qa_prompt: BasePromptTemplate = AQL_QA_PROMPT,
aql_generation_prompt: BasePromptTemplate = AQL_GENERATION_PROMPT,
aql_fix_prompt: BasePromptTemplate = AQL_FIX_PROMPT,
**kwargs: Any,
) -> ArangoGraphQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
aql_generation_chain = LLMChain(llm=llm, prompt=aql_generation_prompt)
aql_fix_chain = LLMChain(llm=llm, prompt=aql_fix_prompt)
return cls(
qa_chain=qa_chain,
aql_generation_chain=aql_generation_chain,
aql_fix_chain=aql_fix_chain,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""
Generate an AQL statement from user input, use it to retrieve a response
from an ArangoDB Database instance, and respond to the user input
in natural language.
Users can modify the following ArangoGraphQAChain Class Variables:
:var top_k: The maximum number of AQL Query Results to return
:type top_k: int
:var aql_examples: A set of AQL Query Examples that are passed to
the AQL Generation Prompt Template to promote few-shot-learning.
Defaults to an empty string.
:type aql_examples: str
:var return_aql_query: Whether to return the AQL Query in the
output dictionary. Defaults to False.
:type return_aql_query: bool
:var return_aql_result: Whether to return the AQL JSON Result in the
output dictionary. Defaults to False.
:type return_aql_result: bool
:var max_aql_generation_attempts: The maximum amount of AQL
Generation attempts to be made prior to raising the last
AQL Query Execution Error. Defaults to 3.
:type max_aql_generation_attempts: int
"""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
user_input = inputs[self.input_key]
#########################
# Generate AQL Query #
aql_generation_output = self.aql_generation_chain.run(
{
"adb_schema": self.graph.schema,
"aql_examples": self.aql_examples,
"user_input": user_input,
},
callbacks=callbacks,
)
#########################
aql_query = ""
aql_error = ""
aql_result = None
aql_generation_attempt = 1
while (
aql_result is None
and aql_generation_attempt < self.max_aql_generation_attempts + 1
):
#####################
# Extract AQL Query #
pattern = r"```(?i:aql)?(.*?)```"
matches = re.findall(pattern, aql_generation_output, re.DOTALL)
if not matches:
_run_manager.on_text(
"Invalid Response: ", end="\n", verbose=self.verbose
)
_run_manager.on_text(
aql_generation_output, color="red", end="\n", verbose=self.verbose
)
raise ValueError(f"Response is Invalid: {aql_generation_output}")
aql_query = matches[0]
#####################
_run_manager.on_text(
f"AQL Query ({aql_generation_attempt}):", verbose=self.verbose
)
_run_manager.on_text(
aql_query, color="green", end="\n", verbose=self.verbose
)
#####################
# Execute AQL Query #
from arango import AQLQueryExecuteError
try:
aql_result = self.graph.query(aql_query, self.top_k)
except AQLQueryExecuteError as e:
aql_error = e.error_message
_run_manager.on_text(
"AQL Query Execution Error: ", end="\n", verbose=self.verbose
)
_run_manager.on_text(
aql_error, color="yellow", end="\n\n", verbose=self.verbose
)
########################
# Retry AQL Generation #
aql_generation_output = self.aql_fix_chain.run(
{
"adb_schema": self.graph.schema,
"aql_query": aql_query,
"aql_error": aql_error,
},
callbacks=callbacks,
)
########################
#####################
aql_generation_attempt += 1
if aql_result is None:
m = f"""
Maximum amount of AQL Query Generation attempts reached.
Unable to execute the AQL Query due to the following error:
{aql_error}
"""
raise ValueError(m)
_run_manager.on_text("AQL Result:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(aql_result), color="green", end="\n", verbose=self.verbose
)
########################
# Interpret AQL Result #
result = self.qa_chain(
{
"adb_schema": self.graph.schema,
"user_input": user_input,
"aql_query": aql_query,
"aql_result": aql_result,
},
callbacks=callbacks,
)
########################
# Return results #
result = {self.output_key: result[self.qa_chain.output_key]}
if self.return_aql_query:
result["aql_query"] = aql_query
if self.return_aql_result:
result["aql_result"] = aql_result
return result
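# Illustrative sketch (not part of the upstream module): how the chain above
# pulls the AQL statement out of the model's markdown reply. The regex accepts
# an optional, case-insensitive ``aql`` language tag on the opening fence and
# captures everything between the backticks.
if __name__ == "__main__":
    llm_reply = (
        "Sure, here is the query:\n"
        "```AQL\nFOR doc IN movies FILTER doc.year == 1994 RETURN doc.title\n```"
    )
    pattern = r"```(?i:aql)?(.*?)```"
    matches = re.findall(pattern, llm_reply, re.DOTALL)
    print(matches[0].strip())
    # -> FOR doc IN movies FILTER doc.year == 1994 RETURN doc.title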
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/graph_qa/hugegraph.py | """Question answering over a graph."""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from pydantic import Field
from langchain_community.chains.graph_qa.prompts import (
CYPHER_QA_PROMPT,
GREMLIN_GENERATION_PROMPT,
)
from langchain_community.graphs.hugegraph import HugeGraph
class HugeGraphQAChain(Chain):
"""Chain for question-answering against a graph by generating gremlin statements.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
graph: HugeGraph = Field(exclude=True)
gremlin_generation_chain: LLMChain
qa_chain: LLMChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
allow_dangerous_requests: bool = False
"""Forced user opt-in to acknowledge that the chain can make dangerous requests.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the chain."""
super().__init__(**kwargs)
if self.allow_dangerous_requests is not True:
raise ValueError(
"In order to use this chain, you must acknowledge that it can make "
"dangerous requests by setting `allow_dangerous_requests` to `True`."
"You must narrowly scope the permissions of the database connection "
"to only include necessary permissions. Failure to do so may result "
"in data corruption or loss or reading sensitive data if such data is "
"present in the database."
"Only use this chain if you understand the risks and have taken the "
"necessary precautions. "
"See https://python.langchain.com/docs/security for more information."
)
@property
def input_keys(self) -> List[str]:
"""Input keys.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Output keys.
:meta private:
"""
_output_keys = [self.output_key]
return _output_keys
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT,
gremlin_prompt: BasePromptTemplate = GREMLIN_GENERATION_PROMPT,
**kwargs: Any,
) -> HugeGraphQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
gremlin_generation_chain = LLMChain(llm=llm, prompt=gremlin_prompt)
return cls(
qa_chain=qa_chain,
gremlin_generation_chain=gremlin_generation_chain,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""Generate gremlin statement, use it to look up in db and answer question."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
question = inputs[self.input_key]
generated_gremlin = self.gremlin_generation_chain.run(
{"question": question, "schema": self.graph.get_schema}, callbacks=callbacks
)
_run_manager.on_text("Generated gremlin:", end="\n", verbose=self.verbose)
_run_manager.on_text(
generated_gremlin, color="green", end="\n", verbose=self.verbose
)
context = self.graph.query(generated_gremlin)
_run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(context), color="green", end="\n", verbose=self.verbose
)
result = self.qa_chain(
{"question": question, "context": context},
callbacks=callbacks,
)
return {self.output_key: result[self.qa_chain.output_key]}
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/graph_qa/base.py | """Question answering over a graph."""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain_core.callbacks.manager import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from pydantic import Field
from langchain_community.chains.graph_qa.prompts import (
ENTITY_EXTRACTION_PROMPT,
GRAPH_QA_PROMPT,
)
from langchain_community.graphs.networkx_graph import NetworkxEntityGraph, get_entities
class GraphQAChain(Chain):
"""Chain for question-answering against a graph.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
graph: NetworkxEntityGraph = Field(exclude=True)
entity_extraction_chain: LLMChain
qa_chain: LLMChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
@property
def input_keys(self) -> List[str]:
"""Input keys.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Output keys.
:meta private:
"""
_output_keys = [self.output_key]
return _output_keys
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
qa_prompt: BasePromptTemplate = GRAPH_QA_PROMPT,
entity_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT,
**kwargs: Any,
) -> GraphQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
entity_chain = LLMChain(llm=llm, prompt=entity_prompt)
return cls(
qa_chain=qa_chain,
entity_extraction_chain=entity_chain,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""Extract entities, look up info and answer question."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
question = inputs[self.input_key]
entity_string = self.entity_extraction_chain.run(question)
_run_manager.on_text("Entities Extracted:", end="\n", verbose=self.verbose)
_run_manager.on_text(
entity_string, color="green", end="\n", verbose=self.verbose
)
entities = get_entities(entity_string)
context = ""
all_triplets = []
for entity in entities:
all_triplets.extend(self.graph.get_entity_knowledge(entity))
context = "\n".join(all_triplets)
_run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
_run_manager.on_text(context, color="green", end="\n", verbose=self.verbose)
result = self.qa_chain(
{"question": question, "context": context},
callbacks=_run_manager.get_child(),
)
return {self.output_key: result[self.qa_chain.output_key]}
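# Illustrative sketch (not part of the upstream module): running the chain
# above against a small in-memory NetworkX graph. ``FakeListLLM`` replays
# canned responses (first the extracted entity, then the final answer), so the
# sketch needs no API keys; it does assume ``networkx`` is installed, and the
# imports below are assumptions about the langchain_community layout.
if __name__ == "__main__":
    from langchain_community.graphs.networkx_graph import KnowledgeTriple
    from langchain_community.llms.fake import FakeListLLM

    graph = NetworkxEntityGraph()
    graph.add_triple(KnowledgeTriple("Alice", "works at", "Acme"))
    graph.add_triple(KnowledgeTriple("Alice", "lives in", "Paris"))

    llm = FakeListLLM(responses=["Alice", "Alice works at Acme."])
    chain = GraphQAChain.from_llm(llm, graph=graph)
    print(chain.invoke({"query": "Where does Alice work?"})["result"])
    # -> Alice works at Acme.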
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/graph_qa/falkordb.py | """Question answering over a graph."""
from __future__ import annotations
import re
from typing import Any, Dict, List, Optional
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from pydantic import Field
from langchain_community.chains.graph_qa.prompts import (
CYPHER_GENERATION_PROMPT,
CYPHER_QA_PROMPT,
)
from langchain_community.graphs import FalkorDBGraph
INTERMEDIATE_STEPS_KEY = "intermediate_steps"
def extract_cypher(text: str) -> str:
"""
Extract Cypher code from a text.
Args:
text: Text to extract Cypher code from.
Returns:
Cypher code extracted from the text.
"""
# The pattern to find Cypher code enclosed in triple backticks
pattern = r"```(.*?)```"
# Find all matches in the input text
matches = re.findall(pattern, text, re.DOTALL)
return matches[0] if matches else text
class FalkorDBQAChain(Chain):
"""Chain for question-answering against a graph by generating Cypher statements.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
graph: FalkorDBGraph = Field(exclude=True)
cypher_generation_chain: LLMChain
qa_chain: LLMChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
top_k: int = 10
"""Number of results to return from the query"""
return_intermediate_steps: bool = False
"""Whether or not to return the intermediate steps along with the final answer."""
return_direct: bool = False
"""Whether or not to return the result of querying the graph directly."""
allow_dangerous_requests: bool = False
"""Forced user opt-in to acknowledge that the chain can make dangerous requests.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the chain."""
super().__init__(**kwargs)
if self.allow_dangerous_requests is not True:
raise ValueError(
"In order to use this chain, you must acknowledge that it can make "
"dangerous requests by setting `allow_dangerous_requests` to `True`."
"You must narrowly scope the permissions of the database connection "
"to only include necessary permissions. Failure to do so may result "
"in data corruption or loss or reading sensitive data if such data is "
"present in the database."
"Only use this chain if you understand the risks and have taken the "
"necessary precautions. "
"See https://python.langchain.com/docs/security for more information."
)
@property
def input_keys(self) -> List[str]:
"""Return the input keys.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the output keys.
:meta private:
"""
_output_keys = [self.output_key]
return _output_keys
@property
def _chain_type(self) -> str:
return "graph_cypher_chain"
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT,
cypher_prompt: BasePromptTemplate = CYPHER_GENERATION_PROMPT,
**kwargs: Any,
) -> FalkorDBQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
cypher_generation_chain = LLMChain(llm=llm, prompt=cypher_prompt)
return cls(
qa_chain=qa_chain,
cypher_generation_chain=cypher_generation_chain,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Generate Cypher statement, use it to look up in db and answer question."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
question = inputs[self.input_key]
intermediate_steps: List = []
generated_cypher = self.cypher_generation_chain.run(
{"question": question, "schema": self.graph.schema}, callbacks=callbacks
)
# Extract Cypher code if it is wrapped in backticks
generated_cypher = extract_cypher(generated_cypher)
_run_manager.on_text("Generated Cypher:", end="\n", verbose=self.verbose)
_run_manager.on_text(
generated_cypher, color="green", end="\n", verbose=self.verbose
)
intermediate_steps.append({"query": generated_cypher})
# Retrieve and limit the number of results
context = self.graph.query(generated_cypher)[: self.top_k]
if self.return_direct:
final_result = context
else:
_run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(context), color="green", end="\n", verbose=self.verbose
)
intermediate_steps.append({"context": context})
result = self.qa_chain(
{"question": question, "context": context},
callbacks=callbacks,
)
final_result = result[self.qa_chain.output_key]
chain_result: Dict[str, Any] = {self.output_key: final_result}
if self.return_intermediate_steps:
chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
return chain_result
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/graph_qa/cypher.py | """Question answering over a graph."""
from __future__ import annotations
import re
from typing import Any, Dict, List, Optional, Union
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import (
AIMessage,
BaseMessage,
SystemMessage,
ToolMessage,
)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
)
from langchain_core.runnables import Runnable
from pydantic import Field
from langchain_community.chains.graph_qa.cypher_utils import (
CypherQueryCorrector,
Schema,
)
from langchain_community.chains.graph_qa.prompts import (
CYPHER_GENERATION_PROMPT,
CYPHER_QA_PROMPT,
)
from langchain_community.graphs.graph_store import GraphStore
INTERMEDIATE_STEPS_KEY = "intermediate_steps"
FUNCTION_RESPONSE_SYSTEM = """You are an assistant that helps to form nice and human
understandable answers based on the provided information from tools.
Do not add any other information that wasn't present in the tools, and use
very concise style in interpreting results!
"""
@deprecated(
since="0.3.8",
removal="1.0",
alternative_import="langchain_neo4j.chains.graph_qa.cypher.extract_cypher",
)
def extract_cypher(text: str) -> str:
"""Extract Cypher code from a text.
Args:
text: Text to extract Cypher code from.
Returns:
Cypher code extracted from the text.
"""
# The pattern to find Cypher code enclosed in triple backticks
pattern = r"```(.*?)```"
# Find all matches in the input text
matches = re.findall(pattern, text, re.DOTALL)
return matches[0] if matches else text
@deprecated(
since="0.3.8",
removal="1.0",
alternative_import="langchain_neo4j.chains.graph_qa.cypher.construct_schema",
)
def construct_schema(
structured_schema: Dict[str, Any],
include_types: List[str],
exclude_types: List[str],
) -> str:
"""Filter the schema based on included or excluded types"""
def filter_func(x: str) -> bool:
return x in include_types if include_types else x not in exclude_types
filtered_schema: Dict[str, Any] = {
"node_props": {
k: v
for k, v in structured_schema.get("node_props", {}).items()
if filter_func(k)
},
"rel_props": {
k: v
for k, v in structured_schema.get("rel_props", {}).items()
if filter_func(k)
},
"relationships": [
r
for r in structured_schema.get("relationships", [])
if all(filter_func(r[t]) for t in ["start", "end", "type"])
],
}
# Format node properties
formatted_node_props = []
for label, properties in filtered_schema["node_props"].items():
props_str = ", ".join(
[f"{prop['property']}: {prop['type']}" for prop in properties]
)
formatted_node_props.append(f"{label} {{{props_str}}}")
# Format relationship properties
formatted_rel_props = []
for rel_type, properties in filtered_schema["rel_props"].items():
props_str = ", ".join(
[f"{prop['property']}: {prop['type']}" for prop in properties]
)
formatted_rel_props.append(f"{rel_type} {{{props_str}}}")
# Format relationships
formatted_rels = [
f"(:{el['start']})-[:{el['type']}]->(:{el['end']})"
for el in filtered_schema["relationships"]
]
return "\n".join(
[
"Node properties are the following:",
",".join(formatted_node_props),
"Relationship properties are the following:",
",".join(formatted_rel_props),
"The relationships are the following:",
",".join(formatted_rels),
]
)
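# Illustrative sketch (not part of the upstream module): what
# ``construct_schema`` produces and how the include/exclude filters apply.
# The structured schema below is made up for the demo.
if __name__ == "__main__":
    structured_schema = {
        "node_props": {
            "Person": [{"property": "name", "type": "STRING"}],
            "Movie": [{"property": "title", "type": "STRING"}],
        },
        "rel_props": {"ACTED_IN": [{"property": "role", "type": "STRING"}]},
        "relationships": [{"start": "Person", "type": "ACTED_IN", "end": "Movie"}],
    }
    # No filtering: every label, relationship type and property is kept.
    print(construct_schema(structured_schema, include_types=[], exclude_types=[]))
    # Keep only "Person": Movie is dropped, and the ACTED_IN relationship is
    # dropped too because its type and end label are not in the include list.
    print(construct_schema(structured_schema, include_types=["Person"], exclude_types=[]))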
@deprecated(
since="0.3.8",
removal="1.0",
alternative_import="langchain_neo4j.chains.graph_qa.cypher.get_function_response",
)
def get_function_response(
question: str, context: List[Dict[str, Any]]
) -> List[BaseMessage]:
TOOL_ID = "call_H7fABDuzEau48T10Qn0Lsh0D"
messages = [
AIMessage(
content="",
additional_kwargs={
"tool_calls": [
{
"id": TOOL_ID,
"function": {
"arguments": '{"question":"' + question + '"}',
"name": "GetInformation",
},
"type": "function",
}
]
},
),
ToolMessage(content=str(context), tool_call_id=TOOL_ID),
]
return messages
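# Illustrative sketch (not part of the upstream module): the helper above
# wraps the database context as a synthetic tool call plus tool response, so a
# chat model with native tool calling can phrase the final answer. Building
# the messages is purely local and needs no model:
if __name__ == "__main__":
    msgs = get_function_response(
        "Who directed Casablanca?",
        [{"d.name": "Michael Curtiz"}],
    )
    for m in msgs:
        print(type(m).__name__, repr(m.content))
    # -> AIMessage '' (the tool call itself lives in additional_kwargs)
    # -> ToolMessage "[{'d.name': 'Michael Curtiz'}]"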
@deprecated(
since="0.3.8",
removal="1.0",
alternative_import="langchain_neo4j.GraphCypherQAChain",
)
class GraphCypherQAChain(Chain):
"""Chain for question-answering against a graph by generating Cypher statements.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
graph: GraphStore = Field(exclude=True)
cypher_generation_chain: LLMChain
qa_chain: Union[LLMChain, Runnable]
graph_schema: str
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
top_k: int = 10
"""Number of results to return from the query"""
return_intermediate_steps: bool = False
"""Whether or not to return the intermediate steps along with the final answer."""
return_direct: bool = False
"""Whether or not to return the result of querying the graph directly."""
cypher_query_corrector: Optional[CypherQueryCorrector] = None
"""Optional cypher validation tool"""
use_function_response: bool = False
"""Whether to wrap the database context as tool/function response"""
allow_dangerous_requests: bool = False
"""Forced user opt-in to acknowledge that the chain can make dangerous requests.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the chain."""
super().__init__(**kwargs)
if self.allow_dangerous_requests is not True:
raise ValueError(
"In order to use this chain, you must acknowledge that it can make "
"dangerous requests by setting `allow_dangerous_requests` to `True`."
"You must narrowly scope the permissions of the database connection "
"to only include necessary permissions. Failure to do so may result "
"in data corruption or loss or reading sensitive data if such data is "
"present in the database."
"Only use this chain if you understand the risks and have taken the "
"necessary precautions. "
"See https://python.langchain.com/docs/security for more information."
)
@property
def input_keys(self) -> List[str]:
"""Return the input keys.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the output keys.
:meta private:
"""
_output_keys = [self.output_key]
return _output_keys
@property
def _chain_type(self) -> str:
return "graph_cypher_chain"
@classmethod
def from_llm(
cls,
llm: Optional[BaseLanguageModel] = None,
*,
qa_prompt: Optional[BasePromptTemplate] = None,
cypher_prompt: Optional[BasePromptTemplate] = None,
cypher_llm: Optional[BaseLanguageModel] = None,
qa_llm: Optional[Union[BaseLanguageModel, Any]] = None,
exclude_types: List[str] = [],
include_types: List[str] = [],
validate_cypher: bool = False,
qa_llm_kwargs: Optional[Dict[str, Any]] = None,
cypher_llm_kwargs: Optional[Dict[str, Any]] = None,
use_function_response: bool = False,
function_response_system: str = FUNCTION_RESPONSE_SYSTEM,
**kwargs: Any,
) -> GraphCypherQAChain:
"""Initialize from LLM."""
if not cypher_llm and not llm:
raise ValueError("Either `llm` or `cypher_llm` parameters must be provided")
if not qa_llm and not llm:
raise ValueError("Either `llm` or `qa_llm` parameters must be provided")
if cypher_llm and qa_llm and llm:
raise ValueError(
"You can specify up to two of 'cypher_llm', 'qa_llm'"
", and 'llm', but not all three simultaneously."
)
if cypher_prompt and cypher_llm_kwargs:
raise ValueError(
"Specifying cypher_prompt and cypher_llm_kwargs together is"
" not allowed. Please pass prompt via cypher_llm_kwargs."
)
if qa_prompt and qa_llm_kwargs:
raise ValueError(
"Specifying qa_prompt and qa_llm_kwargs together is"
" not allowed. Please pass prompt via qa_llm_kwargs."
)
use_qa_llm_kwargs = qa_llm_kwargs if qa_llm_kwargs is not None else {}
use_cypher_llm_kwargs = (
cypher_llm_kwargs if cypher_llm_kwargs is not None else {}
)
if "prompt" not in use_qa_llm_kwargs:
use_qa_llm_kwargs["prompt"] = (
qa_prompt if qa_prompt is not None else CYPHER_QA_PROMPT
)
if "prompt" not in use_cypher_llm_kwargs:
use_cypher_llm_kwargs["prompt"] = (
cypher_prompt if cypher_prompt is not None else CYPHER_GENERATION_PROMPT
)
qa_llm = qa_llm or llm
if use_function_response:
try:
qa_llm.bind_tools({}) # type: ignore[union-attr]
response_prompt = ChatPromptTemplate.from_messages(
[
SystemMessage(content=function_response_system),
HumanMessagePromptTemplate.from_template("{question}"),
MessagesPlaceholder(variable_name="function_response"),
]
)
qa_chain = response_prompt | qa_llm | StrOutputParser() # type: ignore
except (NotImplementedError, AttributeError):
raise ValueError("Provided LLM does not support native tools/functions")
else:
qa_chain = LLMChain(llm=qa_llm, **use_qa_llm_kwargs) # type: ignore[arg-type]
cypher_generation_chain = LLMChain(
llm=cypher_llm or llm, # type: ignore[arg-type]
**use_cypher_llm_kwargs, # type: ignore[arg-type]
)
if exclude_types and include_types:
raise ValueError(
"Either `exclude_types` or `include_types` "
"can be provided, but not both"
)
graph_schema = construct_schema(
kwargs["graph"].get_structured_schema, include_types, exclude_types
)
cypher_query_corrector = None
if validate_cypher:
corrector_schema = [
Schema(el["start"], el["type"], el["end"])
for el in kwargs["graph"].structured_schema.get("relationships")
]
cypher_query_corrector = CypherQueryCorrector(corrector_schema)
return cls(
graph_schema=graph_schema,
qa_chain=qa_chain,
cypher_generation_chain=cypher_generation_chain,
cypher_query_corrector=cypher_query_corrector,
use_function_response=use_function_response,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Generate Cypher statement, use it to look up in db and answer question."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
question = inputs[self.input_key]
args = {
"question": question,
"schema": self.graph_schema,
}
args.update(inputs)
intermediate_steps: List = []
generated_cypher = self.cypher_generation_chain.run(args, callbacks=callbacks)
# Extract Cypher code if it is wrapped in backticks
generated_cypher = extract_cypher(generated_cypher)
# Correct Cypher query if enabled
if self.cypher_query_corrector:
generated_cypher = self.cypher_query_corrector(generated_cypher)
_run_manager.on_text("Generated Cypher:", end="\n", verbose=self.verbose)
_run_manager.on_text(
generated_cypher, color="green", end="\n", verbose=self.verbose
)
intermediate_steps.append({"query": generated_cypher})
# Retrieve and limit the number of results
        # Generated Cypher can be null if the query corrector identifies an invalid schema
if generated_cypher:
context = self.graph.query(generated_cypher)[: self.top_k]
else:
context = []
if self.return_direct:
final_result = context
else:
_run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(context), color="green", end="\n", verbose=self.verbose
)
intermediate_steps.append({"context": context})
if self.use_function_response:
function_response = get_function_response(question, context)
final_result = self.qa_chain.invoke( # type: ignore
{"question": question, "function_response": function_response},
)
else:
result = self.qa_chain.invoke( # type: ignore
{"question": question, "context": context},
callbacks=callbacks,
)
final_result = result[self.qa_chain.output_key] # type: ignore
chain_result: Dict[str, Any] = {self.output_key: final_result}
if self.return_intermediate_steps:
chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
return chain_result
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/graph_qa/gremlin.py | """Question answering over a graph."""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain_core.callbacks.manager import CallbackManager, CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from pydantic import Field
from langchain_community.chains.graph_qa.prompts import (
CYPHER_QA_PROMPT,
GRAPHDB_SPARQL_FIX_TEMPLATE,
GREMLIN_GENERATION_PROMPT,
)
from langchain_community.graphs import GremlinGraph
INTERMEDIATE_STEPS_KEY = "intermediate_steps"
def extract_gremlin(text: str) -> str:
"""Extract Gremlin code from a text.
Args:
text: Text to extract Gremlin code from.
Returns:
Gremlin code extracted from the text.
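    For example, a fenced LLM response of the form ```gremlin\ng.V().limit(5)\n```
    is reduced to the plain query string "g.V().limit(5)"; backticks, the
    leading "gremlin" tag and newlines are all stripped.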
"""
text = text.replace("`", "")
if text.startswith("gremlin"):
text = text[len("gremlin") :]
return text.replace("\n", "")
class GremlinQAChain(Chain):
"""Chain for question-answering against a graph by generating gremlin statements.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
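    Example (a minimal sketch; `llm` and the GremlinGraph connection
    `graph` are assumed to be created by the caller and are not shown here):
        .. code-block:: python
        chain = GremlinQAChain.from_llm(
            llm=llm,
            graph=graph,
            allow_dangerous_requests=True,
        )
        response = chain.run(query)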
"""
graph: GremlinGraph = Field(exclude=True)
gremlin_generation_chain: LLMChain
qa_chain: LLMChain
gremlin_fix_chain: LLMChain
max_fix_retries: int = 3
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
top_k: int = 100
return_direct: bool = False
return_intermediate_steps: bool = False
allow_dangerous_requests: bool = False
"""Forced user opt-in to acknowledge that the chain can make dangerous requests.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the chain."""
super().__init__(**kwargs)
if self.allow_dangerous_requests is not True:
raise ValueError(
"In order to use this chain, you must acknowledge that it can make "
"dangerous requests by setting `allow_dangerous_requests` to `True`."
"You must narrowly scope the permissions of the database connection "
"to only include necessary permissions. Failure to do so may result "
"in data corruption or loss or reading sensitive data if such data is "
"present in the database."
"Only use this chain if you understand the risks and have taken the "
"necessary precautions. "
"See https://python.langchain.com/docs/security for more information."
)
@property
def input_keys(self) -> List[str]:
"""Input keys.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Output keys.
:meta private:
"""
_output_keys = [self.output_key]
return _output_keys
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
gremlin_fix_prompt: BasePromptTemplate = PromptTemplate(
input_variables=["error_message", "generated_sparql", "schema"],
template=GRAPHDB_SPARQL_FIX_TEMPLATE.replace("SPARQL", "Gremlin").replace(
"in Turtle format", ""
),
),
qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT,
gremlin_prompt: BasePromptTemplate = GREMLIN_GENERATION_PROMPT,
**kwargs: Any,
) -> GremlinQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
gremlin_generation_chain = LLMChain(llm=llm, prompt=gremlin_prompt)
        gremlin_fix_chain = LLMChain(llm=llm, prompt=gremlin_fix_prompt)
return cls(
qa_chain=qa_chain,
gremlin_generation_chain=gremlin_generation_chain,
            gremlin_fix_chain=gremlin_fix_chain,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""Generate gremlin statement, use it to look up in db and answer question."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
question = inputs[self.input_key]
intermediate_steps: List = []
chain_response = self.gremlin_generation_chain.invoke(
{"question": question, "schema": self.graph.get_schema}, callbacks=callbacks
)
generated_gremlin = extract_gremlin(
chain_response[self.gremlin_generation_chain.output_key]
)
_run_manager.on_text("Generated gremlin:", end="\n", verbose=self.verbose)
_run_manager.on_text(
generated_gremlin, color="green", end="\n", verbose=self.verbose
)
intermediate_steps.append({"query": generated_gremlin})
if generated_gremlin:
context = self.execute_with_retry(
_run_manager, callbacks, generated_gremlin
)[: self.top_k]
else:
context = []
if self.return_direct:
final_result = context
else:
_run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(context), color="green", end="\n", verbose=self.verbose
)
intermediate_steps.append({"context": context})
result = self.qa_chain.invoke(
{"question": question, "context": context},
callbacks=callbacks,
)
final_result = result[self.qa_chain.output_key]
chain_result: Dict[str, Any] = {self.output_key: final_result}
if self.return_intermediate_steps:
chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
return chain_result
def execute_query(self, query: str) -> List[Any]:
try:
return self.graph.query(query)
except Exception as e:
if hasattr(e, "status_message"):
raise ValueError(e.status_message)
else:
raise ValueError(str(e))
def execute_with_retry(
self,
_run_manager: CallbackManagerForChainRun,
callbacks: CallbackManager,
generated_gremlin: str,
) -> List[Any]:
try:
return self.execute_query(generated_gremlin)
except Exception as e:
retries = 0
error_message = str(e)
            self.log_invalid_query(_run_manager, generated_gremlin, error_message)
            # Start from the original query so there is always a defined query
            # to log, even if a fix attempt itself raises before assignment.
            fixed_gremlin = generated_gremlin
            while retries < self.max_fix_retries:
try:
fix_chain_result = self.gremlin_fix_chain.invoke(
{
"error_message": error_message,
# we are borrowing template from sparql
"generated_sparql": generated_gremlin,
"schema": self.schema,
},
callbacks=callbacks,
)
fixed_gremlin = fix_chain_result[self.gremlin_fix_chain.output_key]
return self.execute_query(fixed_gremlin)
except Exception as e:
retries += 1
parse_exception = str(e)
self.log_invalid_query(_run_manager, fixed_gremlin, parse_exception)
raise ValueError("The generated Gremlin query is invalid.")
def log_invalid_query(
self,
_run_manager: CallbackManagerForChainRun,
generated_query: str,
error_message: str,
) -> None:
_run_manager.on_text("Invalid Gremlin query: ", end="\n", verbose=self.verbose)
_run_manager.on_text(
generated_query, color="red", end="\n", verbose=self.verbose
)
_run_manager.on_text(
"Gremlin Query Parse Error: ", end="\n", verbose=self.verbose
)
_run_manager.on_text(
error_message, color="red", end="\n\n", verbose=self.verbose
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/graph_qa/prompts.py | # flake8: noqa
from langchain_core.prompts.prompt import PromptTemplate
_DEFAULT_ENTITY_EXTRACTION_TEMPLATE = """Extract all entities from the following text. As a guideline, a proper noun is generally capitalized. You should definitely extract all names and places.
Return the output as a single comma-separated list, or NONE if there is nothing of note to return.
EXAMPLE
i'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff.
Output: Langchain
END OF EXAMPLE
EXAMPLE
i'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff. I'm working with Sam.
Output: Langchain, Sam
END OF EXAMPLE
Begin!
{input}
Output:"""
ENTITY_EXTRACTION_PROMPT = PromptTemplate(
input_variables=["input"], template=_DEFAULT_ENTITY_EXTRACTION_TEMPLATE
)
_DEFAULT_GRAPH_QA_TEMPLATE = """Use the following knowledge triplets to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
Helpful Answer:"""
GRAPH_QA_PROMPT = PromptTemplate(
template=_DEFAULT_GRAPH_QA_TEMPLATE, input_variables=["context", "question"]
)
CYPHER_GENERATION_TEMPLATE = """Task:Generate Cypher statement to query a graph database.
Instructions:
Use only the provided relationship types and properties in the schema.
Do not use any other relationship types or properties that are not provided.
Schema:
{schema}
Note: Do not include any explanations or apologies in your responses.
Do not respond to any questions that might ask anything else than for you to construct a Cypher statement.
Do not include any text except the generated Cypher statement.
The question is:
{question}"""
CYPHER_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"], template=CYPHER_GENERATION_TEMPLATE
)
NEBULAGRAPH_EXTRA_INSTRUCTIONS = """
Instructions:
First, generate Cypher, then convert it to the NebulaGraph Cypher dialect (rather than standard):
1. it requires explicit label specification only when referring to node properties: v.`Foo`.name
2. note explicit label specification is not needed for edge properties, so it's e.name instead of e.`Bar`.name
3. it uses double equals sign for comparison: `==` rather than `=`
For instance:
```diff
< MATCH (p:person)-[e:directed]->(m:movie) WHERE m.name = 'The Godfather II'
< RETURN p.name, e.year, m.name;
---
> MATCH (p:`person`)-[e:directed]->(m:`movie`) WHERE m.`movie`.`name` == 'The Godfather II'
> RETURN p.`person`.`name`, e.year, m.`movie`.`name`;
```\n"""
NGQL_GENERATION_TEMPLATE = CYPHER_GENERATION_TEMPLATE.replace(
"Generate Cypher", "Generate NebulaGraph Cypher"
).replace("Instructions:", NEBULAGRAPH_EXTRA_INSTRUCTIONS)
NGQL_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"], template=NGQL_GENERATION_TEMPLATE
)
KUZU_EXTRA_INSTRUCTIONS = """
Instructions:
Generate the Kùzu dialect of Cypher with the following rules in mind:
1. Do not omit the relationship pattern. Always use `()-[]->()` instead of `()->()`.
2. Do not include triple backticks ``` in your response. Return only Cypher.
3. Do not return any notes or comments in your response.
\n"""
KUZU_GENERATION_TEMPLATE = CYPHER_GENERATION_TEMPLATE.replace(
"Generate Cypher", "Generate Kùzu Cypher"
).replace("Instructions:", KUZU_EXTRA_INSTRUCTIONS)
KUZU_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"], template=KUZU_GENERATION_TEMPLATE
)
GREMLIN_GENERATION_TEMPLATE = CYPHER_GENERATION_TEMPLATE.replace("Cypher", "Gremlin")
GREMLIN_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"], template=GREMLIN_GENERATION_TEMPLATE
)
CYPHER_QA_TEMPLATE = """You are an assistant that helps to form nice and human understandable answers.
The information part contains the provided information that you must use to construct an answer.
The provided information is authoritative, you must never doubt it or try to use your internal knowledge to correct it.
Make the answer sound like a response to the question. Do not mention that you based the result on the given information.
Here is an example:
Question: Which managers own Neo4j stocks?
Context:[manager:CTL LLC, manager:JANE STREET GROUP LLC]
Helpful Answer: CTL LLC, JANE STREET GROUP LLC owns Neo4j stocks.
Follow this example when generating answers.
If the provided information is empty, say that you don't know the answer.
Information:
{context}
Question: {question}
Helpful Answer:"""
CYPHER_QA_PROMPT = PromptTemplate(
input_variables=["context", "question"], template=CYPHER_QA_TEMPLATE
)
SPARQL_INTENT_TEMPLATE = """Task: Identify the intent of a prompt and return the appropriate SPARQL query type.
You are an assistant that distinguishes different types of prompts and returns the corresponding SPARQL query types.
Consider only the following query types:
* SELECT: this query type corresponds to questions
* UPDATE: this query type corresponds to all requests for deleting, inserting, or changing triples
Note: Be as concise as possible.
Do not include any explanations or apologies in your responses.
Do not respond to any questions that ask for anything else than for you to identify a SPARQL query type.
Do not include any unnecessary whitespaces or any text except the query type, i.e., either return 'SELECT' or 'UPDATE'.
The prompt is:
{prompt}
Helpful Answer:"""
SPARQL_INTENT_PROMPT = PromptTemplate(
input_variables=["prompt"], template=SPARQL_INTENT_TEMPLATE
)
SPARQL_GENERATION_SELECT_TEMPLATE = """Task: Generate a SPARQL SELECT statement for querying a graph database.
For instance, to find all email addresses of John Doe, the following query in backticks would be suitable:
```
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?email
WHERE {{
?person foaf:name "John Doe" .
?person foaf:mbox ?email .
}}
```
Instructions:
Use only the node types and properties provided in the schema.
Do not use any node types and properties that are not explicitly provided.
Include all necessary prefixes.
Schema:
{schema}
Note: Be as concise as possible.
Do not include any explanations or apologies in your responses.
Do not respond to any questions that ask for anything else than for you to construct a SPARQL query.
Do not include any text except the SPARQL query generated.
The question is:
{prompt}"""
SPARQL_GENERATION_SELECT_PROMPT = PromptTemplate(
input_variables=["schema", "prompt"], template=SPARQL_GENERATION_SELECT_TEMPLATE
)
SPARQL_GENERATION_UPDATE_TEMPLATE = """Task: Generate a SPARQL UPDATE statement for updating a graph database.
For instance, to add 'jane.doe@foo.bar' as a new email address for Jane Doe, the following query in backticks would be suitable:
```
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
INSERT {{
?person foaf:mbox <mailto:jane.doe@foo.bar> .
}}
WHERE {{
?person foaf:name "Jane Doe" .
}}
```
Instructions:
Make the query as short as possible and avoid adding unnecessary triples.
Use only the node types and properties provided in the schema.
Do not use any node types and properties that are not explicitly provided.
Include all necessary prefixes.
Schema:
{schema}
Note: Be as concise as possible.
Do not include any explanations or apologies in your responses.
Do not respond to any questions that ask for anything else than for you to construct a SPARQL query.
Return only the generated SPARQL query, nothing else.
The information to be inserted is:
{prompt}"""
SPARQL_GENERATION_UPDATE_PROMPT = PromptTemplate(
input_variables=["schema", "prompt"], template=SPARQL_GENERATION_UPDATE_TEMPLATE
)
SPARQL_QA_TEMPLATE = """Task: Generate a natural language response from the results of a SPARQL query.
You are an assistant that creates well-written and human understandable answers.
The information part contains the information provided, which you can use to construct an answer.
The information provided is authoritative, you must never doubt it or try to use your internal knowledge to correct it.
Make your response sound like the information is coming from an AI assistant, but don't add any information.
Information:
{context}
Question: {prompt}
Helpful Answer:"""
SPARQL_QA_PROMPT = PromptTemplate(
input_variables=["context", "prompt"], template=SPARQL_QA_TEMPLATE
)
GRAPHDB_SPARQL_GENERATION_TEMPLATE = """
Write a SPARQL SELECT query for querying a graph database.
The ontology schema delimited by triple backticks in Turtle format is:
```
{schema}
```
Use only the classes and properties provided in the schema to construct the SPARQL query.
Do not use any classes or properties that are not explicitly provided in the SPARQL query.
Include all necessary prefixes.
Do not include any explanations or apologies in your responses.
Do not wrap the query in backticks.
Do not include any text except the SPARQL query generated.
The question delimited by triple backticks is:
```
{prompt}
```
"""
GRAPHDB_SPARQL_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "prompt"],
template=GRAPHDB_SPARQL_GENERATION_TEMPLATE,
)
GRAPHDB_SPARQL_FIX_TEMPLATE = """
This following SPARQL query delimited by triple backticks
```
{generated_sparql}
```
is not valid.
The error delimited by triple backticks is
```
{error_message}
```
Give me a correct version of the SPARQL query.
Do not change the logic of the query.
Do not include any explanations or apologies in your responses.
Do not wrap the query in backticks.
Do not include any text except the SPARQL query generated.
The ontology schema delimited by triple backticks in Turtle format is:
```
{schema}
```
"""
GRAPHDB_SPARQL_FIX_PROMPT = PromptTemplate(
input_variables=["error_message", "generated_sparql", "schema"],
template=GRAPHDB_SPARQL_FIX_TEMPLATE,
)
GRAPHDB_QA_TEMPLATE = """Task: Generate a natural language response from the results of a SPARQL query.
You are an assistant that creates well-written and human understandable answers.
The information part contains the information provided, which you can use to construct an answer.
The information provided is authoritative, you must never doubt it or try to use your internal knowledge to correct it.
Make your response sound like the information is coming from an AI assistant, but don't add any information.
Don't use internal knowledge to answer the question, just say you don't know if no information is available.
Information:
{context}
Question: {prompt}
Helpful Answer:"""
GRAPHDB_QA_PROMPT = PromptTemplate(
input_variables=["context", "prompt"], template=GRAPHDB_QA_TEMPLATE
)
AQL_GENERATION_TEMPLATE = """Task: Generate an ArangoDB Query Language (AQL) query from a User Input.
You are an ArangoDB Query Language (AQL) expert responsible for translating a `User Input` into an ArangoDB Query Language (AQL) query.
You are given an `ArangoDB Schema`. It is a JSON Object containing:
1. `Graph Schema`: Lists all Graphs within the ArangoDB Database Instance, along with their Edge Relationships.
2. `Collection Schema`: Lists all Collections within the ArangoDB Database Instance, along with their document/edge properties and a document/edge example.
You may also be given a set of `AQL Query Examples` to help you create the `AQL Query`. If provided, the `AQL Query Examples` should be used as a reference, similar to how `ArangoDB Schema` should be used.
Things you should do:
- Think step by step.
- Rely on `ArangoDB Schema` and `AQL Query Examples` (if provided) to generate the query.
- Begin the `AQL Query` by the `WITH` AQL keyword to specify all of the ArangoDB Collections required.
- Return the `AQL Query` wrapped in 3 backticks (```).
- Use only the provided relationship types and properties in the `ArangoDB Schema` and any `AQL Query Examples` queries.
- Only answer to requests related to generating an AQL Query.
- If a request is unrelated to generating AQL Query, say that you cannot help the user.
Things you should not do:
- Do not use any properties/relationships that can't be inferred from the `ArangoDB Schema` or the `AQL Query Examples`.
- Do not include any text except the generated AQL Query.
- Do not provide explanations or apologies in your responses.
- Do not generate an AQL Query that removes or deletes any data.
Under no circumstance should you generate an AQL Query that deletes any data whatsoever.
ArangoDB Schema:
{adb_schema}
AQL Query Examples (Optional):
{aql_examples}
User Input:
{user_input}
AQL Query:
"""
AQL_GENERATION_PROMPT = PromptTemplate(
input_variables=["adb_schema", "aql_examples", "user_input"],
template=AQL_GENERATION_TEMPLATE,
)
AQL_FIX_TEMPLATE = """Task: Address the ArangoDB Query Language (AQL) error message of an ArangoDB Query Language query.
You are an ArangoDB Query Language (AQL) expert responsible for correcting the provided `AQL Query` based on the provided `AQL Error`.
The `AQL Error` explains why the `AQL Query` could not be executed in the database.
The `AQL Error` may also contain the position of the error relative to the total number of lines of the `AQL Query`.
For example, 'error X at position 2:5' denotes that the error X occurs on line 2, column 5 of the `AQL Query`.
You are also given the `ArangoDB Schema`. It is a JSON Object containing:
1. `Graph Schema`: Lists all Graphs within the ArangoDB Database Instance, along with their Edge Relationships.
2. `Collection Schema`: Lists all Collections within the ArangoDB Database Instance, along with their document/edge properties and a document/edge example.
You will output the `Corrected AQL Query` wrapped in 3 backticks (```). Do not include any text except the Corrected AQL Query.
Remember to think step by step.
ArangoDB Schema:
{adb_schema}
AQL Query:
{aql_query}
AQL Error:
{aql_error}
Corrected AQL Query:
"""
AQL_FIX_PROMPT = PromptTemplate(
input_variables=[
"adb_schema",
"aql_query",
"aql_error",
],
template=AQL_FIX_TEMPLATE,
)
AQL_QA_TEMPLATE = """Task: Generate a natural language `Summary` from the results of an ArangoDB Query Language query.
You are an ArangoDB Query Language (AQL) expert responsible for creating a well-written `Summary` from the `User Input` and associated `AQL Result`.
A user has executed an ArangoDB Query Language query, which has returned the AQL Result in JSON format.
You are responsible for creating a `Summary` based on the AQL Result.
You are given the following information:
- `ArangoDB Schema`: contains a schema representation of the user's ArangoDB Database.
- `User Input`: the original question/request of the user, which has been translated into an AQL Query.
- `AQL Query`: the AQL equivalent of the `User Input`, translated by another AI Model. Should you deem it to be incorrect, suggest a different AQL Query.
- `AQL Result`: the JSON output returned by executing the `AQL Query` within the ArangoDB Database.
Remember to think step by step.
Your `Summary` should sound like it is a response to the `User Input`.
Your `Summary` should not include any mention of the `AQL Query` or the `AQL Result`.
ArangoDB Schema:
{adb_schema}
User Input:
{user_input}
AQL Query:
{aql_query}
AQL Result:
{aql_result}
"""
AQL_QA_PROMPT = PromptTemplate(
input_variables=["adb_schema", "user_input", "aql_query", "aql_result"],
template=AQL_QA_TEMPLATE,
)
NEPTUNE_OPENCYPHER_EXTRA_INSTRUCTIONS = """
Instructions:
Generate the query in openCypher format and follow these rules:
Do not use `NONE`, `ALL` or `ANY` predicate functions, rather use list comprehensions.
Do not use `REDUCE` function. Rather use a combination of list comprehension and the `UNWIND` clause to achieve similar results.
Do not use `FOREACH` clause. Rather use a combination of `WITH` and `UNWIND` clauses to achieve similar results.{extra_instructions}
\n"""
NEPTUNE_OPENCYPHER_GENERATION_TEMPLATE = CYPHER_GENERATION_TEMPLATE.replace(
"Instructions:", NEPTUNE_OPENCYPHER_EXTRA_INSTRUCTIONS
)
NEPTUNE_OPENCYPHER_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question", "extra_instructions"],
template=NEPTUNE_OPENCYPHER_GENERATION_TEMPLATE,
)
NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_TEMPLATE = """
Write an openCypher query to answer the following question. Do not explain the answer. Only return the query.{extra_instructions}
Question: "{question}".
Here is the property graph schema:
{schema}
\n"""
NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_PROMPT = PromptTemplate(
input_variables=["schema", "question", "extra_instructions"],
template=NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_TEMPLATE,
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/graph_qa/neptune_cypher.py | from __future__ import annotations
import re
from typing import Any, Dict, List, Optional
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.prompt_selector import ConditionalPromptSelector
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.base import BasePromptTemplate
from pydantic import Field
from langchain_community.chains.graph_qa.prompts import (
CYPHER_QA_PROMPT,
NEPTUNE_OPENCYPHER_GENERATION_PROMPT,
NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_PROMPT,
)
from langchain_community.graphs import BaseNeptuneGraph
INTERMEDIATE_STEPS_KEY = "intermediate_steps"
def trim_query(query: str) -> str:
"""Trim the query to only include Cypher keywords."""
keywords = (
"CALL",
"CREATE",
"DELETE",
"DETACH",
"LIMIT",
"MATCH",
"MERGE",
"OPTIONAL",
"ORDER",
"REMOVE",
"RETURN",
"SET",
"SKIP",
"UNWIND",
"WITH",
"WHERE",
"//",
)
lines = query.split("\n")
new_query = ""
for line in lines:
if line.strip().upper().startswith(keywords):
new_query += line + "\n"
return new_query
def extract_cypher(text: str) -> str:
"""Extract Cypher code from text using Regex."""
# The pattern to find Cypher code enclosed in triple backticks
pattern = r"```(.*?)```"
# Find all matches in the input text
matches = re.findall(pattern, text, re.DOTALL)
return matches[0] if matches else text
def use_simple_prompt(llm: BaseLanguageModel) -> bool:
"""Decides whether to use the simple prompt"""
if llm._llm_type and "anthropic" in llm._llm_type: # type: ignore
return True
# Bedrock anthropic
if hasattr(llm, "model_id") and "anthropic" in llm.model_id: # type: ignore
return True
return False
PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=NEPTUNE_OPENCYPHER_GENERATION_PROMPT,
conditionals=[(use_simple_prompt, NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_PROMPT)],
)
class NeptuneOpenCypherQAChain(Chain):
"""Chain for question-answering against a Neptune graph
by generating openCypher statements.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
Example:
.. code-block:: python
chain = NeptuneOpenCypherQAChain.from_llm(
llm=llm,
graph=graph
)
response = chain.run(query)
"""
graph: BaseNeptuneGraph = Field(exclude=True)
cypher_generation_chain: LLMChain
qa_chain: LLMChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
top_k: int = 10
return_intermediate_steps: bool = False
"""Whether or not to return the intermediate steps along with the final answer."""
return_direct: bool = False
"""Whether or not to return the result of querying the graph directly."""
extra_instructions: Optional[str] = None
"""Extra instructions by the appended to the query generation prompt."""
allow_dangerous_requests: bool = False
"""Forced user opt-in to acknowledge that the chain can make dangerous requests.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the chain."""
super().__init__(**kwargs)
if self.allow_dangerous_requests is not True:
raise ValueError(
"In order to use this chain, you must acknowledge that it can make "
"dangerous requests by setting `allow_dangerous_requests` to `True`."
"You must narrowly scope the permissions of the database connection "
"to only include necessary permissions. Failure to do so may result "
"in data corruption or loss or reading sensitive data if such data is "
"present in the database."
"Only use this chain if you understand the risks and have taken the "
"necessary precautions. "
"See https://python.langchain.com/docs/security for more information."
)
@property
def input_keys(self) -> List[str]:
"""Return the input keys.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the output keys.
:meta private:
"""
_output_keys = [self.output_key]
return _output_keys
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT,
cypher_prompt: Optional[BasePromptTemplate] = None,
extra_instructions: Optional[str] = None,
**kwargs: Any,
) -> NeptuneOpenCypherQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
_cypher_prompt = cypher_prompt or PROMPT_SELECTOR.get_prompt(llm)
cypher_generation_chain = LLMChain(llm=llm, prompt=_cypher_prompt)
return cls(
qa_chain=qa_chain,
cypher_generation_chain=cypher_generation_chain,
extra_instructions=extra_instructions,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Generate Cypher statement, use it to look up in db and answer question."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
question = inputs[self.input_key]
intermediate_steps: List = []
generated_cypher = self.cypher_generation_chain.run(
{
"question": question,
"schema": self.graph.get_schema,
"extra_instructions": self.extra_instructions or "",
},
callbacks=callbacks,
)
# Extract Cypher code if it is wrapped in backticks
generated_cypher = extract_cypher(generated_cypher)
generated_cypher = trim_query(generated_cypher)
_run_manager.on_text("Generated Cypher:", end="\n", verbose=self.verbose)
_run_manager.on_text(
generated_cypher, color="green", end="\n", verbose=self.verbose
)
intermediate_steps.append({"query": generated_cypher})
context = self.graph.query(generated_cypher)
if self.return_direct:
final_result = context
else:
_run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(context), color="green", end="\n", verbose=self.verbose
)
intermediate_steps.append({"context": context})
result = self.qa_chain(
{"question": question, "context": context},
callbacks=callbacks,
)
final_result = result[self.qa_chain.output_key]
chain_result: Dict[str, Any] = {self.output_key: final_result}
if self.return_intermediate_steps:
chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
return chain_result
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/graph_qa/nebulagraph.py | """Question answering over a graph."""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from pydantic import Field
from langchain_community.chains.graph_qa.prompts import (
CYPHER_QA_PROMPT,
NGQL_GENERATION_PROMPT,
)
from langchain_community.graphs.nebula_graph import NebulaGraph
class NebulaGraphQAChain(Chain):
"""Chain for question-answering against a graph by generating nGQL statements.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
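    Example (a minimal sketch; `llm` and the NebulaGraph connection
    `graph` are assumed to be created by the caller and are not shown here):
        .. code-block:: python
        chain = NebulaGraphQAChain.from_llm(
            llm=llm,
            graph=graph,
            allow_dangerous_requests=True,
        )
        response = chain.run(query)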
"""
graph: NebulaGraph = Field(exclude=True)
ngql_generation_chain: LLMChain
qa_chain: LLMChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
allow_dangerous_requests: bool = False
"""Forced user opt-in to acknowledge that the chain can make dangerous requests.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the chain."""
super().__init__(**kwargs)
if self.allow_dangerous_requests is not True:
raise ValueError(
"In order to use this chain, you must acknowledge that it can make "
"dangerous requests by setting `allow_dangerous_requests` to `True`."
"You must narrowly scope the permissions of the database connection "
"to only include necessary permissions. Failure to do so may result "
"in data corruption or loss or reading sensitive data if such data is "
"present in the database."
"Only use this chain if you understand the risks and have taken the "
"necessary precautions. "
"See https://python.langchain.com/docs/security for more information."
)
@property
def input_keys(self) -> List[str]:
"""Return the input keys.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the output keys.
:meta private:
"""
_output_keys = [self.output_key]
return _output_keys
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT,
ngql_prompt: BasePromptTemplate = NGQL_GENERATION_PROMPT,
**kwargs: Any,
) -> NebulaGraphQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
ngql_generation_chain = LLMChain(llm=llm, prompt=ngql_prompt)
return cls(
qa_chain=qa_chain,
ngql_generation_chain=ngql_generation_chain,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""Generate nGQL statement, use it to look up in db and answer question."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
question = inputs[self.input_key]
generated_ngql = self.ngql_generation_chain.run(
{"question": question, "schema": self.graph.get_schema}, callbacks=callbacks
)
_run_manager.on_text("Generated nGQL:", end="\n", verbose=self.verbose)
_run_manager.on_text(
generated_ngql, color="green", end="\n", verbose=self.verbose
)
context = self.graph.query(generated_ngql)
_run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(context), color="green", end="\n", verbose=self.verbose
)
result = self.qa_chain(
{"question": question, "context": context},
callbacks=callbacks,
)
return {self.output_key: result[self.qa_chain.output_key]}
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/graph_qa/sparql.py | """
Question answering over an RDF or OWL graph using SPARQL.
"""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.base import BasePromptTemplate
from pydantic import Field
from langchain_community.chains.graph_qa.prompts import (
SPARQL_GENERATION_SELECT_PROMPT,
SPARQL_GENERATION_UPDATE_PROMPT,
SPARQL_INTENT_PROMPT,
SPARQL_QA_PROMPT,
)
from langchain_community.graphs.rdf_graph import RdfGraph
class GraphSparqlQAChain(Chain):
"""Question-answering against an RDF or OWL graph by generating SPARQL statements.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
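    Example (a minimal sketch; `llm` and the RdfGraph instance `graph`
    are assumed to be created by the caller and are not shown here):
        .. code-block:: python
        chain = GraphSparqlQAChain.from_llm(
            llm=llm,
            graph=graph,
            allow_dangerous_requests=True,
        )
        response = chain.run(query)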
"""
graph: RdfGraph = Field(exclude=True)
sparql_generation_select_chain: LLMChain
sparql_generation_update_chain: LLMChain
sparql_intent_chain: LLMChain
qa_chain: LLMChain
return_sparql_query: bool = False
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
sparql_query_key: str = "sparql_query" #: :meta private:
allow_dangerous_requests: bool = False
"""Forced user opt-in to acknowledge that the chain can make dangerous requests.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the chain."""
super().__init__(**kwargs)
if self.allow_dangerous_requests is not True:
raise ValueError(
"In order to use this chain, you must acknowledge that it can make "
"dangerous requests by setting `allow_dangerous_requests` to `True`."
"You must narrowly scope the permissions of the database connection "
"to only include necessary permissions. Failure to do so may result "
"in data corruption or loss or reading sensitive data if such data is "
"present in the database."
"Only use this chain if you understand the risks and have taken the "
"necessary precautions. "
"See https://python.langchain.com/docs/security for more information."
)
@property
def input_keys(self) -> List[str]:
"""Return the input keys.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the output keys.
:meta private:
"""
_output_keys = [self.output_key]
return _output_keys
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
qa_prompt: BasePromptTemplate = SPARQL_QA_PROMPT,
sparql_select_prompt: BasePromptTemplate = SPARQL_GENERATION_SELECT_PROMPT,
sparql_update_prompt: BasePromptTemplate = SPARQL_GENERATION_UPDATE_PROMPT,
sparql_intent_prompt: BasePromptTemplate = SPARQL_INTENT_PROMPT,
**kwargs: Any,
) -> GraphSparqlQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
sparql_generation_select_chain = LLMChain(llm=llm, prompt=sparql_select_prompt)
sparql_generation_update_chain = LLMChain(llm=llm, prompt=sparql_update_prompt)
sparql_intent_chain = LLMChain(llm=llm, prompt=sparql_intent_prompt)
return cls(
qa_chain=qa_chain,
sparql_generation_select_chain=sparql_generation_select_chain,
sparql_generation_update_chain=sparql_generation_update_chain,
sparql_intent_chain=sparql_intent_chain,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""
        Generate SPARQL query, use it to retrieve a response from the graph database and answer
the question.
"""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
prompt = inputs[self.input_key]
_intent = self.sparql_intent_chain.run({"prompt": prompt}, callbacks=callbacks)
intent = _intent.strip()
if "SELECT" in intent and "UPDATE" not in intent:
sparql_generation_chain = self.sparql_generation_select_chain
intent = "SELECT"
elif "UPDATE" in intent and "SELECT" not in intent:
sparql_generation_chain = self.sparql_generation_update_chain
intent = "UPDATE"
else:
raise ValueError(
"I am sorry, but this prompt seems to fit none of the currently "
"supported SPARQL query types, i.e., SELECT and UPDATE."
)
_run_manager.on_text("Identified intent:", end="\n", verbose=self.verbose)
_run_manager.on_text(intent, color="green", end="\n", verbose=self.verbose)
generated_sparql = sparql_generation_chain.run(
{"prompt": prompt, "schema": self.graph.get_schema}, callbacks=callbacks
)
_run_manager.on_text("Generated SPARQL:", end="\n", verbose=self.verbose)
_run_manager.on_text(
generated_sparql, color="green", end="\n", verbose=self.verbose
)
if intent == "SELECT":
context = self.graph.query(generated_sparql)
_run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(context), color="green", end="\n", verbose=self.verbose
)
result = self.qa_chain(
{"prompt": prompt, "context": context},
callbacks=callbacks,
)
res = result[self.qa_chain.output_key]
elif intent == "UPDATE":
self.graph.update(generated_sparql)
res = "Successfully inserted triples into the graph."
else:
raise ValueError("Unsupported SPARQL query type.")
chain_result: Dict[str, Any] = {self.output_key: res}
if self.return_sparql_query:
chain_result[self.sparql_query_key] = generated_sparql
return chain_result
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/graph_qa/cypher_utils.py | import re
from collections import namedtuple
from typing import Any, Dict, List, Optional, Tuple
from langchain_core._api.deprecation import deprecated
Schema = namedtuple("Schema", ["left_node", "relation", "right_node"])
@deprecated(
since="0.3.8",
removal="1.0",
alternative_import="langchain_neo4j.chains.graph_qa.cypher_utils.CypherQueryCorrector",
)
class CypherQueryCorrector:
"""
Used to correct relationship direction in generated Cypher statements.
This code is copied from the winner's submission to the Cypher competition:
https://github.com/sakusaku-rich/cypher-direction-competition
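    Example (a minimal sketch; the schema triple below is made up for illustration):
        .. code-block:: python
        corrector = CypherQueryCorrector(
            [Schema("Person", "DIRECTED", "Movie")]
        )
        # Returns the query with relationship directions corrected to match
        # the schema, or an empty string when no valid direction exists.
        fixed = corrector("MATCH (m:Movie)-[:DIRECTED]->(p:Person) RETURN p")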
"""
property_pattern = re.compile(r"\{.+?\}")
node_pattern = re.compile(r"\(.+?\)")
path_pattern = re.compile(
r"(\([^\,\(\)]*?(\{.+\})?[^\,\(\)]*?\))(<?-)(\[.*?\])?(->?)(\([^\,\(\)]*?(\{.+\})?[^\,\(\)]*?\))"
)
node_relation_node_pattern = re.compile(
r"(\()+(?P<left_node>[^()]*?)\)(?P<relation>.*?)\((?P<right_node>[^()]*?)(\))+"
)
relation_type_pattern = re.compile(r":(?P<relation_type>.+?)?(\{.+\})?]")
def __init__(self, schemas: List[Schema]):
"""
Args:
schemas: list of schemas
"""
self.schemas = schemas
def clean_node(self, node: str) -> str:
"""
Args:
node: node in string format
"""
node = re.sub(self.property_pattern, "", node)
node = node.replace("(", "")
node = node.replace(")", "")
node = node.strip()
return node
def detect_node_variables(self, query: str) -> Dict[str, List[str]]:
"""
Args:
query: cypher query
"""
nodes = re.findall(self.node_pattern, query)
nodes = [self.clean_node(node) for node in nodes]
res: Dict[str, Any] = {}
for node in nodes:
            if not node:
                # skip empty node strings
                continue
            parts = node.split(":")
variable = parts[0]
if variable not in res:
res[variable] = []
res[variable] += parts[1:]
return res
def extract_paths(self, query: str) -> "List[str]":
"""
Args:
query: cypher query
"""
paths = []
idx = 0
while matched := self.path_pattern.findall(query[idx:]):
matched = matched[0]
matched = [
m for i, m in enumerate(matched) if i not in [1, len(matched) - 1]
]
path = "".join(matched)
idx = query.find(path) + len(path) - len(matched[-1])
paths.append(path)
return paths
def judge_direction(self, relation: str) -> str:
"""
Args:
relation: relation in string format
"""
direction = "BIDIRECTIONAL"
if relation[0] == "<":
direction = "INCOMING"
if relation[-1] == ">":
direction = "OUTGOING"
return direction
def extract_node_variable(self, part: str) -> Optional[str]:
"""
Args:
part: node in string format
"""
part = part.lstrip("(").rstrip(")")
idx = part.find(":")
if idx != -1:
part = part[:idx]
return None if part == "" else part
def detect_labels(
self, str_node: str, node_variable_dict: Dict[str, Any]
) -> List[str]:
"""
Args:
str_node: node in string format
node_variable_dict: dictionary of node variables
"""
splitted_node = str_node.split(":")
variable = splitted_node[0]
labels = []
if variable in node_variable_dict:
labels = node_variable_dict[variable]
elif variable == "" and len(splitted_node) > 1:
labels = splitted_node[1:]
return labels
def verify_schema(
self,
from_node_labels: List[str],
relation_types: List[str],
to_node_labels: List[str],
) -> bool:
"""
Args:
from_node_labels: labels of the from node
            relation_types: types of the relation
to_node_labels: labels of the to node
"""
valid_schemas = self.schemas
if from_node_labels != []:
from_node_labels = [label.strip("`") for label in from_node_labels]
valid_schemas = [
schema for schema in valid_schemas if schema[0] in from_node_labels
]
if to_node_labels != []:
to_node_labels = [label.strip("`") for label in to_node_labels]
valid_schemas = [
schema for schema in valid_schemas if schema[2] in to_node_labels
]
if relation_types != []:
relation_types = [type.strip("`") for type in relation_types]
valid_schemas = [
schema for schema in valid_schemas if schema[1] in relation_types
]
return valid_schemas != []
def detect_relation_types(self, str_relation: str) -> Tuple[str, List[str]]:
"""
Args:
str_relation: relation in string format
"""
relation_direction = self.judge_direction(str_relation)
relation_type = self.relation_type_pattern.search(str_relation)
if relation_type is None or relation_type.group("relation_type") is None:
return relation_direction, []
relation_types = [
t.strip().strip("!")
for t in relation_type.group("relation_type").split("|")
]
return relation_direction, relation_types
def correct_query(self, query: str) -> str:
"""
Args:
query: cypher query
"""
node_variable_dict = self.detect_node_variables(query)
paths = self.extract_paths(query)
for path in paths:
original_path = path
start_idx = 0
while start_idx < len(path):
match_res = re.match(self.node_relation_node_pattern, path[start_idx:])
if match_res is None:
break
start_idx += match_res.start()
match_dict = match_res.groupdict()
left_node_labels = self.detect_labels(
match_dict["left_node"], node_variable_dict
)
right_node_labels = self.detect_labels(
match_dict["right_node"], node_variable_dict
)
end_idx = (
start_idx
+ 4
+ len(match_dict["left_node"])
+ len(match_dict["relation"])
+ len(match_dict["right_node"])
)
original_partial_path = original_path[start_idx : end_idx + 1]
relation_direction, relation_types = self.detect_relation_types(
match_dict["relation"]
)
if relation_types != [] and "".join(relation_types).find("*") != -1:
start_idx += (
len(match_dict["left_node"]) + len(match_dict["relation"]) + 2
)
continue
if relation_direction == "OUTGOING":
is_legal = self.verify_schema(
left_node_labels, relation_types, right_node_labels
)
if not is_legal:
is_legal = self.verify_schema(
right_node_labels, relation_types, left_node_labels
)
if is_legal:
corrected_relation = "<" + match_dict["relation"][:-1]
corrected_partial_path = original_partial_path.replace(
match_dict["relation"], corrected_relation
)
query = query.replace(
original_partial_path, corrected_partial_path
)
else:
return ""
elif relation_direction == "INCOMING":
is_legal = self.verify_schema(
right_node_labels, relation_types, left_node_labels
)
if not is_legal:
is_legal = self.verify_schema(
left_node_labels, relation_types, right_node_labels
)
if is_legal:
corrected_relation = match_dict["relation"][1:] + ">"
corrected_partial_path = original_partial_path.replace(
match_dict["relation"], corrected_relation
)
query = query.replace(
original_partial_path, corrected_partial_path
)
else:
return ""
else:
is_legal = self.verify_schema(
left_node_labels, relation_types, right_node_labels
)
is_legal |= self.verify_schema(
right_node_labels, relation_types, left_node_labels
)
if not is_legal:
return ""
start_idx += (
len(match_dict["left_node"]) + len(match_dict["relation"]) + 2
)
return query
def __call__(self, query: str) -> str:
"""Correct the query to make it valid. If
Args:
query: cypher query
"""
return self.correct_query(query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/graph_qa/ontotext_graphdb.py | """Question answering over a graph."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional
if TYPE_CHECKING:
import rdflib
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain_core.callbacks.manager import CallbackManager, CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.base import BasePromptTemplate
from pydantic import Field
from langchain_community.chains.graph_qa.prompts import (
GRAPHDB_QA_PROMPT,
GRAPHDB_SPARQL_FIX_PROMPT,
GRAPHDB_SPARQL_GENERATION_PROMPT,
)
from langchain_community.graphs import OntotextGraphDBGraph
class OntotextGraphDBQAChain(Chain):
"""Question-answering against Ontotext GraphDB
https://graphdb.ontotext.com/ by generating SPARQL queries.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
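    Example (a minimal sketch; `llm` and the OntotextGraphDBGraph instance
    `graph` are assumed to be created by the caller and are not shown here):
        .. code-block:: python
        chain = OntotextGraphDBQAChain.from_llm(
            llm=llm,
            graph=graph,
            allow_dangerous_requests=True,
        )
        response = chain.run(query)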
"""
graph: OntotextGraphDBGraph = Field(exclude=True)
sparql_generation_chain: LLMChain
sparql_fix_chain: LLMChain
max_fix_retries: int
qa_chain: LLMChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
allow_dangerous_requests: bool = False
"""Forced user opt-in to acknowledge that the chain can make dangerous requests.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the chain."""
super().__init__(**kwargs)
if self.allow_dangerous_requests is not True:
raise ValueError(
"In order to use this chain, you must acknowledge that it can make "
"dangerous requests by setting `allow_dangerous_requests` to `True`."
"You must narrowly scope the permissions of the database connection "
"to only include necessary permissions. Failure to do so may result "
"in data corruption or loss or reading sensitive data if such data is "
"present in the database."
"Only use this chain if you understand the risks and have taken the "
"necessary precautions. "
"See https://python.langchain.com/docs/security for more information."
)
@property
def input_keys(self) -> List[str]:
return [self.input_key]
@property
def output_keys(self) -> List[str]:
_output_keys = [self.output_key]
return _output_keys
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
sparql_generation_prompt: BasePromptTemplate = GRAPHDB_SPARQL_GENERATION_PROMPT,
sparql_fix_prompt: BasePromptTemplate = GRAPHDB_SPARQL_FIX_PROMPT,
max_fix_retries: int = 5,
qa_prompt: BasePromptTemplate = GRAPHDB_QA_PROMPT,
**kwargs: Any,
) -> OntotextGraphDBQAChain:
"""Initialize from LLM."""
sparql_generation_chain = LLMChain(llm=llm, prompt=sparql_generation_prompt)
sparql_fix_chain = LLMChain(llm=llm, prompt=sparql_fix_prompt)
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
return cls(
qa_chain=qa_chain,
sparql_generation_chain=sparql_generation_chain,
sparql_fix_chain=sparql_fix_chain,
max_fix_retries=max_fix_retries,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""
Generate a SPARQL query, use it to retrieve a response from GraphDB and answer
the question.
"""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
prompt = inputs[self.input_key]
ontology_schema = self.graph.get_schema
sparql_generation_chain_result = self.sparql_generation_chain.invoke(
{"prompt": prompt, "schema": ontology_schema}, callbacks=callbacks
)
generated_sparql = sparql_generation_chain_result[
self.sparql_generation_chain.output_key
]
generated_sparql = self._get_prepared_sparql_query(
_run_manager, callbacks, generated_sparql, ontology_schema
)
query_results = self._execute_query(generated_sparql)
qa_chain_result = self.qa_chain.invoke(
{"prompt": prompt, "context": query_results}, callbacks=callbacks
)
result = qa_chain_result[self.qa_chain.output_key]
return {self.output_key: result}
def _get_prepared_sparql_query(
self,
_run_manager: CallbackManagerForChainRun,
callbacks: CallbackManager,
generated_sparql: str,
ontology_schema: str,
) -> str:
try:
return self._prepare_sparql_query(_run_manager, generated_sparql)
except Exception as e:
retries = 0
error_message = str(e)
self._log_invalid_sparql_query(
_run_manager, generated_sparql, error_message
)
while retries < self.max_fix_retries:
try:
sparql_fix_chain_result = self.sparql_fix_chain.invoke(
{
"error_message": error_message,
"generated_sparql": generated_sparql,
"schema": ontology_schema,
},
callbacks=callbacks,
)
generated_sparql = sparql_fix_chain_result[
self.sparql_fix_chain.output_key
]
return self._prepare_sparql_query(_run_manager, generated_sparql)
except Exception as e:
retries += 1
parse_exception = str(e)
self._log_invalid_sparql_query(
_run_manager, generated_sparql, parse_exception
)
raise ValueError("The generated SPARQL query is invalid.")
def _prepare_sparql_query(
self, _run_manager: CallbackManagerForChainRun, generated_sparql: str
) -> str:
from rdflib.plugins.sparql import prepareQuery
prepareQuery(generated_sparql)
self._log_prepared_sparql_query(_run_manager, generated_sparql)
return generated_sparql
def _log_prepared_sparql_query(
self, _run_manager: CallbackManagerForChainRun, generated_query: str
) -> None:
_run_manager.on_text("Generated SPARQL:", end="\n", verbose=self.verbose)
_run_manager.on_text(
generated_query, color="green", end="\n", verbose=self.verbose
)
def _log_invalid_sparql_query(
self,
_run_manager: CallbackManagerForChainRun,
generated_query: str,
error_message: str,
) -> None:
_run_manager.on_text("Invalid SPARQL query: ", end="\n", verbose=self.verbose)
_run_manager.on_text(
generated_query, color="red", end="\n", verbose=self.verbose
)
_run_manager.on_text(
"SPARQL Query Parse Error: ", end="\n", verbose=self.verbose
)
_run_manager.on_text(
error_message, color="red", end="\n\n", verbose=self.verbose
)
def _execute_query(self, query: str) -> List[rdflib.query.ResultRow]:
try:
return self.graph.query(query)
except Exception:
raise ValueError("Failed to execute the generated SPARQL query.")
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/graph_qa/__init__.py | """Question answering over a knowledge graph."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/graph_qa/kuzu.py | """Question answering over a graph."""
from __future__ import annotations
import re
from typing import Any, Dict, List, Optional
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from pydantic import Field
from langchain_community.chains.graph_qa.prompts import (
CYPHER_QA_PROMPT,
KUZU_GENERATION_PROMPT,
)
from langchain_community.graphs.kuzu_graph import KuzuGraph
def remove_prefix(text: str, prefix: str) -> str:
"""Remove a prefix from a text.
Args:
text: Text to remove the prefix from.
prefix: Prefix to remove from the text.
Returns:
Text with the prefix removed.
"""
if text.startswith(prefix):
return text[len(prefix) :]
return text
def extract_cypher(text: str) -> str:
"""Extract Cypher code from a text.
Args:
text: Text to extract Cypher code from.
Returns:
Cypher code extracted from the text.
"""
# The pattern to find Cypher code enclosed in triple backticks
pattern = r"```(.*?)```"
# Find all matches in the input text
matches = re.findall(pattern, text, re.DOTALL)
return matches[0] if matches else text
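# Illustration (added for clarity, not in the original module): for an LLM reply such as
# "```cypher\nMATCH (n:Movie) RETURN n LIMIT 1\n```", extract_cypher() returns the fenced
# body, which still carries the leading "cypher" language marker; remove_prefix(..., "cypher")
# then strips that marker so only the bare Cypher statement is sent to the database.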
class KuzuQAChain(Chain):
"""Question-answering against a graph by generating Cypher statements for Kùzu.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
graph: KuzuGraph = Field(exclude=True)
cypher_generation_chain: LLMChain
qa_chain: LLMChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
allow_dangerous_requests: bool = False
"""Forced user opt-in to acknowledge that the chain can make dangerous requests.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the chain."""
super().__init__(**kwargs)
if self.allow_dangerous_requests is not True:
raise ValueError(
"In order to use this chain, you must acknowledge that it can make "
"dangerous requests by setting `allow_dangerous_requests` to `True`."
"You must narrowly scope the permissions of the database connection "
"to only include necessary permissions. Failure to do so may result "
"in data corruption or loss or reading sensitive data if such data is "
"present in the database."
"Only use this chain if you understand the risks and have taken the "
"necessary precautions. "
"See https://python.langchain.com/docs/security for more information."
)
@property
def input_keys(self) -> List[str]:
"""Return the input keys.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the output keys.
:meta private:
"""
_output_keys = [self.output_key]
return _output_keys
@classmethod
def from_llm(
cls,
llm: Optional[BaseLanguageModel] = None,
*,
qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT,
cypher_prompt: BasePromptTemplate = KUZU_GENERATION_PROMPT,
cypher_llm: Optional[BaseLanguageModel] = None,
qa_llm: Optional[BaseLanguageModel] = None,
**kwargs: Any,
) -> KuzuQAChain:
"""Initialize from LLM."""
if not cypher_llm and not llm:
raise ValueError("Either `llm` or `cypher_llm` parameters must be provided")
if not qa_llm and not llm:
raise ValueError(
"Either `llm` or `qa_llm` parameters must be provided along with"
" `cypher_llm`"
)
if cypher_llm and qa_llm and llm:
raise ValueError(
"You can specify up to two of 'cypher_llm', 'qa_llm'"
", and 'llm', but not all three simultaneously."
)
qa_chain = LLMChain(
llm=qa_llm or llm, # type: ignore[arg-type]
prompt=qa_prompt,
)
cypher_generation_chain = LLMChain(
llm=cypher_llm or llm, # type: ignore[arg-type]
prompt=cypher_prompt,
)
return cls(
qa_chain=qa_chain,
cypher_generation_chain=cypher_generation_chain,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""Generate Cypher statement, use it to look up in db and answer question."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
question = inputs[self.input_key]
generated_cypher = self.cypher_generation_chain.run(
{"question": question, "schema": self.graph.get_schema}, callbacks=callbacks
)
# Extract Cypher code if it is wrapped in triple backticks
# with the language marker "cypher"
generated_cypher = remove_prefix(extract_cypher(generated_cypher), "cypher")
_run_manager.on_text("Generated Cypher:", end="\n", verbose=self.verbose)
_run_manager.on_text(
generated_cypher, color="green", end="\n", verbose=self.verbose
)
context = self.graph.query(generated_cypher)
_run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(context), color="green", end="\n", verbose=self.verbose
)
result = self.qa_chain(
{"question": question, "context": context},
callbacks=callbacks,
)
return {self.output_key: result[self.qa_chain.output_key]}
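# A hedged end-to-end sketch, added for illustration and not part of the original module.
# The database path, the `kuzu` / `langchain_openai` imports and the model names are
# assumptions; the point is the llm / cypher_llm / qa_llm combinations that from_llm
# accepts and the mandatory allow_dangerous_requests opt-in. Depending on the installed
# version, KuzuGraph may require its own opt-in flag as well.
if __name__ == "__main__":
    import kuzu  # assumption: the kuzu package is installed
    from langchain_openai import ChatOpenAI  # assumption: langchain-openai installed
    db = kuzu.Database("./kuzu_db")  # placeholder path
    graph = KuzuGraph(db)
    chain = KuzuQAChain.from_llm(
        # Either a single `llm`, or separate `cypher_llm` and `qa_llm` (not all three).
        cypher_llm=ChatOpenAI(model="gpt-4o", temperature=0),
        qa_llm=ChatOpenAI(model="gpt-4o-mini", temperature=0),
        graph=graph,
        allow_dangerous_requests=True,
    )
    print(chain.invoke({"query": "Who directed the most movies?"}))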
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/graph_qa/neptune_sparql.py | """
Question answering over an RDF or OWL graph using SPARQL.
"""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain_core.callbacks.manager import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from pydantic import Field
from langchain_community.chains.graph_qa.prompts import SPARQL_QA_PROMPT
from langchain_community.graphs import NeptuneRdfGraph
INTERMEDIATE_STEPS_KEY = "intermediate_steps"
SPARQL_GENERATION_TEMPLATE = """
Task: Generate a SPARQL SELECT statement for querying a graph database.
For instance, to find all email addresses of John Doe, the following
query in backticks would be suitable:
```
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?email
WHERE {{
?person foaf:name "John Doe" .
?person foaf:mbox ?email .
}}
```
Instructions:
Use only the node types and properties provided in the schema.
Do not use any node types and properties that are not explicitly provided.
Include all necessary prefixes.
Examples:
Schema:
{schema}
Note: Be as concise as possible.
Do not include any explanations or apologies in your responses.
Do not respond to any questions that ask for anything other than
for you to construct a SPARQL query.
Do not include any text except the SPARQL query generated.
The question is:
{prompt}"""
SPARQL_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "prompt"], template=SPARQL_GENERATION_TEMPLATE
)
def extract_sparql(query: str) -> str:
"""Extract SPARQL code from a text.
Args:
query: Text to extract SPARQL code from.
Returns:
SPARQL code extracted from the text.
"""
query = query.strip()
querytoks = query.split("```")
if len(querytoks) == 3:
query = querytoks[1]
if query.startswith("sparql"):
query = query[6:]
elif query.startswith("<sparql>") and query.endswith("</sparql>"):
query = query[8:-9]
return query
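# Illustration (added for clarity, not in the original module): extract_sparql() unwraps
# replies such as "```sparql\nSELECT ...\n```" or "<sparql>SELECT ...</sparql>" by dropping
# the fence or tag and the optional "sparql" language marker, and returns the text
# unchanged when no wrapper is present.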
class NeptuneSparqlQAChain(Chain):
"""Chain for question-answering against a Neptune graph
by generating SPARQL statements.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
Example:
.. code-block:: python
chain = NeptuneSparqlQAChain.from_llm(
llm=llm,
graph=graph
)
response = chain.invoke(query)
"""
graph: NeptuneRdfGraph = Field(exclude=True)
sparql_generation_chain: LLMChain
qa_chain: LLMChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
top_k: int = 10
return_intermediate_steps: bool = False
"""Whether or not to return the intermediate steps along with the final answer."""
return_direct: bool = False
"""Whether or not to return the result of querying the graph directly."""
extra_instructions: Optional[str] = None
"""Extra instructions by the appended to the query generation prompt."""
allow_dangerous_requests: bool = False
"""Forced user opt-in to acknowledge that the chain can make dangerous requests.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the chain."""
super().__init__(**kwargs)
if self.allow_dangerous_requests is not True:
raise ValueError(
"In order to use this chain, you must acknowledge that it can make "
"dangerous requests by setting `allow_dangerous_requests` to `True`."
"You must narrowly scope the permissions of the database connection "
"to only include necessary permissions. Failure to do so may result "
"in data corruption or loss or reading sensitive data if such data is "
"present in the database."
"Only use this chain if you understand the risks and have taken the "
"necessary precautions. "
"See https://python.langchain.com/docs/security for more information."
)
@property
def input_keys(self) -> List[str]:
return [self.input_key]
@property
def output_keys(self) -> List[str]:
_output_keys = [self.output_key]
return _output_keys
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
qa_prompt: BasePromptTemplate = SPARQL_QA_PROMPT,
sparql_prompt: BasePromptTemplate = SPARQL_GENERATION_PROMPT,
examples: Optional[str] = None,
**kwargs: Any,
) -> NeptuneSparqlQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
template_to_use = SPARQL_GENERATION_TEMPLATE
if examples:
template_to_use = template_to_use.replace(
"Examples:", "Examples: " + examples
)
sparql_prompt = PromptTemplate(
input_variables=["schema", "prompt"], template=template_to_use
)
sparql_generation_chain = LLMChain(llm=llm, prompt=sparql_prompt)
return cls( # type: ignore[call-arg]
qa_chain=qa_chain,
sparql_generation_chain=sparql_generation_chain,
examples=examples,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""
        Generate a SPARQL query, use it to retrieve a response from the graph
        database, and answer the question.
"""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
prompt = inputs[self.input_key]
intermediate_steps: List = []
generated_sparql = self.sparql_generation_chain.run(
{"prompt": prompt, "schema": self.graph.get_schema}, callbacks=callbacks
)
# Extract SPARQL
generated_sparql = extract_sparql(generated_sparql)
_run_manager.on_text("Generated SPARQL:", end="\n", verbose=self.verbose)
_run_manager.on_text(
generated_sparql, color="green", end="\n", verbose=self.verbose
)
intermediate_steps.append({"query": generated_sparql})
context = self.graph.query(generated_sparql)
if self.return_direct:
final_result = context
else:
_run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(context), color="green", end="\n", verbose=self.verbose
)
intermediate_steps.append({"context": context})
result = self.qa_chain(
{"prompt": prompt, "context": context},
callbacks=callbacks,
)
final_result = result[self.qa_chain.output_key]
chain_result: Dict[str, Any] = {self.output_key: final_result}
if self.return_intermediate_steps:
chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
return chain_result
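# A hedged usage sketch, added for illustration and not part of the original module.
# The Neptune endpoint, the `langchain_openai` import and the few-shot `examples` string
# are placeholders; it shows how from_llm splices `examples` into the generation prompt
# and how the allow_dangerous_requests opt-in is passed through.
if __name__ == "__main__":
    from langchain_openai import ChatOpenAI  # assumption: langchain-openai installed
    graph = NeptuneRdfGraph(
        host="my-cluster.cluster-abc.us-east-1.neptune.amazonaws.com",  # placeholder
        port=8182,
    )
    chain = NeptuneSparqlQAChain.from_llm(
        llm=ChatOpenAI(temperature=0),
        graph=graph,
        examples="Question: ...\nSPARQL: SELECT ...",  # optional few-shot examples
        return_intermediate_steps=True,
        allow_dangerous_requests=True,
    )
    print(chain.invoke({"query": "How many classes are defined in the ontology?"}))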
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/natbot/base.py | from langchain.chains import NatBotChain
__all__ = ["NatBotChain"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/natbot/crawler.py | from langchain.chains.natbot.crawler import (
Crawler,
ElementInViewPort,
black_listed_elements,
)
__all__ = ["ElementInViewPort", "Crawler", "black_listed_elements"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/natbot/__init__.py | """Implement a GPT-3 driven browser.
Heavily influenced by https://github.com/nat/natbot
"""
from langchain_community.chains.natbot.base import NatBotChain
__all__ = ["NatBotChain"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/natbot/prompt.py | from langchain.chains.natbot.prompt import PROMPT
__all__ = ["PROMPT"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/pebblo_retrieval/utilities.py | import json
import logging
import os
import platform
from enum import Enum
from http import HTTPStatus
from typing import Any, Dict, List, Optional, Tuple
import aiohttp
from aiohttp import ClientTimeout
from langchain_core.documents import Document
from langchain_core.env import get_runtime_environment
from langchain_core.utils import get_from_dict_or_env
from langchain_core.vectorstores import VectorStoreRetriever
from pydantic import BaseModel
from requests import Response, request
from requests.exceptions import RequestException
from langchain_community.chains.pebblo_retrieval.models import (
App,
AuthContext,
Context,
Framework,
Prompt,
Qa,
Runtime,
)
logger = logging.getLogger(__name__)
PLUGIN_VERSION = "0.1.1"
_DEFAULT_CLASSIFIER_URL = "http://localhost:8000"
_DEFAULT_PEBBLO_CLOUD_URL = "https://api.daxa.ai"
class Routes(str, Enum):
"""Routes available for the Pebblo API as enumerator."""
retrieval_app_discover = "/v1/app/discover"
prompt = "/v1/prompt"
prompt_governance = "/v1/prompt/governance"
def get_runtime() -> Tuple[Framework, Runtime]:
"""Fetch the current Framework and Runtime details.
Returns:
Tuple[Framework, Runtime]: Framework and Runtime for the current app instance.
"""
runtime_env = get_runtime_environment()
framework = Framework(
name="langchain", version=runtime_env.get("library_version", None)
)
uname = platform.uname()
runtime = Runtime(
host=uname.node,
path=os.environ["PWD"],
platform=runtime_env.get("platform", "unknown"),
os=uname.system,
os_version=uname.version,
ip=get_ip(),
language=runtime_env.get("runtime", "unknown"),
language_version=runtime_env.get("runtime_version", "unknown"),
)
if "Darwin" in runtime.os:
runtime.type = "desktop"
runtime.runtime = "Mac OSX"
logger.debug(f"framework {framework}")
logger.debug(f"runtime {runtime}")
return framework, runtime
def get_ip() -> str:
"""Fetch local runtime ip address.
Returns:
str: IP address
"""
import socket # lazy imports
host = socket.gethostname()
try:
public_ip = socket.gethostbyname(host)
except Exception:
public_ip = socket.gethostbyname("localhost")
return public_ip
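# Illustration (added for clarity): get_runtime() pairs a Framework("langchain", <version>)
# record with a Runtime record describing the host: node name, os.environ["PWD"] (which must
# therefore be set), platform, OS details, the IP from get_ip() and the Python runtime.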
class PebbloRetrievalAPIWrapper(BaseModel):
"""Wrapper for Pebblo Retrieval API."""
api_key: Optional[str] # Use SecretStr
"""API key for Pebblo Cloud"""
classifier_location: str = "local"
"""Location of the classifier, local or cloud. Defaults to 'local'"""
classifier_url: Optional[str]
"""URL of the Pebblo Classifier"""
cloud_url: Optional[str]
"""URL of the Pebblo Cloud"""
def __init__(self, **kwargs: Any):
"""Validate that api key in environment."""
kwargs["api_key"] = get_from_dict_or_env(
kwargs, "api_key", "PEBBLO_API_KEY", ""
)
kwargs["classifier_url"] = get_from_dict_or_env(
kwargs, "classifier_url", "PEBBLO_CLASSIFIER_URL", _DEFAULT_CLASSIFIER_URL
)
kwargs["cloud_url"] = get_from_dict_or_env(
kwargs, "cloud_url", "PEBBLO_CLOUD_URL", _DEFAULT_PEBBLO_CLOUD_URL
)
super().__init__(**kwargs)
def send_app_discover(self, app: App) -> None:
"""
Send app discovery request to Pebblo server & cloud.
Args:
app (App): App instance to be discovered.
"""
pebblo_resp = None
payload = app.dict(exclude_unset=True)
if self.classifier_location == "local":
# Send app details to local classifier
headers = self._make_headers()
app_discover_url = (
f"{self.classifier_url}{Routes.retrieval_app_discover.value}"
)
pebblo_resp = self.make_request("POST", app_discover_url, headers, payload)
if self.api_key:
# Send app details to Pebblo cloud if api_key is present
headers = self._make_headers(cloud_request=True)
if pebblo_resp:
pebblo_server_version = json.loads(pebblo_resp.text).get(
"pebblo_server_version"
)
payload.update({"pebblo_server_version": pebblo_server_version})
payload.update({"pebblo_client_version": PLUGIN_VERSION})
pebblo_cloud_url = f"{self.cloud_url}{Routes.retrieval_app_discover.value}"
_ = self.make_request("POST", pebblo_cloud_url, headers, payload)
def send_prompt(
self,
app_name: str,
retriever: VectorStoreRetriever,
question: str,
answer: str,
auth_context: Optional[AuthContext],
docs: List[Document],
prompt_entities: Dict[str, Any],
prompt_time: str,
prompt_gov_enabled: bool = False,
) -> None:
"""
Send prompt to Pebblo server for classification.
        Then send the prompt to Daxa cloud (if api_key is present).
Args:
app_name (str): Name of the app.
retriever (VectorStoreRetriever): Retriever instance.
question (str): Question asked in the prompt.
answer (str): Answer generated by the model.
auth_context (Optional[AuthContext]): Authentication context.
docs (List[Document]): List of documents retrieved.
prompt_entities (Dict[str, Any]): Entities present in the prompt.
prompt_time (str): Time when the prompt was generated.
prompt_gov_enabled (bool): Whether prompt governance is enabled.
"""
pebblo_resp = None
payload = self.build_prompt_qa_payload(
app_name,
retriever,
question,
answer,
auth_context,
docs,
prompt_entities,
prompt_time,
prompt_gov_enabled,
)
if self.classifier_location == "local":
# Send prompt to local classifier
headers = self._make_headers()
prompt_url = f"{self.classifier_url}{Routes.prompt.value}"
pebblo_resp = self.make_request("POST", prompt_url, headers, payload)
if self.api_key:
# Send prompt to Pebblo cloud if api_key is present
if self.classifier_location == "local":
# If classifier location is local, then response, context and prompt
# should be fetched from pebblo_resp and replaced in payload.
pebblo_resp = pebblo_resp.json() if pebblo_resp else None
self.update_cloud_payload(payload, pebblo_resp)
headers = self._make_headers(cloud_request=True)
pebblo_cloud_prompt_url = f"{self.cloud_url}{Routes.prompt.value}"
_ = self.make_request("POST", pebblo_cloud_prompt_url, headers, payload)
elif self.classifier_location == "pebblo-cloud":
logger.warning("API key is missing for sending prompt to Pebblo cloud.")
raise NameError("API key is missing for sending prompt to Pebblo cloud.")
async def asend_prompt(
self,
app_name: str,
retriever: VectorStoreRetriever,
question: str,
answer: str,
auth_context: Optional[AuthContext],
docs: List[Document],
prompt_entities: Dict[str, Any],
prompt_time: str,
prompt_gov_enabled: bool = False,
) -> None:
"""
Send prompt to Pebblo server for classification.
        Then send the prompt to Daxa cloud (if api_key is present).
Args:
app_name (str): Name of the app.
retriever (VectorStoreRetriever): Retriever instance.
question (str): Question asked in the prompt.
answer (str): Answer generated by the model.
auth_context (Optional[AuthContext]): Authentication context.
docs (List[Document]): List of documents retrieved.
prompt_entities (Dict[str, Any]): Entities present in the prompt.
prompt_time (str): Time when the prompt was generated.
prompt_gov_enabled (bool): Whether prompt governance is enabled.
"""
pebblo_resp = None
payload = self.build_prompt_qa_payload(
app_name,
retriever,
question,
answer,
auth_context,
docs,
prompt_entities,
prompt_time,
prompt_gov_enabled,
)
if self.classifier_location == "local":
# Send prompt to local classifier
headers = self._make_headers()
prompt_url = f"{self.classifier_url}{Routes.prompt.value}"
pebblo_resp = await self.amake_request("POST", prompt_url, headers, payload)
if self.api_key:
# Send prompt to Pebblo cloud if api_key is present
if self.classifier_location == "local":
# If classifier location is local, then response, context and prompt
# should be fetched from pebblo_resp and replaced in payload.
self.update_cloud_payload(payload, pebblo_resp)
headers = self._make_headers(cloud_request=True)
pebblo_cloud_prompt_url = f"{self.cloud_url}{Routes.prompt.value}"
_ = await self.amake_request(
"POST", pebblo_cloud_prompt_url, headers, payload
)
elif self.classifier_location == "pebblo-cloud":
logger.warning("API key is missing for sending prompt to Pebblo cloud.")
raise NameError("API key is missing for sending prompt to Pebblo cloud.")
def check_prompt_validity(self, question: str) -> Tuple[bool, Dict[str, Any]]:
"""
Check the validity of the given prompt using a remote classification service.
        This method sends the prompt to a remote classifier service and returns
        whether it is valid, along with any entities detected in it.
Args:
question (str): The prompt question to be validated.
Returns:
bool: True if the prompt is valid (does not contain deny list entities),
False otherwise.
dict: The entities present in the prompt
"""
prompt_payload = {"prompt": question}
prompt_entities: dict = {"entities": {}, "entityCount": 0}
is_valid_prompt: bool = True
if self.classifier_location == "local":
headers = self._make_headers()
prompt_gov_api_url = (
f"{self.classifier_url}{Routes.prompt_governance.value}"
)
pebblo_resp = self.make_request(
"POST", prompt_gov_api_url, headers, prompt_payload
)
if pebblo_resp:
prompt_entities["entities"] = pebblo_resp.json().get("entities", {})
prompt_entities["entityCount"] = pebblo_resp.json().get(
"entityCount", 0
)
return is_valid_prompt, prompt_entities
async def acheck_prompt_validity(
self, question: str
) -> Tuple[bool, Dict[str, Any]]:
"""
Check the validity of the given prompt using a remote classification service.
        This method sends the prompt to a remote classifier service and returns
        whether it is valid, along with any entities detected in it.
Args:
question (str): The prompt question to be validated.
Returns:
bool: True if the prompt is valid (does not contain deny list entities),
False otherwise.
dict: The entities present in the prompt
"""
prompt_payload = {"prompt": question}
prompt_entities: dict = {"entities": {}, "entityCount": 0}
is_valid_prompt: bool = True
if self.classifier_location == "local":
headers = self._make_headers()
prompt_gov_api_url = (
f"{self.classifier_url}{Routes.prompt_governance.value}"
)
pebblo_resp = await self.amake_request(
"POST", prompt_gov_api_url, headers, prompt_payload
)
if pebblo_resp:
prompt_entities["entities"] = pebblo_resp.get("entities", {})
prompt_entities["entityCount"] = pebblo_resp.get("entityCount", 0)
return is_valid_prompt, prompt_entities
def _make_headers(self, cloud_request: bool = False) -> dict:
"""
Generate headers for the request.
        Args:
cloud_request (bool): flag indicating whether the request is for Pebblo
cloud.
        Returns:
dict: Headers for the request.
"""
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
}
if cloud_request:
# Add API key for Pebblo cloud request
if self.api_key:
headers.update({"x-api-key": self.api_key})
else:
logger.warning("API key is missing for Pebblo cloud request.")
return headers
@staticmethod
def make_request(
method: str,
url: str,
headers: dict,
payload: Optional[dict] = None,
timeout: int = 20,
) -> Optional[Response]:
"""
Make a request to the Pebblo server/cloud API.
Args:
method (str): HTTP method (GET, POST, PUT, DELETE, etc.).
url (str): URL for the request.
headers (dict): Headers for the request.
payload (Optional[dict]): Payload for the request (for POST, PUT, etc.).
timeout (int): Timeout for the request in seconds.
Returns:
Optional[Response]: Response object if the request is successful.
"""
try:
response = request(
method=method, url=url, headers=headers, json=payload, timeout=timeout
)
logger.debug(
"Request: method %s, url %s, len %s response status %s",
method,
response.request.url,
str(len(response.request.body if response.request.body else [])),
str(response.status_code),
)
if response.status_code >= HTTPStatus.INTERNAL_SERVER_ERROR:
logger.warning(f"Pebblo Server: Error {response.status_code}")
elif response.status_code >= HTTPStatus.BAD_REQUEST:
logger.warning(f"Pebblo received an invalid payload: {response.text}")
elif response.status_code != HTTPStatus.OK:
logger.warning(
f"Pebblo returned an unexpected response code: "
f"{response.status_code}"
)
return response
except RequestException:
logger.warning("Unable to reach server %s", url)
except Exception as e:
logger.warning("An Exception caught in make_request: %s", e)
return None
@staticmethod
def update_cloud_payload(payload: dict, pebblo_resp: Optional[dict]) -> None:
"""
Update the payload with response, prompt and context from Pebblo response.
Args:
payload (dict): Payload to be updated.
pebblo_resp (Optional[dict]): Response from Pebblo server.
"""
if pebblo_resp:
# Update response, prompt and context from pebblo response
response = payload.get("response", {})
response.update(pebblo_resp.get("retrieval_data", {}).get("response", {}))
response.pop("data", None)
prompt = payload.get("prompt", {})
prompt.update(pebblo_resp.get("retrieval_data", {}).get("prompt", {}))
prompt.pop("data", None)
context = payload.get("context", [])
for context_data in context:
context_data.pop("doc", None)
else:
payload["response"] = {}
payload["prompt"] = {}
payload["context"] = []
@staticmethod
async def amake_request(
method: str,
url: str,
headers: dict,
payload: Optional[dict] = None,
timeout: int = 20,
) -> Any:
"""
        Make an async request to the Pebblo server/cloud API.
Args:
method (str): HTTP method (GET, POST, PUT, DELETE, etc.).
url (str): URL for the request.
headers (dict): Headers for the request.
payload (Optional[dict]): Payload for the request (for POST, PUT, etc.).
timeout (int): Timeout for the request in seconds.
Returns:
Any: Response json if the request is successful.
"""
try:
client_timeout = ClientTimeout(total=timeout)
async with aiohttp.ClientSession() as asession:
async with asession.request(
method=method,
url=url,
json=payload,
headers=headers,
timeout=client_timeout,
) as response:
if response.status >= HTTPStatus.INTERNAL_SERVER_ERROR:
logger.warning(f"Pebblo Server: Error {response.status}")
elif response.status >= HTTPStatus.BAD_REQUEST:
                        logger.warning(
                            "Pebblo received an invalid payload: %s",
                            # aiohttp's `text` is a coroutine; await it to log the body
                            await response.text(),
                        )
elif response.status != HTTPStatus.OK:
logger.warning(
f"Pebblo returned an unexpected response code: "
f"{response.status}"
)
response_json = await response.json()
return response_json
except RequestException:
logger.warning("Unable to reach server %s", url)
except Exception as e:
logger.warning("An Exception caught in amake_request: %s", e)
return None
def build_prompt_qa_payload(
self,
app_name: str,
retriever: VectorStoreRetriever,
question: str,
answer: str,
auth_context: Optional[AuthContext],
docs: List[Document],
prompt_entities: Dict[str, Any],
prompt_time: str,
prompt_gov_enabled: bool = False,
) -> dict:
"""
Build the QA payload for the prompt.
Args:
app_name (str): Name of the app.
retriever (VectorStoreRetriever): Retriever instance.
question (str): Question asked in the prompt.
answer (str): Answer generated by the model.
auth_context (Optional[AuthContext]): Authentication context.
docs (List[Document]): List of documents retrieved.
prompt_entities (Dict[str, Any]): Entities present in the prompt.
prompt_time (str): Time when the prompt was generated.
prompt_gov_enabled (bool): Whether prompt governance is enabled.
Returns:
dict: The QA payload for the prompt.
"""
qa = Qa(
name=app_name,
context=[
Context(
retrieved_from=doc.metadata.get(
"full_path", doc.metadata.get("source")
),
doc=doc.page_content,
vector_db=retriever.vectorstore.__class__.__name__,
pb_checksum=doc.metadata.get("pb_checksum"),
)
for doc in docs
if isinstance(doc, Document)
],
prompt=Prompt(
data=question,
entities=prompt_entities.get("entities", {}),
entityCount=prompt_entities.get("entityCount", 0),
prompt_gov_enabled=prompt_gov_enabled,
),
response=Prompt(data=answer),
prompt_time=prompt_time,
user=auth_context.user_id if auth_context else "unknown",
user_identities=auth_context.user_auth
if auth_context and hasattr(auth_context, "user_auth")
else [],
classifier_location=self.classifier_location,
)
return qa.dict(exclude_unset=True)
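# A hedged sketch, added for illustration and not part of the original module. Constructing
# the wrapper resolves api_key, classifier_url and cloud_url from keyword arguments or the
# PEBBLO_API_KEY / PEBBLO_CLASSIFIER_URL / PEBBLO_CLOUD_URL environment variables, falling
# back to the defaults above. If no classifier is reachable, the HTTP helpers only log a
# warning, so the call below degrades gracefully.
if __name__ == "__main__":
    wrapper = PebbloRetrievalAPIWrapper(classifier_location="local")
    is_valid, entities = wrapper.check_prompt_validity("What is our Q3 revenue forecast?")
    print(is_valid, entities)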
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/pebblo_retrieval/models.py | """Models for the PebbloRetrievalQA chain."""
from typing import Any, List, Optional, Union
from pydantic import BaseModel
class AuthContext(BaseModel):
"""Class for an authorization context."""
name: Optional[str] = None
user_id: str
user_auth: List[str]
"""List of user authorizations, which may include their User ID and
the groups they are part of"""
class SemanticEntities(BaseModel):
"""Class for a semantic entity filter."""
deny: List[str]
class SemanticTopics(BaseModel):
"""Class for a semantic topic filter."""
deny: List[str]
class SemanticContext(BaseModel):
"""Class for a semantic context."""
pebblo_semantic_entities: Optional[SemanticEntities] = None
pebblo_semantic_topics: Optional[SemanticTopics] = None
def __init__(self, **data: Any) -> None:
super().__init__(**data)
# Validate semantic_context
if (
self.pebblo_semantic_entities is None
and self.pebblo_semantic_topics is None
):
raise ValueError(
"semantic_context must contain 'pebblo_semantic_entities' or "
"'pebblo_semantic_topics'"
)
class ChainInput(BaseModel):
"""Input for PebbloRetrievalQA chain."""
query: str
auth_context: Optional[AuthContext] = None
semantic_context: Optional[SemanticContext] = None
def dict(self, **kwargs: Any) -> dict:
base_dict = super().dict(**kwargs)
# Keep auth_context and semantic_context as it is(Pydantic models)
base_dict["auth_context"] = self.auth_context
base_dict["semantic_context"] = self.semantic_context
return base_dict
class Runtime(BaseModel):
"""
OS, language details
"""
type: Optional[str] = ""
host: str
path: str
ip: Optional[str] = ""
platform: str
os: str
os_version: str
language: str
language_version: str
runtime: Optional[str] = ""
class Framework(BaseModel):
"""
Langchain framework details
"""
name: str
version: str
class Model(BaseModel):
vendor: Optional[str]
name: Optional[str]
class PkgInfo(BaseModel):
project_home_page: Optional[str]
documentation_url: Optional[str]
pypi_url: Optional[str]
liscence_type: Optional[str]
installed_via: Optional[str]
location: Optional[str]
class VectorDB(BaseModel):
name: Optional[str] = None
version: Optional[str] = None
location: Optional[str] = None
embedding_model: Optional[str] = None
class ChainInfo(BaseModel):
name: str
model: Optional[Model]
vector_dbs: Optional[List[VectorDB]]
class App(BaseModel):
name: str
owner: str
description: Optional[str]
runtime: Runtime
framework: Framework
chains: List[ChainInfo]
plugin_version: str
client_version: Framework
class Context(BaseModel):
retrieved_from: Optional[str]
doc: Optional[str]
vector_db: str
pb_checksum: Optional[str]
class Prompt(BaseModel):
data: Optional[Union[list, str]]
entityCount: Optional[int] = None
entities: Optional[dict] = None
prompt_gov_enabled: Optional[bool] = None
class Qa(BaseModel):
name: str
context: Union[List[Optional[Context]], Optional[Context]]
prompt: Optional[Prompt]
response: Optional[Prompt]
prompt_time: str
user: str
user_identities: Optional[List[str]]
classifier_location: str
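# A small runnable illustration, added for clarity and not part of the original module,
# of how the request models above fit together; the user id, groups and topic names are
# made up.
if __name__ == "__main__":
    auth = AuthContext(
        user_id="alice@acme.org",
        user_auth=["alice@acme.org", "sales-group"],
    )
    semantic = SemanticContext(
        pebblo_semantic_topics=SemanticTopics(deny=["financial-report"]),
    )
    chain_input = ChainInput(
        query="Summarise the latest board deck",
        auth_context=auth,
        semantic_context=semantic,
    )
    # ChainInput.dict() keeps auth_context / semantic_context as Pydantic models so the
    # chain can hand them to the enforcement filters unchanged.
    print(chain_input.dict())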
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/pebblo_retrieval/base.py | """
Pebblo Retrieval Chain with Identity & Semantic Enforcement for question-answering
against a vector database.
"""
import datetime
import inspect
import logging
from importlib.metadata import version
from typing import Any, Dict, List, Optional
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.vectorstores import VectorStoreRetriever
from pydantic import ConfigDict, Field, validator
from langchain_community.chains.pebblo_retrieval.enforcement_filters import (
SUPPORTED_VECTORSTORES,
set_enforcement_filters,
)
from langchain_community.chains.pebblo_retrieval.models import (
App,
AuthContext,
ChainInfo,
Framework,
Model,
SemanticContext,
VectorDB,
)
from langchain_community.chains.pebblo_retrieval.utilities import (
PLUGIN_VERSION,
PebbloRetrievalAPIWrapper,
get_runtime,
)
logger = logging.getLogger(__name__)
class PebbloRetrievalQA(Chain):
"""
Retrieval Chain with Identity & Semantic Enforcement for question-answering
against a vector database.
"""
combine_documents_chain: BaseCombineDocumentsChain
"""Chain to use to combine the documents."""
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
return_source_documents: bool = False
"""Return the source documents or not."""
retriever: VectorStoreRetriever = Field(exclude=True)
"""VectorStore to use for retrieval."""
auth_context_key: str = "auth_context" #: :meta private:
"""Authentication context for identity enforcement."""
semantic_context_key: str = "semantic_context" #: :meta private:
"""Semantic context for semantic enforcement."""
app_name: str #: :meta private:
"""App name."""
owner: str #: :meta private:
"""Owner of app."""
description: str #: :meta private:
"""Description of app."""
api_key: Optional[str] = None #: :meta private:
"""Pebblo cloud API key for app."""
classifier_url: Optional[str] = None #: :meta private:
"""Classifier endpoint."""
classifier_location: str = "local" #: :meta private:
"""Classifier location. It could be either of 'local' or 'pebblo-cloud'."""
_discover_sent: bool = False #: :meta private:
"""Flag to check if discover payload has been sent."""
enable_prompt_gov: bool = True #: :meta private:
"""Flag to check if prompt governance is enabled or not"""
pb_client: PebbloRetrievalAPIWrapper = Field(
default_factory=PebbloRetrievalAPIWrapper
)
"""Pebblo Retrieval API client"""
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run get_relevant_text and llm on input query.
If chain has 'return_source_documents' as 'True', returns
the retrieved documents as well under the key 'source_documents'.
Example:
.. code-block:: python
res = indexqa({'query': 'This is my query'})
answer, docs = res['result'], res['source_documents']
"""
prompt_time = datetime.datetime.now().isoformat()
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
question = inputs[self.input_key]
auth_context = inputs.get(self.auth_context_key)
semantic_context = inputs.get(self.semantic_context_key)
_, prompt_entities = self.pb_client.check_prompt_validity(question)
accepts_run_manager = (
"run_manager" in inspect.signature(self._get_docs).parameters
)
if accepts_run_manager:
docs = self._get_docs(
question, auth_context, semantic_context, run_manager=_run_manager
)
else:
docs = self._get_docs(question, auth_context, semantic_context) # type: ignore[call-arg]
answer = self.combine_documents_chain.run(
input_documents=docs, question=question, callbacks=_run_manager.get_child()
)
self.pb_client.send_prompt(
self.app_name,
self.retriever,
question,
answer,
auth_context,
docs,
prompt_entities,
prompt_time,
self.enable_prompt_gov,
)
if self.return_source_documents:
return {self.output_key: answer, "source_documents": docs}
else:
return {self.output_key: answer}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run get_relevant_text and llm on input query.
If chain has 'return_source_documents' as 'True', returns
the retrieved documents as well under the key 'source_documents'.
Example:
.. code-block:: python
res = indexqa({'query': 'This is my query'})
answer, docs = res['result'], res['source_documents']
"""
prompt_time = datetime.datetime.now().isoformat()
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
question = inputs[self.input_key]
auth_context = inputs.get(self.auth_context_key)
semantic_context = inputs.get(self.semantic_context_key)
accepts_run_manager = (
"run_manager" in inspect.signature(self._aget_docs).parameters
)
_, prompt_entities = await self.pb_client.acheck_prompt_validity(question)
if accepts_run_manager:
docs = await self._aget_docs(
question, auth_context, semantic_context, run_manager=_run_manager
)
else:
docs = await self._aget_docs(question, auth_context, semantic_context) # type: ignore[call-arg]
answer = await self.combine_documents_chain.arun(
input_documents=docs, question=question, callbacks=_run_manager.get_child()
)
await self.pb_client.asend_prompt(
self.app_name,
self.retriever,
question,
answer,
auth_context,
docs,
prompt_entities,
prompt_time,
self.enable_prompt_gov,
)
if self.return_source_documents:
return {self.output_key: answer, "source_documents": docs}
else:
return {self.output_key: answer}
model_config = ConfigDict(
populate_by_name=True,
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> List[str]:
"""Input keys.
:meta private:
"""
return [self.input_key, self.auth_context_key, self.semantic_context_key]
@property
def output_keys(self) -> List[str]:
"""Output keys.
:meta private:
"""
_output_keys = [self.output_key]
if self.return_source_documents:
_output_keys += ["source_documents"]
return _output_keys
@property
def _chain_type(self) -> str:
"""Return the chain type."""
return "pebblo_retrieval_qa"
@classmethod
def from_chain_type(
cls,
llm: BaseLanguageModel,
app_name: str,
description: str,
owner: str,
chain_type: str = "stuff",
chain_type_kwargs: Optional[dict] = None,
api_key: Optional[str] = None,
classifier_url: Optional[str] = None,
classifier_location: str = "local",
**kwargs: Any,
) -> "PebbloRetrievalQA":
"""Load chain from chain type."""
from langchain.chains.question_answering import load_qa_chain
_chain_type_kwargs = chain_type_kwargs or {}
combine_documents_chain = load_qa_chain(
llm, chain_type=chain_type, **_chain_type_kwargs
)
# generate app
app: App = PebbloRetrievalQA._get_app_details(
app_name=app_name,
description=description,
owner=owner,
llm=llm,
**kwargs,
)
# initialize Pebblo API client
pb_client = PebbloRetrievalAPIWrapper(
api_key=api_key,
classifier_location=classifier_location,
classifier_url=classifier_url,
)
# send app discovery request
pb_client.send_app_discover(app)
return cls(
combine_documents_chain=combine_documents_chain,
app_name=app_name,
owner=owner,
description=description,
api_key=api_key,
classifier_url=classifier_url,
classifier_location=classifier_location,
pb_client=pb_client,
**kwargs,
)
@validator("retriever", pre=True, always=True)
def validate_vectorstore(
cls, retriever: VectorStoreRetriever
) -> VectorStoreRetriever:
"""
Validate that the vectorstore of the retriever is supported vectorstores.
"""
if retriever.vectorstore.__class__.__name__ not in SUPPORTED_VECTORSTORES:
raise ValueError(
f"Vectorstore must be an instance of one of the supported "
f"vectorstores: {SUPPORTED_VECTORSTORES}. "
f"Got '{retriever.vectorstore.__class__.__name__}' instead."
)
return retriever
def _get_docs(
self,
question: str,
auth_context: Optional[AuthContext],
semantic_context: Optional[SemanticContext],
*,
run_manager: CallbackManagerForChainRun,
) -> List[Document]:
"""Get docs."""
set_enforcement_filters(self.retriever, auth_context, semantic_context)
return self.retriever.get_relevant_documents(
question, callbacks=run_manager.get_child()
)
async def _aget_docs(
self,
question: str,
auth_context: Optional[AuthContext],
semantic_context: Optional[SemanticContext],
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> List[Document]:
"""Get docs."""
set_enforcement_filters(self.retriever, auth_context, semantic_context)
return await self.retriever.aget_relevant_documents(
question, callbacks=run_manager.get_child()
)
@staticmethod
def _get_app_details( # type: ignore
app_name: str, owner: str, description: str, llm: BaseLanguageModel, **kwargs
) -> App:
"""Fetch app details. Internal method.
Returns:
App: App details.
"""
framework, runtime = get_runtime()
chains = PebbloRetrievalQA.get_chain_details(llm, **kwargs)
app = App(
name=app_name,
owner=owner,
description=description,
runtime=runtime,
framework=framework,
chains=chains,
plugin_version=PLUGIN_VERSION,
client_version=Framework(
name="langchain_community",
version=version("langchain_community"),
),
)
return app
@classmethod
def set_discover_sent(cls) -> None:
cls._discover_sent = True
@classmethod
def get_chain_details(
cls, llm: BaseLanguageModel, **kwargs: Any
) -> List[ChainInfo]:
"""
Get chain details.
Args:
llm (BaseLanguageModel): Language model instance.
**kwargs: Additional keyword arguments.
Returns:
List[ChainInfo]: Chain details.
"""
llm_dict = llm.__dict__
chains = [
ChainInfo(
name=cls.__name__,
model=Model(
name=llm_dict.get("model_name", llm_dict.get("model")),
vendor=llm.__class__.__name__,
),
vector_dbs=[
VectorDB(
name=kwargs["retriever"].vectorstore.__class__.__name__,
embedding_model=str(
kwargs["retriever"].vectorstore._embeddings.model
)
if hasattr(kwargs["retriever"].vectorstore, "_embeddings")
else (
str(kwargs["retriever"].vectorstore._embedding.model)
if hasattr(kwargs["retriever"].vectorstore, "_embedding")
else None
),
)
],
),
]
return chains
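# A hedged construction sketch, added for illustration and not part of the original module.
# It assumes `langchain_openai` and `qdrant-client` are installed and an OPENAI_API_KEY is
# configured; FakeEmbeddings and the in-memory Qdrant store are stand-ins for a real,
# supported vector store (Pinecone, Qdrant or PGVector).
if __name__ == "__main__":
    from langchain_community.chains.pebblo_retrieval.models import ChainInput
    from langchain_community.embeddings import FakeEmbeddings  # placeholder embeddings
    from langchain_community.vectorstores import Qdrant
    from langchain_openai import ChatOpenAI  # assumption: langchain-openai installed
    vectordb = Qdrant.from_texts(
        ["Parental leave is 16 weeks."],
        FakeEmbeddings(size=8),
        location=":memory:",  # in-memory Qdrant, one of the supported vector stores
    )
    chain = PebbloRetrievalQA.from_chain_type(
        llm=ChatOpenAI(temperature=0),
        app_name="hr-assistant",
        owner="platform-team",
        description="Identity-aware retrieval over HR documents",
        chain_type="stuff",
        retriever=vectordb.as_retriever(),
    )
    auth = AuthContext(user_id="alice@acme.org", user_auth=["alice@acme.org", "hr-group"])
    chain_input = ChainInput(query="What is the parental leave policy?", auth_context=auth)
    print(chain.invoke(chain_input.dict()))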
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/pebblo_retrieval/enforcement_filters.py | """
Identity & Semantic Enforcement filters for PebbloRetrievalQA chain:
This module contains methods for applying Identity and Semantic Enforcement filters
in the PebbloRetrievalQA chain.
These filters are used to control the retrieval of documents based on authorization and
semantic context.
The Identity Enforcement filter ensures that only authorized identities can access
certain documents, while the Semantic Enforcement filter controls document retrieval
based on semantic context.
The methods in this module are designed to work with different types of vector stores.
"""
import logging
from typing import Any, List, Optional, Union
from langchain_core.vectorstores import VectorStoreRetriever
from langchain_community.chains.pebblo_retrieval.models import (
AuthContext,
SemanticContext,
)
logger = logging.getLogger(__name__)
PINECONE = "Pinecone"
QDRANT = "Qdrant"
PGVECTOR = "PGVector"
SUPPORTED_VECTORSTORES = {PINECONE, QDRANT, PGVECTOR}
def clear_enforcement_filters(retriever: VectorStoreRetriever) -> None:
"""
Clear the identity and semantic enforcement filters in the retriever search_kwargs.
"""
if retriever.vectorstore.__class__.__name__ == PGVECTOR:
search_kwargs = retriever.search_kwargs
if "filter" in search_kwargs:
filters = search_kwargs["filter"]
_pgvector_clear_pebblo_filters(
search_kwargs, filters, "authorized_identities"
)
_pgvector_clear_pebblo_filters(
search_kwargs, filters, "pebblo_semantic_topics"
)
_pgvector_clear_pebblo_filters(
search_kwargs, filters, "pebblo_semantic_entities"
)
def set_enforcement_filters(
retriever: VectorStoreRetriever,
auth_context: Optional[AuthContext],
semantic_context: Optional[SemanticContext],
) -> None:
"""
Set identity and semantic enforcement filters in the retriever.
"""
# Clear existing enforcement filters
clear_enforcement_filters(retriever)
if auth_context is not None:
_set_identity_enforcement_filter(retriever, auth_context)
if semantic_context is not None:
_set_semantic_enforcement_filter(retriever, semantic_context)
def _apply_qdrant_semantic_filter(
search_kwargs: dict, semantic_context: Optional[SemanticContext]
) -> None:
"""
Set semantic enforcement filter in search_kwargs for Qdrant vectorstore.
"""
try:
from qdrant_client.http import models as rest
except ImportError as e:
raise ValueError(
"Could not import `qdrant-client.http` python package. "
"Please install it with `pip install qdrant-client`."
) from e
# Create a semantic enforcement filter condition
semantic_filters: List[
Union[
rest.FieldCondition,
rest.IsEmptyCondition,
rest.IsNullCondition,
rest.HasIdCondition,
rest.NestedCondition,
rest.Filter,
]
] = []
if (
semantic_context is not None
and semantic_context.pebblo_semantic_topics is not None
):
semantic_topics_filter = rest.FieldCondition(
key="metadata.pebblo_semantic_topics",
match=rest.MatchAny(any=semantic_context.pebblo_semantic_topics.deny),
)
semantic_filters.append(semantic_topics_filter)
if (
semantic_context is not None
and semantic_context.pebblo_semantic_entities is not None
):
semantic_entities_filter = rest.FieldCondition(
key="metadata.pebblo_semantic_entities",
match=rest.MatchAny(any=semantic_context.pebblo_semantic_entities.deny),
)
semantic_filters.append(semantic_entities_filter)
# If 'filter' already exists in search_kwargs
if "filter" in search_kwargs:
existing_filter: rest.Filter = search_kwargs["filter"]
# Check if existing_filter is a qdrant-client filter
if isinstance(existing_filter, rest.Filter):
# If 'must_not' condition exists in the existing filter
if isinstance(existing_filter.must_not, list):
# Warn if 'pebblo_semantic_topics' or 'pebblo_semantic_entities'
# filter is overridden
new_must_not_conditions: List[
Union[
rest.FieldCondition,
rest.IsEmptyCondition,
rest.IsNullCondition,
rest.HasIdCondition,
rest.NestedCondition,
rest.Filter,
]
] = []
# Drop semantic filter conditions if already present
for condition in existing_filter.must_not:
if hasattr(condition, "key"):
if condition.key == "metadata.pebblo_semantic_topics":
continue
if condition.key == "metadata.pebblo_semantic_entities":
continue
new_must_not_conditions.append(condition)
# Add semantic enforcement filters to 'must_not' conditions
existing_filter.must_not = new_must_not_conditions
existing_filter.must_not.extend(semantic_filters)
else:
# Set 'must_not' condition with semantic enforcement filters
existing_filter.must_not = semantic_filters
else:
raise TypeError(
"Using dict as a `filter` is deprecated. "
"Please use qdrant-client filters directly: "
"https://qdrant.tech/documentation/concepts/filtering/"
)
else:
# If 'filter' does not exist in search_kwargs, create it
search_kwargs["filter"] = rest.Filter(must_not=semantic_filters)
def _apply_qdrant_authorization_filter(
search_kwargs: dict, auth_context: Optional[AuthContext]
) -> None:
"""
Set identity enforcement filter in search_kwargs for Qdrant vectorstore.
"""
try:
from qdrant_client.http import models as rest
except ImportError as e:
raise ValueError(
"Could not import `qdrant-client.http` python package. "
"Please install it with `pip install qdrant-client`."
) from e
if auth_context is not None:
# Create a identity enforcement filter condition
identity_enforcement_filter = rest.FieldCondition(
key="metadata.authorized_identities",
match=rest.MatchAny(any=auth_context.user_auth),
)
else:
return
# If 'filter' already exists in search_kwargs
if "filter" in search_kwargs:
existing_filter: rest.Filter = search_kwargs["filter"]
# Check if existing_filter is a qdrant-client filter
if isinstance(existing_filter, rest.Filter):
# If 'must' exists in the existing filter
if existing_filter.must:
new_must_conditions: List[
Union[
rest.FieldCondition,
rest.IsEmptyCondition,
rest.IsNullCondition,
rest.HasIdCondition,
rest.NestedCondition,
rest.Filter,
]
] = []
# Drop 'authorized_identities' filter condition if already present
for condition in existing_filter.must:
if (
hasattr(condition, "key")
and condition.key == "metadata.authorized_identities"
):
continue
new_must_conditions.append(condition)
# Add identity enforcement filter to 'must' conditions
existing_filter.must = new_must_conditions
existing_filter.must.append(identity_enforcement_filter)
else:
# Set 'must' condition with identity enforcement filter
existing_filter.must = [identity_enforcement_filter]
else:
raise TypeError(
"Using dict as a `filter` is deprecated. "
"Please use qdrant-client filters directly: "
"https://qdrant.tech/documentation/concepts/filtering/"
)
else:
# If 'filter' does not exist in search_kwargs, create it
search_kwargs["filter"] = rest.Filter(must=[identity_enforcement_filter])
def _apply_pinecone_semantic_filter(
search_kwargs: dict, semantic_context: Optional[SemanticContext]
) -> None:
"""
Set semantic enforcement filter in search_kwargs for Pinecone vectorstore.
"""
# Check if semantic_context is provided
semantic_context = semantic_context
if semantic_context is not None:
if semantic_context.pebblo_semantic_topics is not None:
# Add pebblo_semantic_topics filter to search_kwargs
search_kwargs.setdefault("filter", {})["pebblo_semantic_topics"] = {
"$nin": semantic_context.pebblo_semantic_topics.deny
}
if semantic_context.pebblo_semantic_entities is not None:
# Add pebblo_semantic_entities filter to search_kwargs
search_kwargs.setdefault("filter", {})["pebblo_semantic_entities"] = {
"$nin": semantic_context.pebblo_semantic_entities.deny
}
def _apply_pinecone_authorization_filter(
search_kwargs: dict, auth_context: Optional[AuthContext]
) -> None:
"""
Set identity enforcement filter in search_kwargs for Pinecone vectorstore.
"""
if auth_context is not None:
search_kwargs.setdefault("filter", {})["authorized_identities"] = {
"$in": auth_context.user_auth
}
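# Illustration (added for clarity): starting from empty search_kwargs, an AuthContext with
# user_auth=["alice@acme.org", "sales-group"] leaves
#   {"filter": {"authorized_identities": {"$in": ["alice@acme.org", "sales-group"]}}}
# while the semantic helper above adds "$nin" clauses for denied topics and entities.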
def _apply_pgvector_filter(
search_kwargs: dict, filters: Optional[Any], pebblo_filter: dict
) -> None:
"""
Apply pebblo filters in the search_kwargs filters.
"""
if isinstance(filters, dict):
if len(filters) == 1:
# The only operators allowed at the top level are $and, $or, and $not
# First check if an operator or a field
key, value = list(filters.items())[0]
if key.startswith("$"):
# Then it's an operator
if key.lower() not in ["$and", "$or", "$not"]:
raise ValueError(
f"Invalid filter condition. Expected $and, $or or $not "
f"but got: {key}"
)
if not isinstance(value, list):
raise ValueError(
f"Expected a list, but got {type(value)} for value: {value}"
)
# Here we handle the $and, $or, and $not operators(Semantic filters)
if key.lower() == "$and":
# Add pebblo_filter to the $and list as it is
value.append(pebblo_filter)
elif key.lower() == "$not":
# Check if pebblo_filter is an operator or a field
_key, _value = list(pebblo_filter.items())[0]
if _key.startswith("$"):
                        # Then it's an operator
if _key.lower() == "$not":
                            # It's a Semantic filter; add its value to filters
value.append(_value)
logger.warning(
"Adding $not operator to the existing $not operator"
)
return
else:
# Only $not operator is supported in pebblo_filter
raise ValueError(
f"Invalid filter key. Expected '$not' but got: {_key}"
)
else:
# Then it's a field(Auth filter), move filters into $and
search_kwargs["filter"] = {"$and": [filters, pebblo_filter]}
return
elif key.lower() == "$or":
search_kwargs["filter"] = {"$and": [filters, pebblo_filter]}
else:
# Then it's a field and we can check pebblo_filter now
# Check if pebblo_filter is an operator or a field
_key, _ = list(pebblo_filter.items())[0]
if _key.startswith("$"):
                    # Then it's an operator
if _key.lower() == "$not":
# It's a $not operator(Semantic filter), move filters into $and
search_kwargs["filter"] = {"$and": [filters, pebblo_filter]}
return
else:
# Only $not operator is allowed in pebblo_filter
raise ValueError(
f"Invalid filter key. Expected '$not' but got: {_key}"
)
else:
# Then it's a field(This handles Auth filter)
filters.update(pebblo_filter)
return
elif len(filters) > 1:
# Then all keys have to be fields (they cannot be operators)
for key in filters.keys():
if key.startswith("$"):
raise ValueError(
f"Invalid filter condition. Expected a field but got: {key}"
)
# filters should all be fields and we can check pebblo_filter now
# Check if pebblo_filter is an operator or a field
_key, _ = list(pebblo_filter.items())[0]
if _key.startswith("$"):
                # Then it's an operator
if _key.lower() == "$not":
                    # It's a $not operator (Semantic filter), move filters into $and
search_kwargs["filter"] = {"$and": [filters, pebblo_filter]}
return
else:
# Only $not operator is supported in pebblo_filter
raise ValueError(
f"Invalid filter key. Expected '$not' but got: {_key}"
)
else:
                # Then it's a field (this handles the Auth filter)
filters.update(pebblo_filter)
return
else:
# Got an empty dictionary for filters, set pebblo_filter in filter
search_kwargs.setdefault("filter", {}).update(pebblo_filter)
elif filters is None:
# If filters is None, set pebblo_filter as a new filter
search_kwargs.setdefault("filter", {}).update(pebblo_filter)
else:
raise ValueError(
f"Invalid filter. Expected a dictionary/None but got type: {type(filters)}"
)
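# Illustrative sketch with assumed values, not part of the library: when the caller
# already supplies a plain field filter, the pebblo auth filter is merged in place.
#   search_kwargs = {"filter": {"team": {"$eq": "eng"}}}
#   _apply_pgvector_filter(
#       search_kwargs,
#       search_kwargs["filter"],
#       {"authorized_identities": {"$eq": ["user@acme.org"]}},
#   )
#   # search_kwargs["filter"] == {"team": {"$eq": "eng"},
#   #                             "authorized_identities": {"$eq": ["user@acme.org"]}}
# If the existing filter is an "$or" (or a conflicting "$not"), both filters are
# wrapped under a new "$and" instead.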
def _pgvector_clear_pebblo_filters(
search_kwargs: dict, filters: dict, pebblo_filter_key: str
) -> None:
"""
Remove pebblo filters from the search_kwargs filters.
"""
if isinstance(filters, dict):
if len(filters) == 1:
# The only operators allowed at the top level are $and, $or, and $not
# First check if an operator or a field
key, value = list(filters.items())[0]
if key.startswith("$"):
# Then it's an operator
# Validate the operator's key and value type
if key.lower() not in ["$and", "$or", "$not"]:
raise ValueError(
f"Invalid filter condition. Expected $and, $or or $not "
f"but got: {key}"
)
elif not isinstance(value, list):
raise ValueError(
f"Expected a list, but got {type(value)} for value: {value}"
)
# Here we handle the $and, $or, and $not operators
if key.lower() == "$and":
# Remove the pebblo filter from the $and list
for i, _filter in enumerate(value):
if pebblo_filter_key in _filter:
# This handles Auth filter
value.pop(i)
break
# Check for $not operator with Semantic filter
if "$not" in _filter:
sem_filter_found = False
# This handles Semantic filter
for j, nested_filter in enumerate(_filter["$not"]):
if pebblo_filter_key in nested_filter:
if len(_filter["$not"]) == 1:
# If only one filter is left,
# then remove the $not operator
value.pop(i)
else:
value[i]["$not"].pop(j)
sem_filter_found = True
break
if sem_filter_found:
break
if len(value) == 1:
# If only one filter is left, then remove the $and operator
search_kwargs["filter"] = value[0]
elif key.lower() == "$not":
# Remove the pebblo filter from the $not list
for i, _filter in enumerate(value):
if pebblo_filter_key in _filter:
# This removes Semantic filter
value.pop(i)
break
if len(value) == 0:
# If no filter is left, then unset the filter
search_kwargs["filter"] = {}
elif key.lower() == "$or":
# If $or, pebblo filter will not be present
return
else:
# Then it's a field, check if it's a pebblo filter
if key == pebblo_filter_key:
filters.pop(key)
return
elif len(filters) > 1:
# Then all keys have to be fields (they cannot be operators)
if pebblo_filter_key in filters:
# This handles Auth filter
filters.pop(pebblo_filter_key)
return
else:
# Got an empty dictionary for filters, ignore the filter
return
elif filters is None:
# If filters is None, ignore the filter
return
else:
raise ValueError(
f"Invalid filter. Expected a dictionary/None but got type: {type(filters)}"
)
def _apply_pgvector_semantic_filter(
search_kwargs: dict, semantic_context: Optional[SemanticContext]
) -> None:
"""
Set semantic enforcement filter in search_kwargs for PGVector vectorstore.
"""
# Check if semantic_context is provided
if semantic_context is not None:
_semantic_filters = []
filters = search_kwargs.get("filter")
if semantic_context.pebblo_semantic_topics is not None:
# Add pebblo_semantic_topics filter to search_kwargs
topic_filter: dict = {
"pebblo_semantic_topics": {
"$eq": semantic_context.pebblo_semantic_topics.deny
}
}
_semantic_filters.append(topic_filter)
if semantic_context.pebblo_semantic_entities is not None:
# Add pebblo_semantic_entities filter to search_kwargs
entity_filter: dict = {
"pebblo_semantic_entities": {
"$eq": semantic_context.pebblo_semantic_entities.deny
}
}
_semantic_filters.append(entity_filter)
if len(_semantic_filters) > 0:
semantic_filter: dict = {"$not": _semantic_filters}
_apply_pgvector_filter(search_kwargs, filters, semantic_filter)
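# Illustrative note: the semantic filter assembled above has the shape
#   {"$not": [{"pebblo_semantic_topics": {"$eq": [...denied topics...]}},
#             {"pebblo_semantic_entities": {"$eq": [...denied entities...]}}]}
# and _apply_pgvector_filter then merges it with whatever filter the caller already set.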
def _apply_pgvector_authorization_filter(
search_kwargs: dict, auth_context: Optional[AuthContext]
) -> None:
"""
Set identity enforcement filter in search_kwargs for PGVector vectorstore.
"""
if auth_context is not None:
auth_filter: dict = {"authorized_identities": {"$eq": auth_context.user_auth}}
filters = search_kwargs.get("filter")
_apply_pgvector_filter(search_kwargs, filters, auth_filter)
def _set_identity_enforcement_filter(
retriever: VectorStoreRetriever, auth_context: Optional[AuthContext]
) -> None:
"""
Set identity enforcement filter in search_kwargs.
This method sets the identity enforcement filter in the search_kwargs
of the retriever based on the type of the vectorstore.
"""
search_kwargs = retriever.search_kwargs
if retriever.vectorstore.__class__.__name__ == PINECONE:
_apply_pinecone_authorization_filter(search_kwargs, auth_context)
elif retriever.vectorstore.__class__.__name__ == QDRANT:
_apply_qdrant_authorization_filter(search_kwargs, auth_context)
elif retriever.vectorstore.__class__.__name__ == PGVECTOR:
_apply_pgvector_authorization_filter(search_kwargs, auth_context)
def _set_semantic_enforcement_filter(
retriever: VectorStoreRetriever, semantic_context: Optional[SemanticContext]
) -> None:
"""
Set semantic enforcement filter in search_kwargs.
This method sets the semantic enforcement filter in the search_kwargs
of the retriever based on the type of the vectorstore.
"""
search_kwargs = retriever.search_kwargs
if retriever.vectorstore.__class__.__name__ == PINECONE:
_apply_pinecone_semantic_filter(search_kwargs, semantic_context)
elif retriever.vectorstore.__class__.__name__ == QDRANT:
_apply_qdrant_semantic_filter(search_kwargs, semantic_context)
elif retriever.vectorstore.__class__.__name__ == PGVECTOR:
_apply_pgvector_semantic_filter(search_kwargs, semantic_context)
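# Minimal usage sketch (hypothetical retriever and contexts, not part of the library):
#   retriever = vectorstore.as_retriever()   # Pinecone, Qdrant or PGVector backed
#   _set_identity_enforcement_filter(retriever, auth_context)
#   _set_semantic_enforcement_filter(retriever, semantic_context)
#   # retriever.search_kwargs["filter"] now carries both the identity and the
#   # semantic enforcement clauses in the syntax of the detected vectorstore.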
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/openapi/chain.py | """Chain that makes API calls and summarizes the responses to answer a question."""
from __future__ import annotations
import json
from typing import Any, Dict, List, NamedTuple, Optional, cast
from langchain.chains.api.openapi.requests_chain import APIRequesterChain
from langchain.chains.api.openapi.response_chain import APIResponderChain
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain_core.callbacks import CallbackManagerForChainRun, Callbacks
from langchain_core.language_models import BaseLanguageModel
from pydantic import BaseModel, Field
from requests import Response
from langchain_community.tools.openapi.utils.api_models import APIOperation
from langchain_community.utilities.requests import Requests
class _ParamMapping(NamedTuple):
"""Mapping from parameter name to parameter value."""
query_params: List[str]
body_params: List[str]
path_params: List[str]
class OpenAPIEndpointChain(Chain, BaseModel):
"""Chain interacts with an OpenAPI endpoint using natural language."""
api_request_chain: LLMChain
api_response_chain: Optional[LLMChain] = None
api_operation: APIOperation
requests: Requests = Field(exclude=True, default_factory=Requests)
param_mapping: _ParamMapping = Field(alias="param_mapping")
return_intermediate_steps: bool = False
instructions_key: str = "instructions" #: :meta private:
output_key: str = "output" #: :meta private:
max_text_length: Optional[int] = Field(ge=0) #: :meta private:
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.instructions_key]
@property
def output_keys(self) -> List[str]:
"""Expect output key.
:meta private:
"""
if not self.return_intermediate_steps:
return [self.output_key]
else:
return [self.output_key, "intermediate_steps"]
def _construct_path(self, args: Dict[str, str]) -> str:
"""Construct the path from the deserialized input."""
path = self.api_operation.base_url + self.api_operation.path
for param in self.param_mapping.path_params:
path = path.replace(f"{{{param}}}", str(args.pop(param, "")))
return path
def _extract_query_params(self, args: Dict[str, str]) -> Dict[str, str]:
"""Extract the query params from the deserialized input."""
query_params = {}
for param in self.param_mapping.query_params:
if param in args:
query_params[param] = args.pop(param)
return query_params
def _extract_body_params(self, args: Dict[str, str]) -> Optional[Dict[str, str]]:
"""Extract the request body params from the deserialized input."""
body_params = None
if self.param_mapping.body_params:
body_params = {}
for param in self.param_mapping.body_params:
if param in args:
body_params[param] = args.pop(param)
return body_params
def deserialize_json_input(self, serialized_args: str) -> dict:
"""Use the serialized typescript dictionary.
Resolve the path, query params dict, and optional requestBody dict.
"""
args: dict = json.loads(serialized_args)
path = self._construct_path(args)
body_params = self._extract_body_params(args)
query_params = self._extract_query_params(args)
return {
"url": path,
"data": body_params,
"params": query_params,
}
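    # Illustrative sketch with assumed values: for an operation whose path is
    # "/things/{id}" with query param "q", the serialized args
    # '{"id": "42", "q": "parrots"}' deserialize to
    #   {"url": "<base_url>/things/42", "data": None, "params": {"q": "parrots"}}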
def _get_output(self, output: str, intermediate_steps: dict) -> dict:
"""Return the output from the API call."""
if self.return_intermediate_steps:
return {
self.output_key: output,
"intermediate_steps": intermediate_steps,
}
else:
return {self.output_key: output}
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
intermediate_steps = {}
instructions = inputs[self.instructions_key]
instructions = instructions[: self.max_text_length]
_api_arguments = self.api_request_chain.predict_and_parse(
instructions=instructions, callbacks=_run_manager.get_child()
)
api_arguments = cast(str, _api_arguments)
intermediate_steps["request_args"] = api_arguments
_run_manager.on_text(
api_arguments, color="green", end="\n", verbose=self.verbose
)
if api_arguments.startswith("ERROR"):
return self._get_output(api_arguments, intermediate_steps)
elif api_arguments.startswith("MESSAGE:"):
return self._get_output(
api_arguments[len("MESSAGE:") :], intermediate_steps
)
try:
request_args = self.deserialize_json_input(api_arguments)
method = getattr(self.requests, self.api_operation.method.value)
api_response: Response = method(**request_args)
if api_response.status_code != 200:
method_str = str(self.api_operation.method.value)
response_text = (
f"{api_response.status_code}: {api_response.reason}"
+ f"\nFor {method_str.upper()} {request_args['url']}\n"
+ f"Called with args: {request_args['params']}"
)
else:
response_text = api_response.text
except Exception as e:
response_text = f"Error with message {str(e)}"
response_text = response_text[: self.max_text_length]
intermediate_steps["response_text"] = response_text
_run_manager.on_text(
response_text, color="blue", end="\n", verbose=self.verbose
)
if self.api_response_chain is not None:
_answer = self.api_response_chain.predict_and_parse(
response=response_text,
instructions=instructions,
callbacks=_run_manager.get_child(),
)
answer = cast(str, _answer)
_run_manager.on_text(answer, color="yellow", end="\n", verbose=self.verbose)
return self._get_output(answer, intermediate_steps)
else:
return self._get_output(response_text, intermediate_steps)
@classmethod
def from_url_and_method(
cls,
spec_url: str,
path: str,
method: str,
llm: BaseLanguageModel,
requests: Optional[Requests] = None,
return_intermediate_steps: bool = False,
**kwargs: Any,
# TODO: Handle async
) -> "OpenAPIEndpointChain":
"""Create an OpenAPIEndpoint from a spec at the specified url."""
operation = APIOperation.from_openapi_url(spec_url, path, method)
return cls.from_api_operation(
operation,
requests=requests,
llm=llm,
return_intermediate_steps=return_intermediate_steps,
**kwargs,
)
@classmethod
def from_api_operation(
cls,
operation: APIOperation,
llm: BaseLanguageModel,
requests: Optional[Requests] = None,
verbose: bool = False,
return_intermediate_steps: bool = False,
raw_response: bool = False,
callbacks: Callbacks = None,
**kwargs: Any,
# TODO: Handle async
) -> "OpenAPIEndpointChain":
"""Create an OpenAPIEndpointChain from an operation and a spec."""
param_mapping = _ParamMapping(
query_params=operation.query_params,
body_params=operation.body_params,
path_params=operation.path_params,
)
requests_chain = APIRequesterChain.from_llm_and_typescript(
llm,
typescript_definition=operation.to_typescript(),
verbose=verbose,
callbacks=callbacks,
)
if raw_response:
response_chain = None
else:
response_chain = APIResponderChain.from_llm(
llm, verbose=verbose, callbacks=callbacks
)
_requests = requests or Requests()
return cls(
api_request_chain=requests_chain,
api_response_chain=response_chain,
api_operation=operation,
requests=_requests,
param_mapping=param_mapping,
verbose=verbose,
return_intermediate_steps=return_intermediate_steps,
callbacks=callbacks,
**kwargs,
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/openapi/response_chain.py | """Response parser."""
import json
import re
from typing import Any
from langchain.chains.api.openapi.prompts import RESPONSE_TEMPLATE
from langchain.chains.llm import LLMChain
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts.prompt import PromptTemplate
class APIResponderOutputParser(BaseOutputParser):
"""Parse the response and error tags."""
def _load_json_block(self, serialized_block: str) -> str:
try:
response_content = json.loads(serialized_block, strict=False)
return response_content.get("response", "ERROR parsing response.")
except json.JSONDecodeError:
return "ERROR parsing response."
def parse(self, llm_output: str) -> str:
"""Parse the response and error tags."""
json_match = re.search(r"```json(.*?)```", llm_output, re.DOTALL)
if json_match:
return self._load_json_block(json_match.group(1).strip())
else:
raise ValueError(f"No response found in output: {llm_output}.")
@property
def _type(self) -> str:
return "api_responder"
class APIResponderChain(LLMChain):
"""Get the response parser."""
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@classmethod
def from_llm(
cls, llm: BaseLanguageModel, verbose: bool = True, **kwargs: Any
) -> LLMChain:
"""Get the response parser."""
output_parser = APIResponderOutputParser()
prompt = PromptTemplate(
template=RESPONSE_TEMPLATE,
output_parser=output_parser,
input_variables=["response", "instructions"],
)
return cls(prompt=prompt, llm=llm, verbose=verbose, **kwargs)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/openapi/requests_chain.py | """request parser."""
import json
import re
from typing import Any
from langchain.chains.api.openapi.prompts import REQUEST_TEMPLATE
from langchain.chains.llm import LLMChain
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts.prompt import PromptTemplate
class APIRequesterOutputParser(BaseOutputParser):
"""Parse the request and error tags."""
def _load_json_block(self, serialized_block: str) -> str:
try:
return json.dumps(json.loads(serialized_block, strict=False))
except json.JSONDecodeError:
return "ERROR serializing request."
def parse(self, llm_output: str) -> str:
"""Parse the request and error tags."""
json_match = re.search(r"```json(.*?)```", llm_output, re.DOTALL)
if json_match:
return self._load_json_block(json_match.group(1).strip())
message_match = re.search(r"```text(.*?)```", llm_output, re.DOTALL)
if message_match:
return f"MESSAGE: {message_match.group(1).strip()}"
return "ERROR making request"
@property
def _type(self) -> str:
return "api_requester"
class APIRequesterChain(LLMChain):
"""Get the request parser."""
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@classmethod
def from_llm_and_typescript(
cls,
llm: BaseLanguageModel,
typescript_definition: str,
verbose: bool = True,
**kwargs: Any,
) -> LLMChain:
"""Get the request parser."""
output_parser = APIRequesterOutputParser()
prompt = PromptTemplate(
template=REQUEST_TEMPLATE,
output_parser=output_parser,
partial_variables={"schema": typescript_definition},
input_variables=["instructions"],
)
return cls(prompt=prompt, llm=llm, verbose=verbose, **kwargs)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/chains | lc_public_repos/langchain/libs/community/langchain_community/chains/openapi/prompts.py | # flake8: noqa
REQUEST_TEMPLATE = """You are a helpful AI Assistant. Please provide JSON arguments to agentFunc() based on the user's instructions.
API_SCHEMA: ```typescript
{schema}
```
USER_INSTRUCTIONS: "{instructions}"
Your arguments must be plain json provided in a markdown block:
ARGS: ```json
{{valid json conforming to API_SCHEMA}}
```
Example
-----
ARGS: ```json
{{"foo": "bar", "baz": {{"qux": "quux"}}}}
```
The block must be no more than 1 line long, and all arguments must be valid JSON. All string arguments must be wrapped in double quotes.
You MUST strictly comply with the types indicated by the provided schema, including all required args.
If you don't have sufficient information to call the function due to things like requiring specific uuid's, you can reply with the following message:
Message: ```text
Concise response requesting the additional information that would make calling the function successful.
```
Begin
-----
ARGS:
"""
RESPONSE_TEMPLATE = """You are a helpful AI assistant trained to answer user queries from API responses.
You attempted to call an API, which resulted in:
API_RESPONSE: {response}
USER_COMMENT: "{instructions}"
If the API_RESPONSE can answer the USER_COMMENT, respond with the following markdown json block:
Response: ```json
{{"response": "Human-understandable synthesis of the API_RESPONSE"}}
```
Otherwise respond with the following markdown json block:
Response Error: ```json
{{"response": "What you did and a concise statement of the resulting error. If it can be easily fixed, provide a suggestion."}}
```
You MUST respond as a markdown json code block. The person you are responding to CANNOT see the API_RESPONSE, so if there is any relevant information there you must include it in your response.
Begin:
---
"""
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/adapters/openai.py | from __future__ import annotations
import importlib
from typing import (
Any,
AsyncIterator,
Dict,
Iterable,
List,
Mapping,
Sequence,
Union,
overload,
)
from langchain_core.chat_sessions import ChatSession
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
ToolMessage,
)
from pydantic import BaseModel
from typing_extensions import Literal
async def aenumerate(
iterable: AsyncIterator[Any], start: int = 0
) -> AsyncIterator[tuple[int, Any]]:
"""Async version of enumerate function."""
i = start
async for x in iterable:
yield i, x
i += 1
class IndexableBaseModel(BaseModel):
"""Allows a BaseModel to return its fields by string variable indexing."""
def __getitem__(self, item: str) -> Any:
return getattr(self, item)
class Choice(IndexableBaseModel):
"""Choice."""
message: dict
class ChatCompletions(IndexableBaseModel):
"""Chat completions."""
choices: List[Choice]
class ChoiceChunk(IndexableBaseModel):
"""Choice chunk."""
delta: dict
class ChatCompletionChunk(IndexableBaseModel):
"""Chat completion chunk."""
choices: List[ChoiceChunk]
def convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
"""Convert a dictionary to a LangChain message.
Args:
_dict: The dictionary.
Returns:
The LangChain message.
"""
role = _dict.get("role")
if role == "user":
return HumanMessage(content=_dict.get("content", ""))
elif role == "assistant":
# Fix for azure
# Also OpenAI returns None for tool invocations
content = _dict.get("content", "") or ""
additional_kwargs: Dict = {}
if function_call := _dict.get("function_call"):
additional_kwargs["function_call"] = dict(function_call)
if tool_calls := _dict.get("tool_calls"):
additional_kwargs["tool_calls"] = tool_calls
if context := _dict.get("context"):
additional_kwargs["context"] = context
return AIMessage(content=content, additional_kwargs=additional_kwargs)
elif role == "system":
return SystemMessage(content=_dict.get("content", ""))
elif role == "function":
return FunctionMessage(content=_dict.get("content", ""), name=_dict.get("name")) # type: ignore[arg-type]
elif role == "tool":
additional_kwargs = {}
if "name" in _dict:
additional_kwargs["name"] = _dict["name"]
return ToolMessage(
content=_dict.get("content", ""),
tool_call_id=_dict.get("tool_call_id"), # type: ignore[arg-type]
additional_kwargs=additional_kwargs,
)
else:
return ChatMessage(content=_dict.get("content", ""), role=role) # type: ignore[arg-type]
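# Illustrative sketch with an assumed payload: convert_dict_to_message(
#     {"role": "assistant", "content": "Hi!", "tool_calls": [{"id": "call_1", ...}]})
# returns an AIMessage whose content is "Hi!" and whose
# additional_kwargs["tool_calls"] carries the tool calls unchanged.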
def convert_message_to_dict(message: BaseMessage) -> dict:
"""Convert a LangChain message to a dictionary.
Args:
message: The LangChain message.
Returns:
The dictionary.
"""
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "function_call" in message.additional_kwargs:
message_dict["function_call"] = message.additional_kwargs["function_call"]
# If function call only, content is None not empty string
if message_dict["content"] == "":
message_dict["content"] = None
if "tool_calls" in message.additional_kwargs:
message_dict["tool_calls"] = message.additional_kwargs["tool_calls"]
# If tool calls only, content is None not empty string
if message_dict["content"] == "":
message_dict["content"] = None
if "context" in message.additional_kwargs:
message_dict["context"] = message.additional_kwargs["context"]
# If context only, content is None not empty string
if message_dict["content"] == "":
message_dict["content"] = None
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"content": message.content,
"name": message.name,
}
elif isinstance(message, ToolMessage):
message_dict = {
"role": "tool",
"content": message.content,
"tool_call_id": message.tool_call_id,
}
else:
raise TypeError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
def convert_openai_messages(messages: Sequence[Dict[str, Any]]) -> List[BaseMessage]:
"""Convert dictionaries representing OpenAI messages to LangChain format.
Args:
messages: List of dictionaries representing OpenAI messages
Returns:
List of LangChain BaseMessage objects.
"""
return [convert_dict_to_message(m) for m in messages]
def _convert_message_chunk(chunk: BaseMessageChunk, i: int) -> dict:
_dict: Dict[str, Any] = {}
if isinstance(chunk, AIMessageChunk):
if i == 0:
# Only shows up in the first chunk
_dict["role"] = "assistant"
if "function_call" in chunk.additional_kwargs:
_dict["function_call"] = chunk.additional_kwargs["function_call"]
            # If the first chunk is a function call, the content should be
            # None rather than an empty string or missing.
if i == 0:
_dict["content"] = None
if "tool_calls" in chunk.additional_kwargs:
_dict["tool_calls"] = chunk.additional_kwargs["tool_calls"]
            # If the first chunk is a tool call, the content should be
            # None rather than an empty string or missing.
if i == 0:
_dict["content"] = None
else:
_dict["content"] = chunk.content
else:
raise ValueError(f"Got unexpected streaming chunk type: {type(chunk)}")
# This only happens at the end of streams, and OpenAI returns as empty dict
if _dict == {"content": ""}:
_dict = {}
return _dict
def _convert_message_chunk_to_delta(chunk: BaseMessageChunk, i: int) -> Dict[str, Any]:
_dict = _convert_message_chunk(chunk, i)
return {"choices": [{"delta": _dict}]}
class ChatCompletion:
"""Chat completion."""
@overload
@staticmethod
def create(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: Literal[False] = False,
**kwargs: Any,
) -> dict: ...
@overload
@staticmethod
def create(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: Literal[True],
**kwargs: Any,
) -> Iterable: ...
@staticmethod
def create(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: bool = False,
**kwargs: Any,
) -> Union[dict, Iterable]:
models = importlib.import_module("langchain.chat_models")
model_cls = getattr(models, provider)
model_config = model_cls(**kwargs)
converted_messages = convert_openai_messages(messages)
if not stream:
result = model_config.invoke(converted_messages)
return {"choices": [{"message": convert_message_to_dict(result)}]}
else:
return (
_convert_message_chunk_to_delta(c, i)
for i, c in enumerate(model_config.stream(converted_messages))
)
@overload
@staticmethod
async def acreate(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: Literal[False] = False,
**kwargs: Any,
) -> dict: ...
@overload
@staticmethod
async def acreate(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: Literal[True],
**kwargs: Any,
) -> AsyncIterator: ...
@staticmethod
async def acreate(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: bool = False,
**kwargs: Any,
) -> Union[dict, AsyncIterator]:
models = importlib.import_module("langchain.chat_models")
model_cls = getattr(models, provider)
model_config = model_cls(**kwargs)
converted_messages = convert_openai_messages(messages)
if not stream:
result = await model_config.ainvoke(converted_messages)
return {"choices": [{"message": convert_message_to_dict(result)}]}
else:
return (
_convert_message_chunk_to_delta(c, i)
async for i, c in aenumerate(model_config.astream(converted_messages))
)
def _has_assistant_message(session: ChatSession) -> bool:
"""Check if chat session has an assistant message."""
    return any(isinstance(m, AIMessage) for m in session["messages"])
def convert_messages_for_finetuning(
sessions: Iterable[ChatSession],
) -> List[List[dict]]:
"""Convert messages to a list of lists of dictionaries for fine-tuning.
Args:
sessions: The chat sessions.
Returns:
The list of lists of dictionaries.
"""
return [
[convert_message_to_dict(s) for s in session["messages"]]
for session in sessions
if _has_assistant_message(session)
]
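# Illustrative sketch with an assumed session: a ChatSession whose messages are
# [HumanMessage(content="hi"), AIMessage(content="hello")] converts to
#   [[{"role": "user", "content": "hi"}, {"role": "assistant", "content": "hello"}]]
# while sessions containing no assistant message are filtered out entirely.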
class Completions:
"""Completions."""
@overload
@staticmethod
def create(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: Literal[False] = False,
**kwargs: Any,
) -> ChatCompletions: ...
@overload
@staticmethod
def create(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: Literal[True],
**kwargs: Any,
) -> Iterable: ...
@staticmethod
def create(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: bool = False,
**kwargs: Any,
) -> Union[ChatCompletions, Iterable]:
models = importlib.import_module("langchain.chat_models")
model_cls = getattr(models, provider)
model_config = model_cls(**kwargs)
converted_messages = convert_openai_messages(messages)
if not stream:
result = model_config.invoke(converted_messages)
return ChatCompletions(
choices=[Choice(message=convert_message_to_dict(result))]
)
else:
return (
ChatCompletionChunk(
choices=[ChoiceChunk(delta=_convert_message_chunk(c, i))]
)
for i, c in enumerate(model_config.stream(converted_messages))
)
@overload
@staticmethod
async def acreate(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: Literal[False] = False,
**kwargs: Any,
) -> ChatCompletions: ...
@overload
@staticmethod
async def acreate(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: Literal[True],
**kwargs: Any,
) -> AsyncIterator: ...
@staticmethod
async def acreate(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: bool = False,
**kwargs: Any,
) -> Union[ChatCompletions, AsyncIterator]:
models = importlib.import_module("langchain.chat_models")
model_cls = getattr(models, provider)
model_config = model_cls(**kwargs)
converted_messages = convert_openai_messages(messages)
if not stream:
result = await model_config.ainvoke(converted_messages)
return ChatCompletions(
choices=[Choice(message=convert_message_to_dict(result))]
)
else:
return (
ChatCompletionChunk(
choices=[ChoiceChunk(delta=_convert_message_chunk(c, i))]
)
async for i, c in aenumerate(model_config.astream(converted_messages))
)
class Chat:
"""Chat."""
def __init__(self) -> None:
self.completions = Completions()
chat = Chat()
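# Minimal usage sketch (assumes a chat model provider such as langchain-openai is
# installed and its API key is configured; the model name below is hypothetical):
#   from langchain_community.adapters import openai as lc_openai
#   result = lc_openai.chat.completions.create(
#       messages=[{"role": "user", "content": "hi"}],
#       model="gpt-4o-mini",
#       temperature=0,
#   )
#   result.choices[0].message   # OpenAI-style dict built from the LangChain reply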
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/adapters/__init__.py | """**Adapters** are used to adapt LangChain models to other APIs.
LangChain integrates with many model providers.
While LangChain has its own message and model APIs,
LangChain has also made it as easy as
possible to explore other models by exposing an **adapter** to adapt LangChain
models to other APIs, such as the OpenAI API.
"""
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/memory/kg.py | from typing import Any, Dict, List, Type, Union
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string
from langchain_core.prompts import BasePromptTemplate
from pydantic import Field
from langchain_community.graphs import NetworkxEntityGraph
from langchain_community.graphs.networkx_graph import (
KnowledgeTriple,
get_entities,
parse_triples,
)
try:
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
)
from langchain.memory.utils import get_prompt_input_key
class ConversationKGMemory(BaseChatMemory):
"""Knowledge graph conversation memory.
Integrates with external knowledge graph to store and retrieve
information about knowledge triples in the conversation.
"""
        k: int = 2
        """Number of previous utterances to include in the context."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
kg: NetworkxEntityGraph = Field(default_factory=NetworkxEntityGraph)
knowledge_extraction_prompt: BasePromptTemplate = (
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT
)
entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
llm: BaseLanguageModel
        summary_message_cls: Type[BaseMessage] = SystemMessage
memory_key: str = "history" #: :meta private:
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
entities = self._get_current_entities(inputs)
summary_strings = []
for entity in entities:
knowledge = self.kg.get_entity_knowledge(entity)
if knowledge:
summary = f"On {entity}: {'. '.join(knowledge)}."
summary_strings.append(summary)
context: Union[str, List]
if not summary_strings:
context = [] if self.return_messages else ""
elif self.return_messages:
context = [
self.summary_message_cls(content=text) for text in summary_strings
]
else:
context = "\n".join(summary_strings)
return {self.memory_key: context}
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
"""Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key
def _get_prompt_output_key(self, outputs: Dict[str, Any]) -> str:
"""Get the output key for the prompt."""
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
return list(outputs.keys())[0]
return self.output_key
def get_current_entities(self, input_string: str) -> List[str]:
chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
buffer_string = get_buffer_string(
self.chat_memory.messages[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=input_string,
)
return get_entities(output)
def _get_current_entities(self, inputs: Dict[str, Any]) -> List[str]:
"""Get the current entities in the conversation."""
prompt_input_key = self._get_prompt_input_key(inputs)
return self.get_current_entities(inputs[prompt_input_key])
def get_knowledge_triplets(self, input_string: str) -> List[KnowledgeTriple]:
chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt)
buffer_string = get_buffer_string(
self.chat_memory.messages[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=input_string,
verbose=True,
)
knowledge = parse_triples(output)
return knowledge
def _get_and_update_kg(self, inputs: Dict[str, Any]) -> None:
"""Get and update knowledge graph from the conversation history."""
prompt_input_key = self._get_prompt_input_key(inputs)
knowledge = self.get_knowledge_triplets(inputs[prompt_input_key])
for triple in knowledge:
self.kg.add_triple(triple)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self._get_and_update_kg(inputs)
def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.kg.clear()
except ImportError:
# Placeholder object
class ConversationKGMemory: # type: ignore[no-redef]
pass
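# Minimal usage sketch (hypothetical `llm` instance; requires the `langchain` package):
#   memory = ConversationKGMemory(llm=llm, return_messages=True)
#   memory.save_context({"input": "Sam lives in Paris"}, {"output": "Noted."})
#   memory.load_memory_variables({"input": "Where does Sam live?"})
#   # -> roughly {"history": [SystemMessage(content="On Sam: Sam lives in Paris.")]}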
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/memory/zep_memory.py | from __future__ import annotations
from typing import Any, Dict, Optional
from langchain_community.chat_message_histories import ZepChatMessageHistory
try:
from langchain.memory import ConversationBufferMemory
class ZepMemory(ConversationBufferMemory): # type: ignore[override]
"""Persist your chain history to the Zep MemoryStore.
The number of messages returned by Zep and when the Zep server summarizes chat
histories is configurable. See the Zep documentation for more details.
Documentation: https://docs.getzep.com
Example:
.. code-block:: python
memory = ZepMemory(
session_id=session_id, # Identifies your user or a user's session
url=ZEP_API_URL, # Your Zep server's URL
api_key=<your_api_key>, # Optional
memory_key="history", # Ensure this matches the key used in
# chain's prompt template
return_messages=True, # Does your prompt template expect a string
# or a list of Messages?
)
chain = LLMChain(memory=memory,...) # Configure your chain to use the ZepMemory
instance
Note:
        To persist metadata alongside your chat history, you will need to create a
custom Chain class that overrides the `prep_outputs` method to include the metadata
in the call to `self.memory.save_context`.
Zep - Fast, scalable building blocks for LLM Apps
=========
Zep is an open source platform for productionizing LLM apps. Go from a prototype
built in LangChain or LlamaIndex, or a custom app, to production in minutes without
rewriting code.
For server installation instructions and more, see:
https://docs.getzep.com/deployment/quickstart/
For more information on the zep-python package, see:
https://github.com/getzep/zep-python
""" # noqa: E501
chat_memory: ZepChatMessageHistory
def __init__(
self,
session_id: str,
url: str = "http://localhost:8000",
api_key: Optional[str] = None,
output_key: Optional[str] = None,
input_key: Optional[str] = None,
return_messages: bool = False,
human_prefix: str = "Human",
ai_prefix: str = "AI",
memory_key: str = "history",
):
"""Initialize ZepMemory.
Args:
session_id (str): Identifies your user or a user's session
url (str, optional): Your Zep server's URL. Defaults to
"http://localhost:8000".
api_key (Optional[str], optional): Your Zep API key. Defaults to None.
output_key (Optional[str], optional): The key to use for the output message.
Defaults to None.
input_key (Optional[str], optional): The key to use for the input message.
Defaults to None.
return_messages (bool, optional): Does your prompt template expect a string
or a list of Messages? Defaults to False
i.e. return a string.
human_prefix (str, optional): The prefix to use for human messages.
Defaults to "Human".
ai_prefix (str, optional): The prefix to use for AI messages.
Defaults to "AI".
memory_key (str, optional): The key to use for the memory.
Defaults to "history".
Ensure that this matches the key used in
chain's prompt template.
""" # noqa: E501
chat_message_history = ZepChatMessageHistory(
session_id=session_id,
url=url,
api_key=api_key,
)
super().__init__(
chat_memory=chat_message_history,
output_key=output_key,
input_key=input_key,
return_messages=return_messages,
human_prefix=human_prefix,
ai_prefix=ai_prefix,
memory_key=memory_key,
)
def save_context(
self,
inputs: Dict[str, Any],
outputs: Dict[str, str],
metadata: Optional[Dict[str, Any]] = None,
) -> None:
"""Save context from this conversation to buffer.
Args:
inputs (Dict[str, Any]): The inputs to the chain.
outputs (Dict[str, str]): The outputs from the chain.
metadata (Optional[Dict[str, Any]], optional): Any metadata to save with
the context. Defaults to None
Returns:
None
"""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_user_message(input_str, metadata=metadata)
self.chat_memory.add_ai_message(output_str, metadata=metadata)
except ImportError:
# Placeholder object
class ZepMemory: # type: ignore[no-redef]
pass
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/memory/zep_cloud_memory.py | from __future__ import annotations
from typing import Any, Dict, Optional
from langchain_community.chat_message_histories import ZepCloudChatMessageHistory
try:
from langchain.memory import ConversationBufferMemory
from zep_cloud import MemoryGetRequestMemoryType
class ZepCloudMemory(ConversationBufferMemory): # type: ignore[override]
"""Persist your chain history to the Zep MemoryStore.
Documentation: https://help.getzep.com
Example:
.. code-block:: python
memory = ZepCloudMemory(
session_id=session_id, # Identifies your user or a user's session
api_key=<your_api_key>, # Your Zep Project API key
memory_key="history", # Ensure this matches the key used in
# chain's prompt template
return_messages=True, # Does your prompt template expect a string
# or a list of Messages?
)
chain = LLMChain(memory=memory,...) # Configure your chain to use the ZepMemory
instance
Note:
        To persist metadata alongside your chat history, you will need to create a
custom Chain class that overrides the `prep_outputs` method to include the metadata
in the call to `self.memory.save_context`.
Zep - Recall, understand, and extract data from chat histories. Power personalized AI experiences.
=========
Zep is a long-term memory service for AI Assistant apps. With Zep, you can provide AI assistants with the ability to recall past conversations,
no matter how distant, while also reducing hallucinations, latency, and cost.
For more information on the zep-python package, see:
https://github.com/getzep/zep-python
""" # noqa: E501
chat_memory: ZepCloudChatMessageHistory
def __init__(
self,
session_id: str,
api_key: str,
memory_type: Optional[MemoryGetRequestMemoryType] = None,
lastn: Optional[int] = None,
output_key: Optional[str] = None,
input_key: Optional[str] = None,
return_messages: bool = False,
human_prefix: str = "Human",
ai_prefix: str = "AI",
memory_key: str = "history",
):
"""Initialize ZepMemory.
Args:
session_id (str): Identifies your user or a user's session
api_key (str): Your Zep Project key.
memory_type (Optional[MemoryGetRequestMemoryType], optional): Zep Memory Type, defaults to perpetual
lastn (Optional[int], optional): Number of messages to retrieve. Will add the last summary generated prior to the nth oldest message. Defaults to 6
output_key (Optional[str], optional): The key to use for the output message.
Defaults to None.
input_key (Optional[str], optional): The key to use for the input message.
Defaults to None.
return_messages (bool, optional): Does your prompt template expect a string
or a list of Messages? Defaults to False
i.e. return a string.
human_prefix (str, optional): The prefix to use for human messages.
Defaults to "Human".
ai_prefix (str, optional): The prefix to use for AI messages.
Defaults to "AI".
memory_key (str, optional): The key to use for the memory.
Defaults to "history".
Ensure that this matches the key used in
chain's prompt template.
""" # noqa: E501
chat_message_history = ZepCloudChatMessageHistory(
session_id=session_id,
memory_type=memory_type,
lastn=lastn,
api_key=api_key,
)
super().__init__(
chat_memory=chat_message_history,
output_key=output_key,
input_key=input_key,
return_messages=return_messages,
human_prefix=human_prefix,
ai_prefix=ai_prefix,
memory_key=memory_key,
)
def save_context(
self,
inputs: Dict[str, Any],
outputs: Dict[str, str],
metadata: Optional[Dict[str, Any]] = None,
) -> None:
"""Save context from this conversation to buffer.
Args:
inputs (Dict[str, Any]): The inputs to the chain.
outputs (Dict[str, str]): The outputs from the chain.
metadata (Optional[Dict[str, Any]], optional): Any metadata to save with
the context. Defaults to None
Returns:
None
"""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_user_message(input_str, metadata=metadata)
self.chat_memory.add_ai_message(output_str, metadata=metadata)
except ImportError:
# Placeholder object
class ZepCloudMemory: # type: ignore[no-redef]
pass
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/memory/motorhead_memory.py | from typing import Any, Dict, List, Optional
import requests
from langchain_core.messages import get_buffer_string
try:
# Temporarily tuck import in a conditional import until
# community pkg becomes dependent on langchain core
from langchain.memory.chat_memory import BaseChatMemory
MANAGED_URL = "https://api.getmetal.io/v1/motorhead"
class MotorheadMemory(BaseChatMemory):
"""Chat message memory backed by Motorhead service."""
url: str = MANAGED_URL
timeout: int = 3000
memory_key: str = "history"
session_id: str
context: Optional[str] = None
# Managed Params
api_key: Optional[str] = None
client_id: Optional[str] = None
def __get_headers(self) -> Dict[str, str]:
is_managed = self.url == MANAGED_URL
headers = {
"Content-Type": "application/json",
}
if is_managed and not (self.api_key and self.client_id):
raise ValueError(
"""
                    You must provide an API key and a client ID to use the managed
version of Motorhead. Visit https://getmetal.io
for more information.
"""
)
if is_managed and self.api_key and self.client_id:
headers["x-metal-api-key"] = self.api_key
headers["x-metal-client-id"] = self.client_id
return headers
async def init(self) -> None:
res = requests.get(
f"{self.url}/sessions/{self.session_id}/memory",
timeout=self.timeout,
headers=self.__get_headers(),
)
res_data = res.json()
res_data = res_data.get("data", res_data) # Handle Managed Version
messages = res_data.get("messages", [])
context = res_data.get("context", "NONE")
for message in reversed(messages):
if message["role"] == "AI":
self.chat_memory.add_ai_message(message["content"])
else:
self.chat_memory.add_user_message(message["content"])
if context and context != "NONE":
self.context = context
def load_memory_variables(self, values: Dict[str, Any]) -> Dict[str, Any]:
if self.return_messages:
return {self.memory_key: self.chat_memory.messages}
else:
return {self.memory_key: get_buffer_string(self.chat_memory.messages)}
@property
def memory_variables(self) -> List[str]:
return [self.memory_key]
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
input_str, output_str = self._get_input_output(inputs, outputs)
requests.post(
f"{self.url}/sessions/{self.session_id}/memory",
timeout=self.timeout,
json={
"messages": [
{"role": "Human", "content": f"{input_str}"},
{"role": "AI", "content": f"{output_str}"},
]
},
headers=self.__get_headers(),
)
super().save_context(inputs, outputs)
def delete_session(self) -> None:
"""Delete a session"""
requests.delete(f"{self.url}/sessions/{self.session_id}/memory")
except ImportError:
# Placeholder object
class MotorheadMemory: # type: ignore[no-redef]
pass
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/naver.py | import logging
from typing import (
Any,
AsyncContextManager,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Optional,
Tuple,
Type,
Union,
cast,
)
import httpx
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import BaseChatModel, LangSmithParams
from langchain_core.language_models.llms import create_base_retry_decorator
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.utils import convert_to_secret_str, get_from_env
from pydantic import AliasChoices, ConfigDict, Field, SecretStr, model_validator
from typing_extensions import Self
_DEFAULT_BASE_URL = "https://clovastudio.stream.ntruss.com"
logger = logging.getLogger(__name__)
def _convert_chunk_to_message_chunk(
sse: Any, default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
sse_data = sse.json()
message = sse_data.get("message")
role = message.get("role")
content = message.get("content") or ""
if sse.event == "result":
response_metadata = {}
if "stopReason" in sse_data:
response_metadata["stopReason"] = sse_data["stopReason"]
return AIMessageChunk(content="", response_metadata=response_metadata)
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content)
elif role == "system" or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content) # type: ignore[call-arg]
def _convert_message_to_naver_chat_message(
message: BaseMessage,
) -> Dict:
if isinstance(message, ChatMessage):
return dict(role=message.role, content=message.content)
elif isinstance(message, HumanMessage):
return dict(role="user", content=message.content)
elif isinstance(message, SystemMessage):
return dict(role="system", content=message.content)
elif isinstance(message, AIMessage):
return dict(role="assistant", content=message.content)
else:
logger.warning(
"FunctionMessage, ToolMessage not yet supported "
"(https://api.ncloud-docs.com/docs/clovastudio-chatcompletions)"
)
raise ValueError(f"Got unknown type {message}")
def _convert_naver_chat_message_to_message(
_message: Dict,
) -> BaseMessage:
role = _message["role"]
assert role in (
"assistant",
"system",
"user",
), f"Expected role to be 'assistant', 'system', 'user', got {role}"
content = cast(str, _message["content"])
additional_kwargs: Dict = {}
if role == "user":
return HumanMessage(
content=content,
additional_kwargs=additional_kwargs,
)
elif role == "system":
return SystemMessage(
content=content,
additional_kwargs=additional_kwargs,
)
elif role == "assistant":
return AIMessage(
content=content,
additional_kwargs=additional_kwargs,
)
else:
logger.warning("Got unknown role %s", role)
raise ValueError(f"Got unknown role {role}")
async def _aiter_sse(
event_source_mgr: AsyncContextManager[Any],
) -> AsyncIterator[Dict]:
"""Iterate over the server-sent events."""
async with event_source_mgr as event_source:
await _araise_on_error(event_source.response)
async for sse in event_source.aiter_sse():
event_data = sse.json()
if sse.event == "signal" and event_data.get("data", {}) == "[DONE]":
return
yield sse
def _raise_on_error(response: httpx.Response) -> None:
"""Raise an error if the response is an error."""
if httpx.codes.is_error(response.status_code):
error_message = response.read().decode("utf-8")
raise httpx.HTTPStatusError(
f"Error response {response.status_code} "
f"while fetching {response.url}: {error_message}",
request=response.request,
response=response,
)
async def _araise_on_error(response: httpx.Response) -> None:
"""Raise an error if the response is an error."""
if httpx.codes.is_error(response.status_code):
error_message = (await response.aread()).decode("utf-8")
raise httpx.HTTPStatusError(
f"Error response {response.status_code} "
f"while fetching {response.url}: {error_message}",
request=response.request,
response=response,
)
class ChatClovaX(BaseChatModel):
"""`NCP ClovaStudio` Chat Completion API.
    To use, you should have the following environment variables set or passed in the constructor in lower case:
- ``NCP_CLOVASTUDIO_API_KEY``
- ``NCP_APIGW_API_KEY``
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage
            from langchain_community.chat_models import ChatClovaX
model = ChatClovaX()
model.invoke([HumanMessage(content="Come up with 10 names for a song about parrots.")])
""" # noqa: E501
client: Optional[httpx.Client] = Field(default=None) #: :meta private:
async_client: Optional[httpx.AsyncClient] = Field(default=None) #: :meta private:
model_name: str = Field(
default="HCX-003",
validation_alias=AliasChoices("model_name", "model"),
description="NCP ClovaStudio chat model name",
)
task_id: Optional[str] = Field(
default=None, description="NCP Clova Studio chat model tuning task ID"
)
service_app: bool = Field(
default=False,
description="false: use testapp, true: use service app on NCP Clova Studio",
)
ncp_clovastudio_api_key: Optional[SecretStr] = Field(default=None, alias="api_key")
"""Automatically inferred from env are `NCP_CLOVASTUDIO_API_KEY` if not provided."""
ncp_apigw_api_key: Optional[SecretStr] = Field(default=None, alias="apigw_api_key")
"""Automatically inferred from env are `NCP_APIGW_API_KEY` if not provided."""
base_url: str = Field(default="", alias="base_url")
"""
    Automatically inferred from the env var `NCP_CLOVASTUDIO_API_BASE_URL` if not provided.
"""
temperature: Optional[float] = Field(gt=0.0, le=1.0, default=0.5)
top_k: Optional[int] = Field(ge=0, le=128, default=0)
top_p: Optional[float] = Field(ge=0, le=1.0, default=0.8)
repeat_penalty: Optional[float] = Field(gt=0.0, le=10, default=5.0)
max_tokens: Optional[int] = Field(ge=0, le=4096, default=100)
stop_before: Optional[list[str]] = Field(default=None, alias="stop")
include_ai_filters: Optional[bool] = Field(default=False)
seed: Optional[int] = Field(ge=0, le=4294967295, default=0)
timeout: int = Field(gt=0, default=90)
max_retries: int = Field(ge=1, default=2)
model_config = ConfigDict(populate_by_name=True, protected_namespaces=())
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling the API."""
defaults = {
"temperature": self.temperature,
"topK": self.top_k,
"topP": self.top_p,
"repeatPenalty": self.repeat_penalty,
"maxTokens": self.max_tokens,
"stopBefore": self.stop_before,
"includeAiFilters": self.include_ai_filters,
"seed": self.seed,
}
filtered = {k: v for k, v in defaults.items() if v is not None}
return filtered
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
self._default_params["model_name"] = self.model_name
return self._default_params
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"ncp_clovastudio_api_key": "NCP_CLOVASTUDIO_API_KEY",
"ncp_apigw_api_key": "NCP_APIGW_API_KEY",
}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "chat-naver"
def _get_ls_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> LangSmithParams:
"""Get the parameters used to invoke the model."""
params = super()._get_ls_params(stop=stop, **kwargs)
params["ls_provider"] = "naver"
return params
@property
def _client_params(self) -> Dict[str, Any]:
"""Get the parameters used for the client."""
return self._default_params
@property
def _api_url(self) -> str:
"""GET chat completion api url"""
app_type = "serviceapp" if self.service_app else "testapp"
if self.task_id:
return (
f"{self.base_url}/{app_type}/v1/tasks/{self.task_id}/chat-completions"
)
else:
return f"{self.base_url}/{app_type}/v1/chat-completions/{self.model_name}"
@model_validator(mode="after")
def validate_model_after(self) -> Self:
if not (self.model_name or self.task_id):
raise ValueError("either model_name or task_id must be assigned a value.")
if not self.ncp_clovastudio_api_key:
self.ncp_clovastudio_api_key = convert_to_secret_str(
get_from_env("ncp_clovastudio_api_key", "NCP_CLOVASTUDIO_API_KEY")
)
if not self.ncp_apigw_api_key:
self.ncp_apigw_api_key = convert_to_secret_str(
get_from_env("ncp_apigw_api_key", "NCP_APIGW_API_KEY", "")
)
if not self.base_url:
self.base_url = get_from_env(
"base_url", "NCP_CLOVASTUDIO_API_BASE_URL", _DEFAULT_BASE_URL
)
if not self.client:
self.client = httpx.Client(
base_url=self.base_url,
headers=self.default_headers(),
timeout=self.timeout,
)
if not self.async_client:
self.async_client = httpx.AsyncClient(
base_url=self.base_url,
headers=self.default_headers(),
timeout=self.timeout,
)
return self
def default_headers(self) -> Dict[str, Any]:
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
}
clovastudio_api_key = (
self.ncp_clovastudio_api_key.get_secret_value()
if self.ncp_clovastudio_api_key
else None
)
if clovastudio_api_key:
headers["X-NCP-CLOVASTUDIO-API-KEY"] = clovastudio_api_key
apigw_api_key = (
self.ncp_apigw_api_key.get_secret_value()
if self.ncp_apigw_api_key
else None
)
if apigw_api_key:
headers["X-NCP-APIGW-API-KEY"] = apigw_api_key
return headers
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict], Dict[str, Any]]:
params = self._client_params
if stop is not None and "stopBefore" in params:
params["stopBefore"] = stop
message_dicts = [_convert_message_to_naver_chat_message(m) for m in messages]
return message_dicts, params
def _completion_with_retry(self, **kwargs: Any) -> Any:
from httpx_sse import (
ServerSentEvent,
SSEError,
connect_sse,
)
if "stream" not in kwargs:
kwargs["stream"] = False
stream = kwargs["stream"]
client = cast(httpx.Client, self.client)
if stream:
def iter_sse() -> Iterator[ServerSentEvent]:
with connect_sse(
client, "POST", self._api_url, json=kwargs
) as event_source:
_raise_on_error(event_source.response)
for sse in event_source.iter_sse():
event_data = sse.json()
if (
sse.event == "signal"
and event_data.get("data", {}) == "[DONE]"
):
return
if sse.event == "error":
raise SSEError(message=sse.data)
yield sse
return iter_sse()
else:
response = client.post(url=self._api_url, json=kwargs)
_raise_on_error(response)
return response.json()
async def _acompletion_with_retry(
self,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
        """Use tenacity to retry the async completion call."""
        from httpx_sse import aconnect_sse
retry_decorator = _create_retry_decorator(self, run_manager=run_manager)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
if "stream" not in kwargs:
kwargs["stream"] = False
stream = kwargs["stream"]
async_client = cast(httpx.AsyncClient, self.async_client)
if stream:
event_source = aconnect_sse(
async_client, "POST", self._api_url, json=kwargs
)
return _aiter_sse(event_source)
else:
response = await async_client.post(url=self._api_url, json=kwargs)
await _araise_on_error(response)
return response.json()
return await _completion_with_retry(**kwargs)
def _create_chat_result(self, response: Dict) -> ChatResult:
generations = []
result = response.get("result", {})
msg = result.get("message", {})
message = _convert_naver_chat_message_to_message(msg)
if isinstance(message, AIMessage):
message.usage_metadata = {
"input_tokens": result.get("inputLength"),
"output_tokens": result.get("outputLength"),
"total_tokens": result.get("inputLength") + result.get("outputLength"),
}
gen = ChatGeneration(
message=message,
)
generations.append(gen)
llm_output = {
"stop_reason": result.get("stopReason"),
"input_length": result.get("inputLength"),
"output_length": result.get("outputLength"),
"seed": result.get("seed"),
"ai_filter": result.get("aiFilter"),
}
return ChatResult(generations=generations, llm_output=llm_output)
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self._completion_with_retry(messages=message_dicts, **params)
return self._create_chat_result(response)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class: Type[BaseMessageChunk] = AIMessageChunk
for sse in self._completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
):
new_chunk = _convert_chunk_to_message_chunk(sse, default_chunk_class)
default_chunk_class = new_chunk.__class__
gen_chunk = ChatGenerationChunk(message=new_chunk)
if run_manager:
run_manager.on_llm_new_token(
token=cast(str, new_chunk.content), chunk=gen_chunk
)
yield gen_chunk
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = await self._acompletion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
)
return self._create_chat_result(response)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class: Type[BaseMessageChunk] = AIMessageChunk
async for chunk in await self._acompletion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
):
new_chunk = _convert_chunk_to_message_chunk(chunk, default_chunk_class)
default_chunk_class = new_chunk.__class__
gen_chunk = ChatGenerationChunk(message=new_chunk)
if run_manager:
await run_manager.on_llm_new_token(
token=cast(str, new_chunk.content), chunk=gen_chunk
)
yield gen_chunk
def _create_retry_decorator(
llm: ChatClovaX,
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
"""Returns a tenacity retry decorator, preconfigured to handle exceptions"""
errors = [httpx.RequestError, httpx.StreamError]
return create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
)
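# A minimal sketch of how the decorator above is applied (it mirrors the pattern in
# ``_acompletion_with_retry``); the wrapped function below is an illustrative
# placeholder, not part of this module:
#
#     retry_decorator = _create_retry_decorator(llm)
#
#     @retry_decorator
#     async def _call_with_retry(**kwargs: Any) -> Any:
#         response = await llm.async_client.post(url=llm._api_url, json=kwargs)
#         await _araise_on_error(response)
#         return response.json()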
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/litellm_router.py | """LiteLLM Router as LangChain Model."""
from typing import (
Any,
AsyncIterator,
Iterator,
List,
Mapping,
Optional,
)
from langchain_core.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import (
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.messages import (
AIMessageChunk,
BaseMessage,
)
from langchain_core.outputs import (
ChatGeneration,
ChatGenerationChunk,
ChatResult,
)
from langchain_community.chat_models.litellm import (
ChatLiteLLM,
_convert_delta_to_message_chunk,
_convert_dict_to_message,
)
token_usage_key_name = "token_usage"
model_extra_key_name = "model_extra"
def get_llm_output(usage: Any, **params: Any) -> dict:
"""Get llm output from usage and params."""
llm_output = {token_usage_key_name: usage}
# copy over metadata (metadata came from router completion call)
metadata = params["metadata"]
for key in metadata:
if key not in llm_output:
# if token usage in metadata, prefer metadata's copy of it
llm_output[key] = metadata[key]
return llm_output
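# Illustrative input/output for ``get_llm_output`` (hypothetical values):
#
#     get_llm_output({"total_tokens": 12}, metadata={"model_group": "gpt-4o"})
#     # -> {"token_usage": {"total_tokens": 12}, "model_group": "gpt-4o"}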
class ChatLiteLLMRouter(ChatLiteLLM):
"""LiteLLM Router as LangChain Model."""
router: Any
def __init__(self, *, router: Any, **kwargs: Any) -> None:
"""Construct Chat LiteLLM Router."""
super().__init__(**kwargs)
self.router = router
@property
def _llm_type(self) -> str:
return "LiteLLMRouter"
def _set_model_for_completion(self) -> None:
# use first model name (aka: model group),
# since we can only pass one to the router completion functions
self.model = self.router.model_list[0]["model_name"]
def _prepare_params_for_router(self, params: Any) -> None:
params["model"] = self.model
# allow the router to set api_base based on its model choice
api_base_key_name = "api_base"
if api_base_key_name in params and params[api_base_key_name] is None:
del params[api_base_key_name]
# add metadata so router can fill it below
params.setdefault("metadata", {})
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
self._set_model_for_completion()
self._prepare_params_for_router(params)
response = self.router.completion(
messages=message_dicts,
**params,
)
return self._create_chat_result(response, **params)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
default_chunk_class = AIMessageChunk
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
self._set_model_for_completion()
self._prepare_params_for_router(params)
for chunk in self.router.completion(messages=message_dicts, **params):
if len(chunk["choices"]) == 0:
continue
delta = chunk["choices"][0]["delta"]
chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
default_chunk_class = chunk.__class__
cg_chunk = ChatGenerationChunk(message=chunk)
if run_manager:
run_manager.on_llm_new_token(chunk.content, chunk=cg_chunk, **params)
yield cg_chunk
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
default_chunk_class = AIMessageChunk
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
self._set_model_for_completion()
self._prepare_params_for_router(params)
async for chunk in await self.router.acompletion(
messages=message_dicts, **params
):
if len(chunk["choices"]) == 0:
continue
delta = chunk["choices"][0]["delta"]
chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
default_chunk_class = chunk.__class__
cg_chunk = ChatGenerationChunk(message=chunk)
if run_manager:
await run_manager.on_llm_new_token(
chunk.content, chunk=cg_chunk, **params
)
yield cg_chunk
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._astream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
self._set_model_for_completion()
self._prepare_params_for_router(params)
response = await self.router.acompletion(
messages=message_dicts,
**params,
)
return self._create_chat_result(response, **params)
# from
# https://github.com/langchain-ai/langchain/blob/master/libs/community/langchain_community/chat_models/openai.py
# but modified to handle LiteLLM Usage class
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
overall_token_usage: dict = {}
system_fingerprint = None
for output in llm_outputs:
if output is None:
# Happens in streaming
continue
token_usage = output["token_usage"]
if token_usage is not None:
# get dict from LiteLLM Usage class
for k, v in token_usage.dict().items():
if k in overall_token_usage:
overall_token_usage[k] += v
else:
overall_token_usage[k] = v
if system_fingerprint is None:
system_fingerprint = output.get("system_fingerprint")
combined = {"token_usage": overall_token_usage, "model_name": self.model_name}
if system_fingerprint:
combined["system_fingerprint"] = system_fingerprint
return combined
def _create_chat_result(
self, response: Mapping[str, Any], **params: Any
) -> ChatResult:
from litellm.utils import Usage
generations = []
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
gen = ChatGeneration(
message=message,
generation_info=dict(finish_reason=res.get("finish_reason")),
)
generations.append(gen)
token_usage = response.get("usage", Usage(prompt_tokens=0, total_tokens=0))
llm_output = get_llm_output(token_usage, **params)
return ChatResult(generations=generations, llm_output=llm_output)
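# A minimal usage sketch for ``ChatLiteLLMRouter`` (assumes the ``litellm`` package is
# installed and that the listed model and its credentials are valid; the values below
# are illustrative):
#
#     from litellm import Router
#
#     router = Router(
#         model_list=[
#             {
#                 "model_name": "gpt-4o",
#                 "litellm_params": {"model": "openai/gpt-4o"},
#             }
#         ]
#     )
#     chat = ChatLiteLLMRouter(router=router)
#     chat.invoke("Hello!")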
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/baichuan.py | import json
import logging
from contextlib import asynccontextmanager
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Type,
Union,
)
import requests
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
ToolMessage,
)
from langchain_core.output_parsers.openai_tools import (
make_invalid_tool_call,
parse_tool_call,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable
from langchain_core.tools import BaseTool
from langchain_core.utils import (
convert_to_secret_str,
get_from_dict_or_env,
get_pydantic_field_names,
)
from langchain_core.utils.function_calling import convert_to_openai_tool
from pydantic import (
BaseModel,
ConfigDict,
Field,
SecretStr,
model_validator,
)
from langchain_community.chat_models.llamacpp import (
_lc_invalid_tool_call_to_openai_tool_call,
_lc_tool_call_to_openai_tool_call,
)
logger = logging.getLogger(__name__)
DEFAULT_API_BASE = "https://api.baichuan-ai.com/v1/chat/completions"
def _convert_message_to_dict(message: BaseMessage) -> dict:
message_dict: Dict[str, Any]
content = message.content
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": content}
if "tool_calls" in message.additional_kwargs:
message_dict["tool_calls"] = message.additional_kwargs["tool_calls"]
elif message.tool_calls or message.invalid_tool_calls:
message_dict["tool_calls"] = [
_lc_tool_call_to_openai_tool_call(tc) for tc in message.tool_calls
] + [
_lc_invalid_tool_call_to_openai_tool_call(tc)
for tc in message.invalid_tool_calls
]
elif isinstance(message, ToolMessage):
message_dict = {
"role": "tool",
"tool_call_id": message.tool_call_id,
"content": content,
"name": message.name or message.additional_kwargs.get("name"),
}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": content}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
content = _dict.get("content", "")
if role == "user":
return HumanMessage(content=content)
elif role == "assistant":
tool_calls = []
invalid_tool_calls = []
additional_kwargs = {}
if raw_tool_calls := _dict.get("tool_calls"):
additional_kwargs["tool_calls"] = raw_tool_calls
for raw_tool_call in raw_tool_calls:
try:
tool_calls.append(parse_tool_call(raw_tool_call, return_id=True))
except Exception as e:
invalid_tool_calls.append(
make_invalid_tool_call(raw_tool_call, str(e))
)
return AIMessage(
content=content,
additional_kwargs=additional_kwargs,
tool_calls=tool_calls, # type: ignore[arg-type]
invalid_tool_calls=invalid_tool_calls,
)
elif role == "tool":
additional_kwargs = {}
if "name" in _dict:
additional_kwargs["name"] = _dict["name"]
return ToolMessage(
content=content,
tool_call_id=_dict.get("tool_call_id"), # type: ignore[arg-type]
additional_kwargs=additional_kwargs,
)
elif role == "system":
return SystemMessage(content=content)
else:
return ChatMessage(content=content, role=role)
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
role = _dict.get("role")
content = _dict.get("content") or ""
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content)
elif role == "system" or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role) # type: ignore[arg-type]
else:
return default_class(content=content) # type: ignore[call-arg]
@asynccontextmanager
async def aconnect_httpx_sse(
client: Any, method: str, url: str, **kwargs: Any
) -> AsyncIterator:
"""Async context manager for connecting to an SSE stream.
Args:
client: The httpx client.
method: The HTTP method.
url: The URL to connect to.
kwargs: Additional keyword arguments to pass to the client.
Yields:
An EventSource object.
"""
from httpx_sse import EventSource
async with client.stream(method, url, **kwargs) as response:
yield EventSource(response)
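# Illustrative use of ``aconnect_httpx_sse`` (mirrors ``ChatBaichuan._astream`` below;
# ``client``, ``url`` and ``payload`` are placeholders for an ``httpx.AsyncClient``,
# the target endpoint and the request body):
#
#     async with aconnect_httpx_sse(client, "POST", url, json=payload) as event_source:
#         async for sse in event_source.aiter_sse():
#             ...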
class ChatBaichuan(BaseChatModel):
"""Baichuan chat model integration.
Setup:
        To use, you should have the environment variable ``BAICHUAN_API_KEY`` set
        with your API key.
.. code-block:: bash
export BAICHUAN_API_KEY="your-api-key"
Key init args — completion params:
model: Optional[str]
Name of Baichuan model to use.
max_tokens: Optional[int]
Max number of tokens to generate.
streaming: Optional[bool]
Whether to stream the results or not.
temperature: Optional[float]
Sampling temperature.
top_p: Optional[float]
What probability mass to use.
top_k: Optional[int]
What search sampling control to use.
Key init args — client params:
api_key: Optional[str]
Baichuan API key. If not passed in will be read from env var BAICHUAN_API_KEY.
base_url: Optional[str]
Base URL for API requests.
See full list of supported init args and their descriptions in the params section.
Instantiate:
.. code-block:: python
from langchain_community.chat_models import ChatBaichuan
chat = ChatBaichuan(
api_key=api_key,
model='Baichuan4',
# temperature=...,
# other params...
)
Invoke:
.. code-block:: python
messages = [
("system", "你是一名专业的翻译家,可以将用户的中文翻译为英文。"),
("human", "我喜欢编程。"),
]
chat.invoke(messages)
.. code-block:: python
AIMessage(
content='I enjoy programming.',
response_metadata={
'token_usage': {
'prompt_tokens': 93,
'completion_tokens': 5,
'total_tokens': 98
},
'model': 'Baichuan4'
},
id='run-944ff552-6a93-44cf-a861-4e4d849746f9-0'
)
Stream:
.. code-block:: python
for chunk in chat.stream(messages):
print(chunk)
.. code-block:: python
content='I' id='run-f99fcd6f-dd31-46d5-be8f-0b6a22bf77d8'
            content=' enjoy programming.' id='run-f99fcd6f-dd31-46d5-be8f-0b6a22bf77d8'
.. code-block:: python
stream = chat.stream(messages)
full = next(stream)
for chunk in stream:
full += chunk
full
.. code-block:: python
AIMessageChunk(
content='I like programming.',
id='run-74689970-dc31-461d-b729-3b6aa93508d2'
)
Async:
.. code-block:: python
await chat.ainvoke(messages)
# stream
# async for chunk in chat.astream(messages):
# print(chunk)
# batch
# await chat.abatch([messages])
.. code-block:: python
AIMessage(
content='I enjoy programming.',
response_metadata={
'token_usage': {
'prompt_tokens': 93,
'completion_tokens': 5,
'total_tokens': 98
},
'model': 'Baichuan4'
},
id='run-952509ed-9154-4ff9-b187-e616d7ddfbba-0'
)
Tool calling:
.. code-block:: python
class get_current_weather(BaseModel):
'''Get current weather.'''
location: str = Field('City or province, such as Shanghai')
llm_with_tools = ChatBaichuan(model='Baichuan3-Turbo').bind_tools([get_current_weather])
llm_with_tools.invoke('How is the weather today?')
.. code-block:: python
[{'name': 'get_current_weather',
'args': {'location': 'New York'},
'id': '3951017OF8doB0A',
'type': 'tool_call'}]
    Response metadata:
.. code-block:: python
ai_msg = chat.invoke(messages)
ai_msg.response_metadata
.. code-block:: python
{
'token_usage': {
'prompt_tokens': 93,
'completion_tokens': 5,
'total_tokens': 98
},
'model': 'Baichuan4'
}
""" # noqa: E501
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"baichuan_api_key": "BAICHUAN_API_KEY",
}
@property
def lc_serializable(self) -> bool:
return True
baichuan_api_base: str = Field(default=DEFAULT_API_BASE, alias="base_url")
"""Baichuan custom endpoints"""
baichuan_api_key: SecretStr = Field(alias="api_key")
"""Baichuan API Key"""
baichuan_secret_key: Optional[SecretStr] = None
"""[DEPRECATED, keeping it for for backward compatibility] Baichuan Secret Key"""
streaming: bool = False
"""Whether to stream the results or not."""
max_tokens: Optional[int] = None
"""Maximum number of tokens to generate."""
request_timeout: int = Field(default=60, alias="timeout")
"""request timeout for chat http requests"""
model: str = "Baichuan2-Turbo-192K"
"""model name of Baichuan, default is `Baichuan2-Turbo-192K`,
other options include `Baichuan2-Turbo`"""
temperature: Optional[float] = Field(default=0.3)
"""What sampling temperature to use."""
top_k: int = 5
"""What search sampling control to use."""
top_p: float = 0.85
"""What probability mass to use."""
with_search_enhance: bool = False
"""[DEPRECATED, keeping it for for backward compatibility],
Whether to use search enhance, default is False."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for API call not explicitly specified."""
model_config = ConfigDict(
populate_by_name=True,
)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
values["baichuan_api_base"] = get_from_dict_or_env(
values,
"baichuan_api_base",
"BAICHUAN_API_BASE",
DEFAULT_API_BASE,
)
values["baichuan_api_key"] = convert_to_secret_str(
get_from_dict_or_env(
values,
["baichuan_api_key", "api_key"],
"BAICHUAN_API_KEY",
)
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Baichuan API."""
normal_params = {
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"stream": self.streaming,
"max_tokens": self.max_tokens,
}
return {**normal_params, **self.model_kwargs}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
res = self._chat(messages, **kwargs)
if res.status_code != 200:
raise ValueError(f"Error from Baichuan api response: {res}")
response = res.json()
return self._create_chat_result(response)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
res = self._chat(messages, stream=True, **kwargs)
if res.status_code != 200:
raise ValueError(f"Error from Baichuan api response: {res}")
default_chunk_class = AIMessageChunk
for chunk in res.iter_lines():
chunk = chunk.decode("utf-8").strip("\r\n")
parts = chunk.split("data: ", 1)
chunk = parts[1] if len(parts) > 1 else None
if chunk is None:
continue
if chunk == "[DONE]":
break
response = json.loads(chunk)
for m in response.get("choices"):
chunk = _convert_delta_to_message_chunk(
m.get("delta"), default_chunk_class
)
default_chunk_class = chunk.__class__
cg_chunk = ChatGenerationChunk(message=chunk)
if run_manager:
run_manager.on_llm_new_token(chunk.content, chunk=cg_chunk)
yield cg_chunk
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
headers = self._create_headers_parameters(**kwargs)
payload = self._create_payload_parameters(messages, **kwargs)
import httpx
async with httpx.AsyncClient(
headers=headers, timeout=self.request_timeout
) as client:
response = await client.post(self.baichuan_api_base, json=payload)
response.raise_for_status()
return self._create_chat_result(response.json())
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
headers = self._create_headers_parameters(**kwargs)
payload = self._create_payload_parameters(messages, stream=True, **kwargs)
import httpx
async with httpx.AsyncClient(
headers=headers, timeout=self.request_timeout
) as client:
async with aconnect_httpx_sse(
client, "POST", self.baichuan_api_base, json=payload
) as event_source:
async for sse in event_source.aiter_sse():
chunk = json.loads(sse.data)
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
chunk = _convert_delta_to_message_chunk(
choice["delta"], AIMessageChunk
)
finish_reason = choice.get("finish_reason", None)
generation_info = (
{"finish_reason": finish_reason}
if finish_reason is not None
else None
)
chunk = ChatGenerationChunk(
message=chunk, generation_info=generation_info
)
if run_manager:
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
if finish_reason is not None:
break
def _chat(self, messages: List[BaseMessage], **kwargs: Any) -> requests.Response:
payload = self._create_payload_parameters(messages, **kwargs)
url = self.baichuan_api_base
headers = self._create_headers_parameters(**kwargs)
res = requests.post(
url=url,
timeout=self.request_timeout,
headers=headers,
json=payload,
stream=self.streaming,
)
return res
def _create_payload_parameters( # type: ignore[no-untyped-def]
self, messages: List[BaseMessage], **kwargs
) -> Dict[str, Any]:
parameters = {**self._default_params, **kwargs}
temperature = parameters.pop("temperature", 0.3)
top_k = parameters.pop("top_k", 5)
top_p = parameters.pop("top_p", 0.85)
model = parameters.pop("model")
with_search_enhance = parameters.pop("with_search_enhance", False)
stream = parameters.pop("stream", False)
tools = parameters.pop("tools", [])
payload = {
"model": model,
"messages": [_convert_message_to_dict(m) for m in messages],
"top_k": top_k,
"top_p": top_p,
"temperature": temperature,
"with_search_enhance": with_search_enhance,
"stream": stream,
"tools": tools,
}
return payload
def _create_headers_parameters(self, **kwargs) -> Dict[str, Any]: # type: ignore[no-untyped-def]
parameters = {**self._default_params, **kwargs}
default_headers = parameters.pop("headers", {})
api_key = ""
if self.baichuan_api_key:
api_key = self.baichuan_api_key.get_secret_value()
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}",
**default_headers,
}
return headers
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for c in response["choices"]:
message = _convert_dict_to_message(c["message"])
gen = ChatGeneration(message=message)
generations.append(gen)
token_usage = response["usage"]
llm_output = {"token_usage": token_usage, "model": self.model}
return ChatResult(generations=generations, llm_output=llm_output)
@property
def _llm_type(self) -> str:
return "baichuan-chat"
def bind_tools(
self,
tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
"""Bind tool-like objects to this chat model.
Args:
tools: A list of tool definitions to bind to this chat model.
                Can be a dictionary, pydantic model, callable, or BaseTool. Pydantic
models, callables, and BaseTools will be automatically converted to
their schema dictionary representation.
**kwargs: Any additional parameters to pass to the
:class:`~langchain.runnable.Runnable` constructor.
"""
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
return super().bind(tools=formatted_tools, **kwargs)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/yi.py | import json
import logging
from contextlib import asynccontextmanager
from typing import Any, AsyncIterator, Dict, Iterator, List, Mapping, Optional, Type
import requests
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.utils import (
convert_to_secret_str,
get_from_dict_or_env,
get_pydantic_field_names,
)
from pydantic import ConfigDict, Field, SecretStr
logger = logging.getLogger(__name__)
DEFAULT_API_BASE_CN = "https://api.lingyiwanwu.com/v1/chat/completions"
DEFAULT_API_BASE_GLOBAL = "https://api.01.ai/v1/chat/completions"
def _convert_message_to_dict(message: BaseMessage) -> dict:
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
    elif isinstance(message, SystemMessage):
        # Yi's OpenAI-compatible API accepts a dedicated "system" role.
        message_dict = {"role": "system", "content": message.content}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
return AIMessage(content=_dict.get("content", "") or "")
elif role == "system":
return AIMessage(content=_dict["content"])
else:
return ChatMessage(content=_dict["content"], role=role)
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
role: str = _dict["role"]
content = _dict.get("content") or ""
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content, type=role)
@asynccontextmanager
async def aconnect_httpx_sse(
client: Any, method: str, url: str, **kwargs: Any
) -> AsyncIterator:
from httpx_sse import EventSource
async with client.stream(method, url, **kwargs) as response:
yield EventSource(response)
class ChatYi(BaseChatModel):
"""Yi chat models API."""
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"yi_api_key": "YI_API_KEY",
}
@property
def lc_serializable(self) -> bool:
return True
yi_api_base: str = Field(default=DEFAULT_API_BASE_CN)
yi_api_key: SecretStr = Field(alias="api_key")
    region: str = Field(default="cn")  # Defaults to the China ("cn") region
streaming: bool = False
request_timeout: int = Field(default=60, alias="timeout")
model: str = "yi-large"
temperature: Optional[float] = Field(default=0.7)
top_p: float = 0.7
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
model_config = ConfigDict(
populate_by_name=True,
)
def __init__(self, **kwargs: Any) -> None:
kwargs["yi_api_key"] = convert_to_secret_str(
get_from_dict_or_env(
kwargs,
["yi_api_key", "api_key"],
"YI_API_KEY",
)
)
if kwargs.get("yi_api_base") is None:
region = kwargs.get("region", "cn").lower()
if region == "global":
kwargs["yi_api_base"] = DEFAULT_API_BASE_GLOBAL
else:
kwargs["yi_api_base"] = DEFAULT_API_BASE_CN
all_required_field_names = get_pydantic_field_names(self.__class__)
extra = kwargs.get("model_kwargs", {})
for field_name in list(kwargs):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
extra[field_name] = kwargs.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
kwargs["model_kwargs"] = extra
super().__init__(**kwargs)
@property
def _default_params(self) -> Dict[str, Any]:
return {
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
"stream": self.streaming,
}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
res = self._chat(messages, **kwargs)
if res.status_code != 200:
raise ValueError(f"Error from Yi api response: {res}")
response = res.json()
return self._create_chat_result(response)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
res = self._chat(messages, stream=True, **kwargs)
if res.status_code != 200:
raise ValueError(f"Error from Yi api response: {res}")
default_chunk_class = AIMessageChunk
for chunk in res.iter_lines():
chunk = chunk.decode("utf-8").strip("\r\n")
parts = chunk.split("data: ", 1)
chunk = parts[1] if len(parts) > 1 else None
if chunk is None:
continue
if chunk == "[DONE]":
break
response = json.loads(chunk)
for m in response.get("choices"):
chunk = _convert_delta_to_message_chunk(
m.get("delta"), default_chunk_class
)
default_chunk_class = chunk.__class__
cg_chunk = ChatGenerationChunk(message=chunk)
if run_manager:
run_manager.on_llm_new_token(chunk.content, chunk=cg_chunk)
yield cg_chunk
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
headers = self._create_headers_parameters(**kwargs)
payload = self._create_payload_parameters(messages, **kwargs)
import httpx
async with httpx.AsyncClient(
headers=headers, timeout=self.request_timeout
) as client:
response = await client.post(self.yi_api_base, json=payload)
response.raise_for_status()
return self._create_chat_result(response.json())
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
headers = self._create_headers_parameters(**kwargs)
payload = self._create_payload_parameters(messages, stream=True, **kwargs)
import httpx
async with httpx.AsyncClient(
headers=headers, timeout=self.request_timeout
) as client:
async with aconnect_httpx_sse(
client, "POST", self.yi_api_base, json=payload
) as event_source:
async for sse in event_source.aiter_sse():
chunk = json.loads(sse.data)
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
chunk = _convert_delta_to_message_chunk(
choice["delta"], AIMessageChunk
)
finish_reason = choice.get("finish_reason", None)
generation_info = (
{"finish_reason": finish_reason}
if finish_reason is not None
else None
)
chunk = ChatGenerationChunk(
message=chunk, generation_info=generation_info
)
if run_manager:
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
if finish_reason is not None:
break
def _chat(self, messages: List[BaseMessage], **kwargs: Any) -> requests.Response:
payload = self._create_payload_parameters(messages, **kwargs)
url = self.yi_api_base
headers = self._create_headers_parameters(**kwargs)
res = requests.post(
url=url,
timeout=self.request_timeout,
headers=headers,
json=payload,
stream=self.streaming,
)
return res
def _create_payload_parameters(
self, messages: List[BaseMessage], **kwargs: Any
) -> Dict[str, Any]:
parameters = {**self._default_params, **kwargs}
temperature = parameters.pop("temperature", 0.7)
top_p = parameters.pop("top_p", 0.7)
model = parameters.pop("model")
stream = parameters.pop("stream", False)
payload = {
"model": model,
"messages": [_convert_message_to_dict(m) for m in messages],
"top_p": top_p,
"temperature": temperature,
"stream": stream,
}
return payload
def _create_headers_parameters(self, **kwargs: Any) -> Dict[str, Any]:
parameters = {**self._default_params, **kwargs}
default_headers = parameters.pop("headers", {})
api_key = ""
if self.yi_api_key:
api_key = self.yi_api_key.get_secret_value()
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}",
**default_headers,
}
return headers
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for c in response["choices"]:
message = _convert_dict_to_message(c["message"])
gen = ChatGeneration(message=message)
generations.append(gen)
token_usage = response["usage"]
llm_output = {"token_usage": token_usage, "model": self.model}
return ChatResult(generations=generations, llm_output=llm_output)
@property
def _llm_type(self) -> str:
return "yi-chat"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/outlines.py | from __future__ import annotations
import importlib.util
import platform
from collections.abc import AsyncIterator
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Sequence,
Tuple,
Type,
TypedDict,
TypeVar,
Union,
get_origin,
)
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.callbacks.manager import AsyncCallbackManagerForLLMRun
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
from pydantic import BaseModel, Field, model_validator
from typing_extensions import Literal
from langchain_community.adapters.openai import convert_message_to_dict
_BM = TypeVar("_BM", bound=BaseModel)
_DictOrPydanticClass = Union[Dict[str, Any], Type[_BM], Type]
class ChatOutlines(BaseChatModel):
"""Outlines chat model integration.
Setup:
pip install outlines
Key init args — client params:
backend: Literal["llamacpp", "transformers", "transformers_vision", "vllm", "mlxlm"] = "transformers"
Specifies the backend to use for the model.
Key init args — completion params:
model: str
Identifier for the model to use with Outlines.
max_tokens: int = 256
The maximum number of tokens to generate.
stop: Optional[List[str]] = None
A list of strings to stop generation when encountered.
streaming: bool = True
Whether to stream the results, token by token.
See full list of supported init args and their descriptions in the params section.
Instantiate:
from langchain_community.chat_models import ChatOutlines
chat = ChatOutlines(model="meta-llama/Llama-2-7b-chat-hf")
Invoke:
chat.invoke([HumanMessage(content="Say foo:")])
Stream:
for chunk in chat.stream([HumanMessage(content="Count to 10:")]):
print(chunk.content, end="", flush=True)
""" # noqa: E501
client: Any = None # :meta private:
model: str
"""Identifier for the model to use with Outlines.
The model identifier should be a string specifying:
- A Hugging Face model name (e.g., "meta-llama/Llama-2-7b-chat-hf")
- A local path to a model
- For GGUF models, the format is "repo_id/file_name"
(e.g., "TheBloke/Llama-2-7B-Chat-GGUF/llama-2-7b-chat.Q4_K_M.gguf")
Examples:
- "TheBloke/Llama-2-7B-Chat-GGUF/llama-2-7b-chat.Q4_K_M.gguf"
- "meta-llama/Llama-2-7b-chat-hf"
"""
backend: Literal[
"llamacpp", "transformers", "transformers_vision", "vllm", "mlxlm"
] = "transformers"
"""Specifies the backend to use for the model.
Supported backends are:
- "llamacpp": For GGUF models using llama.cpp
- "transformers": For Hugging Face Transformers models (default)
- "transformers_vision": For vision-language models (e.g., LLaVA)
- "vllm": For models using the vLLM library
- "mlxlm": For models using the MLX framework
Note: Ensure you have the necessary dependencies installed for the chosen backend.
The system will attempt to import required packages and may raise an ImportError
if they are not available.
"""
max_tokens: int = 256
"""The maximum number of tokens to generate."""
stop: Optional[List[str]] = None
"""A list of strings to stop generation when encountered."""
streaming: bool = True
"""Whether to stream the results, token by token."""
regex: Optional[str] = None
"""Regular expression for structured generation.
If provided, Outlines will guarantee that the generated text matches this regex.
This can be useful for generating structured outputs like IP addresses, dates, etc.
Example: (valid IP address)
regex = r"((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)"
Note: Computing the regex index can take some time, so it's recommended to reuse
the same regex for multiple generations if possible.
For more details, see: https://dottxt-ai.github.io/outlines/reference/generation/regex/
"""
type_constraints: Optional[Union[type, str]] = None
"""Type constraints for structured generation.
Restricts the output to valid Python types. Supported types include:
int, float, bool, datetime.date, datetime.time, datetime.datetime.
Example:
type_constraints = int
For more details, see: https://dottxt-ai.github.io/outlines/reference/generation/format/
"""
json_schema: Optional[Union[Any, Dict, Callable]] = None
"""Pydantic model, JSON Schema, or callable (function signature)
for structured JSON generation.
Outlines can generate JSON output that follows a specified structure,
which is useful for:
1. Parsing the answer (e.g., with Pydantic), storing it, or returning it to a user.
2. Calling a function with the result.
You can provide:
- A Pydantic model
- A JSON Schema (as a Dict)
- A callable (function signature)
The generated JSON will adhere to the specified structure.
For more details, see: https://dottxt-ai.github.io/outlines/reference/generation/json/
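    Example (illustrative; any Pydantic model, JSON Schema dict, or function
    signature could be supplied instead):
        from pydantic import BaseModel

        class Character(BaseModel):
            name: str
            age: int

        json_schema = Character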
"""
grammar: Optional[str] = None
"""Context-free grammar for structured generation.
If provided, Outlines will generate text that adheres to the specified grammar.
The grammar should be defined in EBNF format.
This can be useful for generating structured outputs like mathematical expressions,
programming languages, or custom domain-specific languages.
Example:
grammar = '''
?start: expression
?expression: term (("+" | "-") term)*
?term: factor (("*" | "/") factor)*
?factor: NUMBER | "-" factor | "(" expression ")"
%import common.NUMBER
'''
Note: Grammar-based generation is currently experimental and may have performance
limitations. It uses greedy generation to mitigate these issues.
For more details and examples, see:
https://dottxt-ai.github.io/outlines/reference/generation/cfg/
"""
custom_generator: Optional[Any] = None
"""Set your own outlines generator object to override the default behavior."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Additional parameters to pass to the underlying model.
Example:
model_kwargs = {"temperature": 0.8, "seed": 42}
"""
@model_validator(mode="after")
def validate_environment(self) -> "ChatOutlines":
"""Validate that outlines is installed and create a model instance."""
num_constraints = sum(
[
bool(self.regex),
bool(self.type_constraints),
bool(self.json_schema),
bool(self.grammar),
]
)
if num_constraints > 1:
raise ValueError(
"Either none or exactly one of regex, type_constraints, "
"json_schema, or grammar can be provided."
)
return self.build_client()
def build_client(self) -> "ChatOutlines":
try:
import outlines.models as models
except ImportError:
raise ImportError(
"Could not import the Outlines library. "
"Please install it with `pip install outlines`."
)
def check_packages_installed(
packages: List[Union[str, Tuple[str, str]]],
) -> None:
missing_packages = [
pkg if isinstance(pkg, str) else pkg[0]
for pkg in packages
if importlib.util.find_spec(pkg[1] if isinstance(pkg, tuple) else pkg)
is None
]
if missing_packages:
raise ImportError(
f"Missing packages: {', '.join(missing_packages)}. "
"You can install them with:\n\n"
f" pip install {' '.join(missing_packages)}"
)
if self.backend == "llamacpp":
check_packages_installed([("llama-cpp-python", "llama_cpp")])
if ".gguf" in self.model:
creator, repo_name, file_name = self.model.split("/", 2)
repo_id = f"{creator}/{repo_name}"
else:
raise ValueError("GGUF file_name must be provided for llama.cpp.")
self.client = models.llamacpp(repo_id, file_name, **self.model_kwargs)
elif self.backend == "transformers":
check_packages_installed(["transformers", "torch", "datasets"])
self.client = models.transformers(
model_name=self.model, **self.model_kwargs
)
elif self.backend == "transformers_vision":
if hasattr(models, "transformers_vision"):
from transformers import LlavaNextForConditionalGeneration
self.client = models.transformers_vision(
self.model,
model_class=LlavaNextForConditionalGeneration,
**self.model_kwargs,
)
else:
raise ValueError("transformers_vision backend is not supported")
elif self.backend == "vllm":
if platform.system() == "Darwin":
raise ValueError("vLLM backend is not supported on macOS.")
check_packages_installed(["vllm"])
self.client = models.vllm(self.model, **self.model_kwargs)
elif self.backend == "mlxlm":
check_packages_installed(["mlx"])
self.client = models.mlxlm(self.model, **self.model_kwargs)
else:
raise ValueError(f"Unsupported backend: {self.backend}")
return self
@property
def _llm_type(self) -> str:
return "outlines-chat"
@property
def _default_params(self) -> Dict[str, Any]:
return {
"max_tokens": self.max_tokens,
"stop_at": self.stop,
**self.model_kwargs,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
return {
"model": self.model,
"backend": self.backend,
"regex": self.regex,
"type_constraints": self.type_constraints,
"json_schema": self.json_schema,
"grammar": self.grammar,
**self._default_params,
}
@property
def _generator(self) -> Any:
from outlines import generate
if self.custom_generator:
return self.custom_generator
constraints = [
self.regex,
self.type_constraints,
self.json_schema,
self.grammar,
]
num_constraints = sum(constraint is not None for constraint in constraints)
if num_constraints != 1 and num_constraints != 0:
raise ValueError(
"Either none or exactly one of regex, type_constraints, "
"json_schema, or grammar can be provided."
)
if self.regex:
return generate.regex(self.client, regex_str=self.regex)
if self.type_constraints:
return generate.format(self.client, python_type=self.type_constraints)
if self.json_schema:
return generate.json(self.client, schema_object=self.json_schema)
if self.grammar:
return generate.cfg(self.client, cfg_str=self.grammar)
return generate.text(self.client)
def _convert_messages_to_openai_format(
self, messages: list[BaseMessage]
) -> list[dict]:
return [convert_message_to_dict(message) for message in messages]
def _convert_messages_to_prompt(self, messages: list[BaseMessage]) -> str:
"""Convert a list of messages to a single prompt."""
if self.backend == "llamacpp": # get base_model_name from gguf repo_id
from huggingface_hub import ModelCard
repo_creator, gguf_repo_name, file_name = self.model.split("/")
model_card = ModelCard.load(f"{repo_creator}/{gguf_repo_name}")
if hasattr(model_card.data, "base_model"):
model_name = model_card.data.base_model
else:
raise ValueError(f"Base model name not found for {self.model}")
else:
model_name = self.model
from transformers import AutoTokenizer
return AutoTokenizer.from_pretrained(model_name).apply_chat_template(
self._convert_messages_to_openai_format(messages),
tokenize=False,
add_generation_prompt=True,
)
def bind_tools(
self,
tools: Sequence[Dict[str, Any] | type | Callable[..., Any] | BaseTool],
*,
tool_choice: Optional[Union[Dict, bool, str]] = None,
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
"""Bind tool-like objects to this chat model
tool_choice: does not currently support "any", "auto" choices like OpenAI
tool-calling API. should be a dict of the form to force this tool
{"type": "function", "function": {"name": <<tool_name>>}}.
"""
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
tool_names = [ft["function"]["name"] for ft in formatted_tools]
if tool_choice:
if isinstance(tool_choice, dict):
if not any(
tool_choice["function"]["name"] == name for name in tool_names
):
raise ValueError(
f"Tool choice {tool_choice=} was specified, but the only "
f"provided tools were {tool_names}."
)
elif isinstance(tool_choice, str):
chosen = [
f for f in formatted_tools if f["function"]["name"] == tool_choice
]
if not chosen:
raise ValueError(
f"Tool choice {tool_choice=} was specified, but the only "
f"provided tools were {tool_names}."
)
elif isinstance(tool_choice, bool):
if len(formatted_tools) > 1:
raise ValueError(
"tool_choice=True can only be specified when a single tool is "
f"passed in. Received {len(tools)} tools."
)
tool_choice = formatted_tools[0]
kwargs["tool_choice"] = tool_choice
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
return super().bind_tools(tools=formatted_tools, **kwargs)
def with_structured_output(
self,
schema: Optional[_DictOrPydanticClass],
*,
include_raw: bool = False,
**kwargs: Any,
) -> Runnable[LanguageModelInput, Union[dict, BaseModel]]:
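        """Return a runnable that constrains the model output to ``schema``.

        A minimal illustrative example (assumes ``chat`` is an already-instantiated
        ``ChatOutlines`` model):

            from pydantic import BaseModel

            class Answer(BaseModel):
                value: int

            structured_chat = chat.with_structured_output(Answer)
            structured_chat.invoke("What is 2 + 2?")
        """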
if get_origin(schema) is TypedDict:
raise NotImplementedError("TypedDict is not supported yet by Outlines")
self.json_schema = schema
if isinstance(schema, type) and issubclass(schema, BaseModel):
parser: Union[PydanticOutputParser, JsonOutputParser] = (
PydanticOutputParser(pydantic_object=schema)
)
else:
parser = JsonOutputParser()
if include_raw: # TODO
raise NotImplementedError("include_raw is not yet supported")
return self | parser
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
params = {**self._default_params, **kwargs}
if stop:
params["stop_at"] = stop
prompt = self._convert_messages_to_prompt(messages)
response = ""
if self.streaming:
for chunk in self._stream(
messages=messages,
stop=stop,
run_manager=run_manager,
**kwargs,
):
if isinstance(chunk.message.content, str):
response += chunk.message.content
else:
raise ValueError(
"Invalid content type, only str is supported, "
f"got {type(chunk.message.content)}"
)
else:
response = self._generator(prompt, **params)
message = AIMessage(content=response)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
params = {**self._default_params, **kwargs}
if stop:
params["stop_at"] = stop
prompt = self._convert_messages_to_prompt(messages)
for token in self._generator.stream(prompt, **params):
if run_manager:
run_manager.on_llm_new_token(token)
message_chunk = AIMessageChunk(content=token)
chunk = ChatGenerationChunk(message=message_chunk)
yield chunk
async def _agenerate(
self,
messages: List[BaseMessage],
stop: List[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> ChatResult:
if hasattr(self._generator, "agenerate"):
params = {**self._default_params, **kwargs}
if stop:
params["stop_at"] = stop
prompt = self._convert_messages_to_prompt(messages)
response = await self._generator.agenerate(prompt, **params)
message = AIMessage(content=response)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
elif self.streaming:
response = ""
async for chunk in self._astream(messages, stop, run_manager, **kwargs):
response += chunk.message.content or ""
message = AIMessage(content=response)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
else:
return await super()._agenerate(messages, stop, run_manager, **kwargs)
async def _astream(
self,
messages: List[BaseMessage],
stop: List[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
if hasattr(self._generator, "astream"):
params = {**self._default_params, **kwargs}
if stop:
params["stop_at"] = stop
prompt = self._convert_messages_to_prompt(messages)
async for token in self._generator.astream(prompt, **params):
if run_manager:
await run_manager.on_llm_new_token(token)
message_chunk = AIMessageChunk(content=token)
chunk = ChatGenerationChunk(message=message_chunk)
yield chunk
else:
async for chunk in super()._astream(messages, stop, run_manager, **kwargs):
yield chunk
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/databricks.py | import logging
from urllib.parse import urlparse
from langchain_core._api import deprecated
from langchain_community.chat_models.mlflow import ChatMlflow
logger = logging.getLogger(__name__)
@deprecated(
since="0.3.3",
removal="1.0",
alternative_import="langchain_databricks.ChatDatabricks",
)
class ChatDatabricks(ChatMlflow):
"""`Databricks` chat models API.
To use, you should have the ``mlflow`` python package installed.
For more information, see https://mlflow.org/docs/latest/llms/deployments.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatDatabricks
chat_model = ChatDatabricks(
target_uri="databricks",
endpoint="databricks-llama-2-70b-chat",
temperature=0.1,
)
# single input invocation
print(chat_model.invoke("What is MLflow?").content)
# single input invocation with streaming response
for chunk in chat_model.stream("What is MLflow?"):
print(chunk.content, end="|")
"""
target_uri: str = "databricks"
"""The target URI to use. Defaults to ``databricks``."""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "databricks-chat"
@property
def _mlflow_extras(self) -> str:
return ""
def _validate_uri(self) -> None:
if self.target_uri == "databricks":
return
if urlparse(self.target_uri).scheme != "databricks":
raise ValueError(
"Invalid target URI. The target URI must be a valid databricks URI."
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/mlflow_ai_gateway.py | import logging
import warnings
from typing import Any, Dict, List, Mapping, Optional
from langchain_core.callbacks import (
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
BaseMessage,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import (
ChatGeneration,
ChatResult,
)
from pydantic import BaseModel
logger = logging.getLogger(__name__)
# Ignoring type because below is valid pydantic code
# Unexpected keyword argument "extra" for "__init_subclass__" of "object" [call-arg]
class ChatParams(BaseModel, extra="allow"):
"""Parameters for the `MLflow AI Gateway` LLM."""
temperature: float = 0.0
candidate_count: int = 1
"""The number of candidates to return."""
stop: Optional[List[str]] = None
max_tokens: Optional[int] = None
class ChatMLflowAIGateway(BaseChatModel):
"""`MLflow AI Gateway` chat models API.
To use, you should have the ``mlflow[gateway]`` python package installed.
For more information, see https://mlflow.org/docs/latest/gateway/index.html.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatMLflowAIGateway
chat = ChatMLflowAIGateway(
gateway_uri="<your-mlflow-ai-gateway-uri>",
route="<your-mlflow-ai-gateway-chat-route>",
params={
"temperature": 0.1
}
)
"""
def __init__(self, **kwargs: Any):
warnings.warn(
"`ChatMLflowAIGateway` is deprecated. Use `ChatMlflow` or "
"`ChatDatabricks` instead.",
DeprecationWarning,
)
try:
import mlflow.gateway
except ImportError as e:
raise ImportError(
"Could not import `mlflow.gateway` module. "
"Please install it with `pip install mlflow[gateway]`."
) from e
super().__init__(**kwargs)
if self.gateway_uri:
mlflow.gateway.set_gateway_uri(self.gateway_uri)
route: str
gateway_uri: Optional[str] = None
params: Optional[ChatParams] = None
@property
def _default_params(self) -> Dict[str, Any]:
params: Dict[str, Any] = {
"gateway_uri": self.gateway_uri,
"route": self.route,
**(self.params.dict() if self.params else {}),
}
return params
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
try:
import mlflow.gateway
except ImportError as e:
raise ImportError(
"Could not import `mlflow.gateway` module. "
"Please install it with `pip install mlflow[gateway]`."
) from e
message_dicts = [
ChatMLflowAIGateway._convert_message_to_dict(message)
for message in messages
]
data: Dict[str, Any] = {
"messages": message_dicts,
**(self.params.dict() if self.params else {}),
}
resp = mlflow.gateway.query(self.route, data=data)
return ChatMLflowAIGateway._create_chat_result(resp)
@property
def _identifying_params(self) -> Dict[str, Any]:
return self._default_params
def _get_invocation_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> Dict[str, Any]:
"""Get the parameters used to invoke the model FOR THE CALLBACKS."""
return {
**self._default_params,
**super()._get_invocation_params(stop=stop, **kwargs),
}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "mlflow-ai-gateway-chat"
@staticmethod
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
content = _dict["content"]
if role == "user":
return HumanMessage(content=content)
elif role == "assistant":
return AIMessage(content=content)
elif role == "system":
return SystemMessage(content=content)
else:
return ChatMessage(content=content, role=role)
@staticmethod
def _raise_functions_not_supported() -> None:
raise ValueError(
"Function messages are not supported by the MLflow AI Gateway. Please"
" create a feature request at https://github.com/mlflow/mlflow/issues."
)
@staticmethod
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
raise ValueError(
"Function messages are not supported by the MLflow AI Gateway. Please"
" create a feature request at https://github.com/mlflow/mlflow/issues."
)
else:
raise ValueError(f"Got unknown message type: {message}")
if "function_call" in message.additional_kwargs:
ChatMLflowAIGateway._raise_functions_not_supported()
if message.additional_kwargs:
logger.warning(
"Additional message arguments are unsupported by MLflow AI Gateway "
" and will be ignored: %s",
message.additional_kwargs,
)
return message_dict
@staticmethod
def _create_chat_result(response: Mapping[str, Any]) -> ChatResult:
generations = []
for candidate in response["candidates"]:
message = ChatMLflowAIGateway._convert_dict_to_message(candidate["message"])
message_metadata = candidate.get("metadata", {})
gen = ChatGeneration(
message=message,
generation_info=dict(message_metadata),
)
generations.append(gen)
response_metadata = response.get("metadata", {})
return ChatResult(generations=generations, llm_output=response_metadata)
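# A minimal usage sketch (the gateway URI and route below are assumed placeholders;
# note the class is deprecated in favor of ChatMlflow / ChatDatabricks, as the
# __init__ warning states):
#
#     from langchain_core.messages import HumanMessage
#
#     chat = ChatMLflowAIGateway(
#         gateway_uri="http://127.0.0.1:5000",   # assumed local gateway
#         route="chat",                          # assumed route name
#         params={"temperature": 0.1},
#     )
#     result = chat.invoke([HumanMessage(content="Hello!")])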
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/jinachat.py | """JinaChat wrapper."""
from __future__ import annotations
import logging
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
)
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
FunctionMessage,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.utils import (
convert_to_secret_str,
get_from_dict_or_env,
get_pydantic_field_names,
pre_init,
)
from pydantic import ConfigDict, Field, SecretStr, model_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
logger = logging.getLogger(__name__)
def _create_retry_decorator(llm: JinaChat) -> Callable[[Any], Any]:
import openai
min_seconds = 1
max_seconds = 60
# Wait 2^x * 1 second between each retry, starting at 1 second
# and capping at 60 seconds between attempts.
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout) # type: ignore[attr-defined]
| retry_if_exception_type(openai.error.APIError) # type: ignore[attr-defined]
| retry_if_exception_type(openai.error.APIConnectionError) # type: ignore[attr-defined]
| retry_if_exception_type(openai.error.RateLimitError) # type: ignore[attr-defined]
| retry_if_exception_type(openai.error.ServiceUnavailableError) # type: ignore[attr-defined]
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
async def acompletion_with_retry(llm: JinaChat, **kwargs: Any) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
return await _completion_with_retry(**kwargs)
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
role = _dict.get("role")
content = _dict.get("content") or ""
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content)
elif role == "system" or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role) # type: ignore[arg-type]
else:
return default_class(content=content) # type: ignore[call-arg]
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
content = _dict["content"] or ""
return AIMessage(content=content)
elif role == "system":
return SystemMessage(content=_dict["content"])
else:
return ChatMessage(content=_dict["content"], role=role)
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"name": message.name,
"content": message.content,
}
else:
raise ValueError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
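# Illustrative sketch of the round trip performed by the two converters above
# (the message contents are assumptions, not API output):
#
#     _convert_message_to_dict(HumanMessage(content="hi"))
#     # -> {"role": "user", "content": "hi"}
#     _convert_dict_to_message({"role": "assistant", "content": "hello"})
#     # -> AIMessage(content="hello")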
class JinaChat(BaseChatModel):
"""`Jina AI` Chat models API.
To use, you should have the ``openai`` python package installed, and the
environment variable ``JINACHAT_API_KEY`` set to your API key, which you
can generate at https://chat.jina.ai/api.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.chat_models import JinaChat
chat = JinaChat()
"""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"jinachat_api_key": "JINACHAT_API_KEY"}
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return False
client: Any = None #: :meta private:
temperature: float = 0.7
"""What sampling temperature to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
jinachat_api_key: Optional[SecretStr] = None
"""Base URL path for API requests,
leave blank if not using a proxy or service emulator."""
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout for requests to JinaChat completion API. Default is 600 seconds."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
max_tokens: Optional[int] = None
"""Maximum number of tokens to generate."""
model_config = ConfigDict(
populate_by_name=True,
)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["jinachat_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "jinachat_api_key", "JINACHAT_API_KEY")
)
try:
import openai
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
try:
values["client"] = openai.ChatCompletion # type: ignore[attr-defined]
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling JinaChat API."""
return {
"request_timeout": self.request_timeout,
"max_tokens": self.max_tokens,
"stream": self.streaming,
"temperature": self.temperature,
**self.model_kwargs,
}
def _create_retry_decorator(self) -> Callable[[Any], Any]:
import openai
min_seconds = 1
max_seconds = 60
# Wait 2^x * 1 second between each retry, starting at 1 second
# and capping at 60 seconds between attempts.
return retry(
reraise=True,
stop=stop_after_attempt(self.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout) # type: ignore[attr-defined]
| retry_if_exception_type(openai.error.APIError) # type: ignore[attr-defined]
| retry_if_exception_type(openai.error.APIConnectionError) # type: ignore[attr-defined]
| retry_if_exception_type(openai.error.RateLimitError) # type: ignore[attr-defined]
| retry_if_exception_type(openai.error.ServiceUnavailableError) # type: ignore[attr-defined]
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def completion_with_retry(self, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = self._create_retry_decorator()
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return self.client.create(**kwargs)
return _completion_with_retry(**kwargs)
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
overall_token_usage: dict = {}
for output in llm_outputs:
if output is None:
# Happens in streaming
continue
token_usage = output["token_usage"]
for k, v in token_usage.items():
if k in overall_token_usage:
overall_token_usage[k] += v
else:
overall_token_usage[k] = v
return {"token_usage": overall_token_usage}
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class = AIMessageChunk
for chunk in self.completion_with_retry(messages=message_dicts, **params):
delta = chunk["choices"][0]["delta"]
chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
default_chunk_class = chunk.__class__
cg_chunk = ChatGenerationChunk(message=chunk)
if run_manager:
run_manager.on_llm_new_token(chunk.content, chunk=cg_chunk)
yield cg_chunk
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.completion_with_retry(messages=message_dicts, **params)
return self._create_chat_result(response)
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = dict(self._invocation_params)
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
gen = ChatGeneration(message=message)
generations.append(gen)
llm_output = {"token_usage": response["usage"]}
return ChatResult(generations=generations, llm_output=llm_output)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class = AIMessageChunk
async for chunk in await acompletion_with_retry(
self, messages=message_dicts, **params
):
delta = chunk["choices"][0]["delta"]
chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
default_chunk_class = chunk.__class__
cg_chunk = ChatGenerationChunk(message=chunk)
if run_manager:
await run_manager.on_llm_new_token(chunk.content, chunk=cg_chunk)
yield cg_chunk
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._astream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = await acompletion_with_retry(self, messages=message_dicts, **params)
return self._create_chat_result(response)
@property
def _invocation_params(self) -> Mapping[str, Any]:
"""Get the parameters used to invoke the model."""
jinachat_creds: Dict[str, Any] = {
"api_key": self.jinachat_api_key
and self.jinachat_api_key.get_secret_value(),
"api_base": "https://api.chat.jina.ai/v1",
"model": "jinachat",
}
return {**jinachat_creds, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "jinachat"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/moonshot.py | """Wrapper around Moonshot chat models."""
from typing import Dict
from langchain_core.utils import (
convert_to_secret_str,
get_from_dict_or_env,
pre_init,
)
from langchain_community.chat_models import ChatOpenAI
from langchain_community.llms.moonshot import MOONSHOT_SERVICE_URL_BASE, MoonshotCommon
class MoonshotChat(MoonshotCommon, ChatOpenAI):  # type: ignore[misc, override]
"""Moonshot large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``MOONSHOT_API_KEY`` set with your API key.
(Moonshot's chat API is compatible with OpenAI's SDK.)
Referenced from https://platform.moonshot.cn/docs
Example:
.. code-block:: python
from langchain_community.chat_models.moonshot import MoonshotChat
moonshot = MoonshotChat(model="moonshot-v1-8k")
"""
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the environment is set up correctly."""
values["moonshot_api_key"] = convert_to_secret_str(
get_from_dict_or_env(
values,
["moonshot_api_key", "api_key", "openai_api_key"],
"MOONSHOT_API_KEY",
)
)
try:
import openai
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
client_params = {
"api_key": values["moonshot_api_key"].get_secret_value(),
"base_url": values["base_url"]
if "base_url" in values
else MOONSHOT_SERVICE_URL_BASE,
}
if not values.get("client"):
values["client"] = openai.OpenAI(**client_params).chat.completions
if not values.get("async_client"):
values["async_client"] = openai.AsyncOpenAI(
**client_params
).chat.completions
return values
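# A minimal usage sketch (assumes MOONSHOT_API_KEY is set; the model name is taken
# from the class docstring example):
#
#     chat = MoonshotChat(model="moonshot-v1-8k")
#     chat.invoke("Introduce yourself in one sentence.")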
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/zhipuai.py | """ZhipuAI chat models wrapper."""
from __future__ import annotations
import json
import logging
import time
from collections.abc import AsyncIterator, Iterator
from contextlib import asynccontextmanager, contextmanager
from operator import itemgetter
from typing import (
Any,
Callable,
Dict,
List,
Literal,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
ToolMessage,
)
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
PydanticToolsParser,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from langchain_core.utils.function_calling import convert_to_openai_tool
from pydantic import BaseModel, ConfigDict, Field, model_validator
logger = logging.getLogger(__name__)
API_TOKEN_TTL_SECONDS = 3 * 60
ZHIPUAI_API_BASE = "https://open.bigmodel.cn/api/paas/v4/chat/completions"
def _is_pydantic_class(obj: Any) -> bool:
return isinstance(obj, type) and issubclass(obj, BaseModel)
@contextmanager
def connect_sse(client: Any, method: str, url: str, **kwargs: Any) -> Iterator:
"""Context manager for connecting to an SSE stream.
Args:
client: The HTTP client.
method: The HTTP method.
url: The URL.
kwargs: Additional keyword arguments.
Yields:
The event source.
"""
from httpx_sse import EventSource
with client.stream(method, url, **kwargs) as response:
yield EventSource(response)
@asynccontextmanager
async def aconnect_sse(
client: Any, method: str, url: str, **kwargs: Any
) -> AsyncIterator:
"""Async context manager for connecting to an SSE stream.
Args:
client: The HTTP client.
method: The HTTP method.
url: The URL.
kwargs: Additional keyword arguments.
Yields:
The event source.
"""
from httpx_sse import EventSource
async with client.stream(method, url, **kwargs) as response:
yield EventSource(response)
def _get_jwt_token(api_key: str) -> str:
"""Gets JWT token for ZhipuAI API.
See 'https://open.bigmodel.cn/dev/api#nosdk'.
Args:
api_key: The API key for ZhipuAI API.
Returns:
The JWT token.
"""
try:
import jwt
except ImportError:
raise ImportError(
"jwt package not found, please install it with" "`pip install pyjwt`"
)
try:
id, secret = api_key.split(".")
except ValueError as err:
raise ValueError(f"Invalid API key: {api_key}") from err
payload = {
"api_key": id,
"exp": int(round(time.time() * 1000)) + API_TOKEN_TTL_SECONDS * 1000,
"timestamp": int(round(time.time() * 1000)),
}
return jwt.encode(
payload,
secret,
algorithm="HS256",
headers={"alg": "HS256", "sign_type": "SIGN"},
)
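# Illustrative sketch of how the JWT helper is used (the key below is a made-up
# placeholder in the "<id>.<secret>" format the function expects):
#
#     token = _get_jwt_token("my-key-id.my-key-secret")
#     headers = {"Authorization": token, "Accept": "application/json"}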
def _convert_dict_to_message(dct: Dict[str, Any]) -> BaseMessage:
role = dct.get("role")
content = dct.get("content", "")
if role == "system":
return SystemMessage(content=content)
if role == "user":
return HumanMessage(content=content)
if role == "assistant":
additional_kwargs = {}
tool_calls = dct.get("tool_calls", None)
if tool_calls is not None:
additional_kwargs["tool_calls"] = tool_calls
return AIMessage(content=content, additional_kwargs=additional_kwargs)
if role == "tool":
additional_kwargs = {}
if "name" in dct:
additional_kwargs["name"] = dct["name"]
return ToolMessage(
content=content,
tool_call_id=dct.get("tool_call_id"), # type: ignore[arg-type]
additional_kwargs=additional_kwargs,
)
return ChatMessage(role=role, content=content) # type: ignore[arg-type]
def _convert_message_to_dict(message: BaseMessage) -> Dict[str, Any]:
"""Convert a LangChain message to a dictionary.
Args:
message: The LangChain message.
Returns:
The dictionary.
"""
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, ToolMessage):
message_dict = {
"role": "tool",
"content": message.content,
"tool_call_id": message.tool_call_id,
"name": message.name or message.additional_kwargs.get("name"),
}
else:
raise TypeError(f"Got unknown type '{message.__class__.__name__}'.")
return message_dict
def _convert_delta_to_message_chunk(
dct: Dict[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
role = dct.get("role")
content = dct.get("content", "")
additional_kwargs = {}
tool_calls = dct.get("tool_calls", None)
if tool_calls is not None:
additional_kwargs["tool_calls"] = tool_calls
if role == "system" or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
if role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content, additional_kwargs=additional_kwargs)
if role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role) # type: ignore[arg-type]
return default_class(content=content) # type: ignore[call-arg]
def _truncate_params(payload: Dict[str, Any]) -> None:
"""Truncate temperature and top_p parameters between [0.01, 0.99].
ZhipuAI only support temperature / top_p between (0, 1) open interval,
so we truncate them to [0.01, 0.99].
"""
temperature = payload.get("temperature")
top_p = payload.get("top_p")
if temperature is not None:
payload["temperature"] = max(0.01, min(0.99, temperature))
if top_p is not None:
payload["top_p"] = max(0.01, min(0.99, top_p))
class ChatZhipuAI(BaseChatModel):
"""ZhipuAI chat model integration.
Setup:
Install ``PyJWT`` and set environment variable ``ZHIPUAI_API_KEY``
.. code-block:: bash
pip install pyjwt
export ZHIPUAI_API_KEY="your-api-key"
Key init args — completion params:
model: Optional[str]
Name of ZhipuAI model to use.
temperature: float
Sampling temperature.
max_tokens: Optional[int]
Max number of tokens to generate.
Key init args — client params:
api_key: Optional[str]
ZhipuAI API key. If not passed in will be read from env var ZHIPUAI_API_KEY.
api_base: Optional[str]
Base URL for API requests.
See full list of supported init args and their descriptions in the params section.
Instantiate:
.. code-block:: python
from langchain_community.chat_models import ChatZhipuAI
zhipuai_chat = ChatZhipuAI(
temperature=0.5,
api_key="your-api-key",
model="glm-4",
# api_base="...",
# other params...
)
Invoke:
.. code-block:: python
messages = [
("system", "你是一名专业的翻译家,可以将用户的中文翻译为英文。"),
("human", "我喜欢编程。"),
]
zhipuai_chat.invoke(messages)
.. code-block:: python
AIMessage(content='I enjoy programming.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 23, 'total_tokens': 29}, 'model_name': 'glm-4', 'finish_reason': 'stop'}, id='run-c5d9af91-55c6-470e-9545-02b2fa0d7f9d-0')
Stream:
.. code-block:: python
for chunk in zhipuai_chat.stream(messages):
print(chunk)
.. code-block:: python
content='I' id='run-4df71729-618f-4e2b-a4ff-884682723082'
content=' enjoy' id='run-4df71729-618f-4e2b-a4ff-884682723082'
content=' programming' id='run-4df71729-618f-4e2b-a4ff-884682723082'
content='.' id='run-4df71729-618f-4e2b-a4ff-884682723082'
content='' response_metadata={'finish_reason': 'stop'} id='run-4df71729-618f-4e2b-a4ff-884682723082'
.. code-block:: python
stream = zhipuai_chat.stream(messages)
full = next(stream)
for chunk in stream:
full += chunk
full
.. code-block::
AIMessageChunk(content='I enjoy programming.', response_metadata={'finish_reason': 'stop'}, id='run-20b05040-a0b4-4715-8fdc-b39dba9bfb53')
Async:
.. code-block:: python
await zhipuai_chat.ainvoke(messages)
# stream:
# async for chunk in zhipuai_chat.astream(messages):
# print(chunk)
# batch:
# await zhipuai_chat.abatch([messages])
.. code-block:: python
[AIMessage(content='I enjoy programming.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 23, 'total_tokens': 29}, 'model_name': 'glm-4', 'finish_reason': 'stop'}, id='run-ba06af9d-4baa-40b2-9298-be9c62aa0849-0')]
Tool calling:
.. code-block:: python
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
location: str = Field(
..., description="The city and state, e.g. San Francisco, CA"
)
class GetPopulation(BaseModel):
'''Get the current population in a given location'''
location: str = Field(
..., description="The city and state, e.g. San Francisco, CA"
)
chat_with_tools = zhipuai_chat.bind_tools([GetWeather, GetPopulation])
ai_msg = chat_with_tools.invoke(
"Which city is hotter today and which is bigger: LA or NY?"
)
ai_msg.tool_calls
.. code-block:: python
[
{
'name': 'GetWeather',
'args': {'location': 'Los Angeles, CA'},
'id': 'call_202408222146464ea49ec8731145a9',
'type': 'tool_call'
}
]
Structured output:
.. code-block:: python
from typing import Optional
from pydantic import BaseModel, Field
class Joke(BaseModel):
'''Joke to tell user.'''
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")
structured_chat = zhipuai_chat.with_structured_output(Joke)
structured_chat.invoke("Tell me a joke about cats")
.. code-block:: python
Joke(setup='What do cats like to eat for breakfast?', punchline='Mice Krispies!', rating=None)
Response metadata
.. code-block:: python
ai_msg = zhipuai_chat.invoke(messages)
ai_msg.response_metadata
.. code-block:: python
{'token_usage': {'completion_tokens': 6,
'prompt_tokens': 23,
'total_tokens': 29},
'model_name': 'glm-4',
'finish_reason': 'stop'}
""" # noqa: E501
@property
def lc_secrets(self) -> Dict[str, str]:
return {"zhipuai_api_key": "ZHIPUAI_API_KEY"}
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "chat_models", "zhipuai"]
@property
def lc_attributes(self) -> Dict[str, Any]:
attributes: Dict[str, Any] = {}
if self.zhipuai_api_base:
attributes["zhipuai_api_base"] = self.zhipuai_api_base
return attributes
@property
def _llm_type(self) -> str:
"""Return the type of chat model."""
return "zhipuai-chat"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
params = {
"model": self.model_name,
"stream": self.streaming,
"temperature": self.temperature,
}
if self.max_tokens is not None:
params["max_tokens"] = self.max_tokens
return params
# client:
zhipuai_api_key: Optional[str] = Field(default=None, alias="api_key")
"""Automatically inferred from env var `ZHIPUAI_API_KEY` if not provided."""
zhipuai_api_base: Optional[str] = Field(default=None, alias="api_base")
"""Base URL path for API requests, leave blank if not using a proxy or service
emulator.
"""
model_name: Optional[str] = Field(default="glm-4", alias="model")
"""
Model name to use, see 'https://open.bigmodel.cn/dev/api#language'.
Alternatively, you can use any fine-tuned model from the GLM series.
"""
temperature: float = 0.95
"""
What sampling temperature to use. The value ranges from 0.0 to 1.0 and cannot
be equal to 0.
The larger the value, the more random and creative the output; the smaller
the value, the more stable and deterministic the output.
It is recommended to adjust either top_p or temperature depending on the
application scenario, but not both at the same time.
"""
top_p: float = 0.7
"""
Nucleus sampling, an alternative to temperature sampling. The value
ranges from 0.0 to 1.0 and cannot be equal to 0 or 1.
The model considers only the tokens that make up the top_p probability mass.
For example, 0.1 means the decoder only considers tokens from the
top 10% of the probability mass of the candidate set.
It is recommended to adjust either top_p or temperature depending on the
application scenario, but not both at the same time.
"""
streaming: bool = False
"""Whether to stream the results or not."""
max_tokens: Optional[int] = None
"""Maximum number of tokens to generate."""
model_config = ConfigDict(
populate_by_name=True,
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict[str, Any]) -> Any:
values["zhipuai_api_key"] = get_from_dict_or_env(
values, ["zhipuai_api_key", "api_key"], "ZHIPUAI_API_KEY"
)
values["zhipuai_api_base"] = get_from_dict_or_env(
values, "zhipuai_api_base", "ZHIPUAI_API_BASE", default=ZHIPUAI_API_BASE
)
return values
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = self._default_params
if stop is not None:
params["stop"] = stop
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
generations = []
if not isinstance(response, dict):
response = response.dict()
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
generation_info = dict(finish_reason=res.get("finish_reason"))
generations.append(
ChatGeneration(message=message, generation_info=generation_info)
)
token_usage = response.get("usage", {})
llm_output = {
"token_usage": token_usage,
"model_name": self.model_name,
}
return ChatResult(generations=generations, llm_output=llm_output)
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
"""Generate a chat response."""
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
if self.zhipuai_api_key is None:
raise ValueError("Did not find zhipuai_api_key.")
message_dicts, params = self._create_message_dicts(messages, stop)
payload = {
**params,
**kwargs,
"messages": message_dicts,
"stream": False,
}
_truncate_params(payload)
headers = {
"Authorization": _get_jwt_token(self.zhipuai_api_key),
"Accept": "application/json",
}
import httpx
with httpx.Client(headers=headers, timeout=60) as client:
response = client.post(self.zhipuai_api_base, json=payload) # type: ignore[arg-type]
response.raise_for_status()
return self._create_chat_result(response.json())
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
"""Stream the chat response in chunks."""
if self.zhipuai_api_key is None:
raise ValueError("Did not find zhipuai_api_key.")
if self.zhipuai_api_base is None:
raise ValueError("Did not find zhipu_api_base.")
message_dicts, params = self._create_message_dicts(messages, stop)
payload = {**params, **kwargs, "messages": message_dicts, "stream": True}
_truncate_params(payload)
headers = {
"Authorization": _get_jwt_token(self.zhipuai_api_key),
"Accept": "application/json",
}
default_chunk_class = AIMessageChunk
import httpx
with httpx.Client(headers=headers, timeout=60) as client:
with connect_sse(
client, "POST", self.zhipuai_api_base, json=payload
) as event_source:
for sse in event_source.iter_sse():
chunk = json.loads(sse.data)
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
usage = chunk.get("usage", None)
model_name = chunk.get("model", "")
chunk = _convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
)
finish_reason = choice.get("finish_reason", None)
generation_info = (
{
"finish_reason": finish_reason,
"token_usage": usage,
"model_name": model_name,
}
if finish_reason is not None
else None
)
chunk = ChatGenerationChunk(
message=chunk, generation_info=generation_info
)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
if finish_reason is not None:
break
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
if self.zhipuai_api_key is None:
raise ValueError("Did not find zhipuai_api_key.")
message_dicts, params = self._create_message_dicts(messages, stop)
payload = {
**params,
**kwargs,
"messages": message_dicts,
"stream": False,
}
_truncate_params(payload)
headers = {
"Authorization": _get_jwt_token(self.zhipuai_api_key),
"Accept": "application/json",
}
import httpx
async with httpx.AsyncClient(headers=headers, timeout=60) as client:
response = await client.post(self.zhipuai_api_base, json=payload) # type: ignore[arg-type]
response.raise_for_status()
return self._create_chat_result(response.json())
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
if self.zhipuai_api_key is None:
raise ValueError("Did not find zhipuai_api_key.")
if self.zhipuai_api_base is None:
raise ValueError("Did not find zhipu_api_base.")
message_dicts, params = self._create_message_dicts(messages, stop)
payload = {**params, **kwargs, "messages": message_dicts, "stream": True}
_truncate_params(payload)
headers = {
"Authorization": _get_jwt_token(self.zhipuai_api_key),
"Accept": "application/json",
}
default_chunk_class = AIMessageChunk
import httpx
async with httpx.AsyncClient(headers=headers, timeout=60) as client:
async with aconnect_sse(
client, "POST", self.zhipuai_api_base, json=payload
) as event_source:
async for sse in event_source.aiter_sse():
chunk = json.loads(sse.data)
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
usage = chunk.get("usage", None)
model_name = chunk.get("model", "")
chunk = _convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
)
finish_reason = choice.get("finish_reason", None)
generation_info = (
{
"finish_reason": finish_reason,
"token_usage": usage,
"model_name": model_name,
}
if finish_reason is not None
else None
)
chunk = ChatGenerationChunk(
message=chunk, generation_info=generation_info
)
if run_manager:
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
if finish_reason is not None:
break
def bind_tools(
self,
tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
*,
tool_choice: Optional[
Union[dict, str, Literal["auto", "any", "none"], bool]
] = None,
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
"""Bind tool-like objects to this chat model.
Args:
tools: A list of tool definitions to bind to this chat model.
Can be a dictionary, pydantic model, callable, or BaseTool. Pydantic
models, callables, and BaseTools will be automatically converted to
their schema dictionary representation.
tool_choice: Currently this can only be auto for this chat model.
**kwargs: Any additional parameters to pass to the
:class:`~langchain.runnable.Runnable` constructor.
"""
if self.model_name == "glm-4v":
raise ValueError("glm-4v currently does not support tool calling")
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
if tool_choice and tool_choice != "auto":
raise ValueError("ChatZhipuAI currently only supports `auto` tool choice")
elif tool_choice and tool_choice == "auto":
kwargs["tool_choice"] = tool_choice
return self.bind(tools=formatted_tools, **kwargs)
def with_structured_output(
self,
schema: Optional[Union[Dict, Type[BaseModel]]] = None,
*,
method: Literal["function_calling", "json_mode"] = "function_calling",
include_raw: bool = False,
**kwargs: Any,
) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
"""Model wrapper that returns outputs formatted to match the given schema.
Args:
schema: The output schema as a dict or a Pydantic class. If a Pydantic class
then the model output will be an object of that class. If a dict then
the model output will be a dict. With a Pydantic class the returned
attributes will be validated, whereas with a dict they will not be. If
`method` is "function_calling" and `schema` is a dict, then the dict
must match the OpenAI function-calling spec.
method: The method for steering model generation, either "function_calling"
or "json_mode". ZhipuAI only supports "function_calling" which
converts the schema to a OpenAI function and the model will make use of the
function-calling API.
include_raw: If False then only the parsed structured output is returned. If
an error occurs during model output parsing it will be raised. If True
then both the raw model response (a BaseMessage) and the parsed model
response will be returned. If an error occurs during output parsing it
will be caught and returned as well. The final output is always a dict
with keys "raw", "parsed", and "parsing_error".
Returns:
A Runnable that takes any ChatModel input and returns as output:
If include_raw is True then a dict with keys:
raw: BaseMessage
parsed: Optional[_DictOrPydantic]
parsing_error: Optional[BaseException]
If include_raw is False then just _DictOrPydantic is returned,
where _DictOrPydantic depends on the schema:
If schema is a Pydantic class then _DictOrPydantic is the Pydantic
class.
If schema is a dict then _DictOrPydantic is a dict.
Example: Function-calling, Pydantic schema (method="function_calling", include_raw=False):
.. code-block:: python
from langchain_community.chat_models import ChatZhipuAI
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
llm = ChatZhipuAI(temperature=0)
structured_llm = llm.with_structured_output(AnswerWithJustification)
structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> AnswerWithJustification(
# answer='A pound of bricks and a pound of feathers weigh the same.'
# justification="Both a pound of bricks and a pound of feathers have been defined to have the same weight. The 'pound' is a unit of weight, so any two things that are described as weighing a pound will weigh the same."
# )
Example: Function-calling, Pydantic schema (method="function_calling", include_raw=True):
.. code-block:: python
from langchain_community.chat_models import ChatZhipuAI
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
llm = ChatZhipuAI(temperature=0)
structured_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True)
structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> {
# 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_01htjn3cspevxbqc1d7nkk8wab', 'function': {'arguments': '{"answer": "A pound of bricks and a pound of feathers weigh the same.", "justification": "Both a pound of bricks and a pound of feathers have been defined to have the same weight. The \'pound\' is a unit of weight, so any two things that are described as weighing a pound will weigh the same.", "unit": "pounds"}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}, id='run-456beee6-65f6-4e80-88af-a6065480822c-0'),
# 'parsed': AnswerWithJustification(answer='A pound of bricks and a pound of feathers weigh the same.', justification="Both a pound of bricks and a pound of feathers have been defined to have the same weight. The 'pound' is a unit of weight, so any two things that are described as weighing a pound will weigh the same."),
# 'parsing_error': None
# }
Example: Function-calling, dict schema (method="function_calling", include_raw=False):
.. code-block:: python
from langchain_community.chat_models import ChatZhipuAI
from pydantic import BaseModel
from langchain_core.utils.function_calling import convert_to_openai_tool
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
dict_schema = convert_to_openai_tool(AnswerWithJustification)
llm = ChatZhipuAI(temperature=0)
structured_llm = llm.with_structured_output(dict_schema)
structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> {
# 'answer': 'A pound of bricks and a pound of feathers weigh the same.',
# 'justification': "Both a pound of bricks and a pound of feathers have been defined to have the same weight. The 'pound' is a unit of weight, so any two things that are described as weighing a pound will weigh the same.", 'unit': 'pounds'}
# }
""" # noqa: E501
if kwargs:
raise ValueError(f"Received unsupported arguments {kwargs}")
is_pydantic_schema = _is_pydantic_class(schema)
if method == "function_calling":
if schema is None:
raise ValueError(
"schema must be specified when method is 'function_calling'. "
"Received None."
)
tool_name = convert_to_openai_tool(schema)["function"]["name"]
llm = self.bind_tools([schema], tool_choice="auto")
if is_pydantic_schema:
output_parser: OutputParserLike = PydanticToolsParser(
tools=[schema], # type: ignore[list-item]
first_tool_only=True, # type: ignore[list-item]
)
else:
output_parser = JsonOutputKeyToolsParser(
key_name=tool_name, first_tool_only=True
)
else:
raise ValueError(
f"""Unrecognized method argument. Expected 'function_calling'.
Received: '{method}'"""
)
if include_raw:
parser_assign = RunnablePassthrough.assign(
parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
)
parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
parser_with_fallback = parser_assign.with_fallbacks(
[parser_none], exception_key="parsing_error"
)
return RunnableMap(raw=llm) | parser_with_fallback
else:
return llm | output_parser
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/bedrock.py | import re
from collections import defaultdict
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from pydantic import ConfigDict
from langchain_community.chat_models.anthropic import (
convert_messages_to_prompt_anthropic,
)
from langchain_community.chat_models.meta import convert_messages_to_prompt_llama
from langchain_community.llms.bedrock import BedrockBase
from langchain_community.utilities.anthropic import (
get_num_tokens_anthropic,
get_token_ids_anthropic,
)
def _convert_one_message_to_text_mistral(message: BaseMessage) -> str:
if isinstance(message, ChatMessage):
message_text = f"\n\n{message.role.capitalize()}: {message.content}"
elif isinstance(message, HumanMessage):
message_text = f"[INST] {message.content} [/INST]"
elif isinstance(message, AIMessage):
message_text = f"{message.content}"
elif isinstance(message, SystemMessage):
message_text = f"<<SYS>> {message.content} <</SYS>>"
else:
raise ValueError(f"Got unknown type {message}")
return message_text
def convert_messages_to_prompt_mistral(messages: List[BaseMessage]) -> str:
"""Convert a list of messages to a prompt for mistral."""
return "\n".join(
[_convert_one_message_to_text_mistral(message) for message in messages]
)
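# Illustrative sketch of the resulting Mistral prompt (message contents are
# assumptions, not taken from this file):
#
#     convert_messages_to_prompt_mistral([
#         SystemMessage(content="You are helpful."),
#         HumanMessage(content="Hi"),
#         AIMessage(content="Hello!"),
#     ])
#     # -> "<<SYS>> You are helpful. <</SYS>>\n[INST] Hi [/INST]\nHello!"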
def _format_image(image_url: str) -> Dict:
"""
Format an image URL of the form data:image/jpeg;base64,{b64_string}
into the dict expected by the Anthropic API:
{
"type": "base64",
"media_type": "image/jpeg",
"data": "/9j/4AAQSkZJRg...",
}
Raises a ValueError if the input is not a base64-encoded image.
"""
regex = r"^data:(?P<media_type>image/.+);base64,(?P<data>.+)$"
match = re.match(regex, image_url)
if match is None:
raise ValueError(
"Anthropic only supports base64-encoded images currently."
" Example: data:image/png;base64,'/9j/4AAQSk'..."
)
return {
"type": "base64",
"media_type": match.group("media_type"),
"data": match.group("data"),
}
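# Illustrative sketch (the truncated base64 payload is a placeholder):
#
#     _format_image("data:image/png;base64,iVBORw0KGgo...")
#     # -> {"type": "base64", "media_type": "image/png", "data": "iVBORw0KGgo..."}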
def _format_anthropic_messages(
messages: List[BaseMessage],
) -> Tuple[Optional[str], List[Dict]]:
"""Format messages for anthropic."""
"""
[
{
"role": _message_type_lookups[m.type],
"content": [_AnthropicMessageContent(text=m.content).dict()],
}
for m in messages
]
"""
system: Optional[str] = None
formatted_messages: List[Dict] = []
for i, message in enumerate(messages):
if message.type == "system":
if i != 0:
raise ValueError("System message must be at beginning of message list.")
if not isinstance(message.content, str):
raise ValueError(
"System message must be a string, "
f"instead was: {type(message.content)}"
)
system = message.content
continue
role = _message_type_lookups[message.type]
content: Union[str, List[Dict]]
if not isinstance(message.content, str):
# parse as dict
assert isinstance(
message.content, list
), "Anthropic message content must be str or list of dicts"
# populate content
content = []
for item in message.content:
if isinstance(item, str):
content.append(
{
"type": "text",
"text": item,
}
)
elif isinstance(item, dict):
if "type" not in item:
raise ValueError("Dict content item must have a type key")
if item["type"] == "image_url":
# convert format
source = _format_image(item["image_url"]["url"])
content.append(
{
"type": "image",
"source": source,
}
)
else:
content.append(item)
else:
raise ValueError(
f"Content items must be str or dict, instead was: {type(item)}"
)
else:
content = message.content
formatted_messages.append(
{
"role": role,
"content": content,
}
)
return system, formatted_messages
class ChatPromptAdapter:
"""Adapter class to prepare the inputs from Langchain to prompt format
that Chat model expects.
"""
@classmethod
def convert_messages_to_prompt(
cls, provider: str, messages: List[BaseMessage]
) -> str:
if provider == "anthropic":
prompt = convert_messages_to_prompt_anthropic(messages=messages)
elif provider == "meta":
prompt = convert_messages_to_prompt_llama(messages=messages)
elif provider == "mistral":
prompt = convert_messages_to_prompt_mistral(messages=messages)
elif provider == "amazon":
prompt = convert_messages_to_prompt_anthropic(
messages=messages,
human_prompt="\n\nUser:",
ai_prompt="\n\nBot:",
)
else:
raise NotImplementedError(
f"Provider {provider} model does not support chat."
)
return prompt
@classmethod
def format_messages(
cls, provider: str, messages: List[BaseMessage]
) -> Tuple[Optional[str], List[Dict]]:
if provider == "anthropic":
return _format_anthropic_messages(messages)
raise NotImplementedError(
f"Provider {provider} not supported for format_messages"
)
_message_type_lookups = {
"human": "user",
"ai": "assistant",
"AIMessageChunk": "assistant",
"HumanMessageChunk": "user",
"function": "user",
}
@deprecated(
since="0.0.34", removal="1.0", alternative_import="langchain_aws.ChatBedrock"
)
class BedrockChat(BaseChatModel, BedrockBase):
"""Chat model that uses the Bedrock API."""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "amazon_bedrock_chat"
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "chat_models", "bedrock"]
@property
def lc_attributes(self) -> Dict[str, Any]:
attributes: Dict[str, Any] = {}
if self.region_name:
attributes["region_name"] = self.region_name
return attributes
model_config = ConfigDict(
extra="forbid",
)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
provider = self._get_provider()
prompt, system, formatted_messages = None, None, None
if provider == "anthropic":
system, formatted_messages = ChatPromptAdapter.format_messages(
provider, messages
)
else:
prompt = ChatPromptAdapter.convert_messages_to_prompt(
provider=provider, messages=messages
)
for chunk in self._prepare_input_and_invoke_stream(
prompt=prompt,
system=system,
messages=formatted_messages,
stop=stop,
run_manager=run_manager,
**kwargs,
):
delta = chunk.text
yield ChatGenerationChunk(message=AIMessageChunk(content=delta))
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
completion = ""
llm_output: Dict[str, Any] = {"model_id": self.model_id}
if self.streaming:
for chunk in self._stream(messages, stop, run_manager, **kwargs):
completion += chunk.text
else:
provider = self._get_provider()
prompt, system, formatted_messages = None, None, None
params: Dict[str, Any] = {**kwargs}
if provider == "anthropic":
system, formatted_messages = ChatPromptAdapter.format_messages(
provider, messages
)
else:
prompt = ChatPromptAdapter.convert_messages_to_prompt(
provider=provider, messages=messages
)
if stop:
params["stop_sequences"] = stop
completion, usage_info = self._prepare_input_and_invoke(
prompt=prompt,
stop=stop,
run_manager=run_manager,
system=system,
messages=formatted_messages,
**params,
)
llm_output["usage"] = usage_info
return ChatResult(
generations=[ChatGeneration(message=AIMessage(content=completion))],
llm_output=llm_output,
)
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
final_usage: Dict[str, int] = defaultdict(int)
final_output = {}
for output in llm_outputs:
output = output or {}
usage = output.get("usage", {})
for token_type, token_count in usage.items():
final_usage[token_type] += token_count
final_output.update(output)
final_output["usage"] = final_usage
return final_output
def get_num_tokens(self, text: str) -> int:
if self._model_is_anthropic:
return get_num_tokens_anthropic(text)
else:
return super().get_num_tokens(text)
def get_token_ids(self, text: str) -> List[int]:
if self._model_is_anthropic:
return get_token_ids_anthropic(text)
else:
return super().get_token_ids(text)
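# A minimal usage sketch (the model id and region are assumptions; note the class
# is deprecated in favor of langchain_aws.ChatBedrock):
#
#     from langchain_core.messages import HumanMessage
#
#     chat = BedrockChat(
#         model_id="anthropic.claude-3-sonnet-20240229-v1:0",  # assumed model id
#         region_name="us-east-1",                             # assumed region
#     )
#     chat.invoke([HumanMessage(content="Hello!")])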
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/llama_edge.py | import json
import logging
import re
from typing import Any, Dict, Iterator, List, Mapping, Optional, Type
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import (
BaseChatModel,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.utils import get_pydantic_field_names
from pydantic import ConfigDict, model_validator
logger = logging.getLogger(__name__)
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
return AIMessage(content=_dict.get("content", "") or "")
else:
return ChatMessage(content=_dict["content"], role=role)
def _convert_message_to_dict(message: BaseMessage) -> dict:
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
role = _dict.get("role")
content = _dict.get("content") or ""
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role) # type: ignore[arg-type]
else:
return default_class(content=content) # type: ignore[call-arg]
class LlamaEdgeChatService(BaseChatModel):
"""Chat with LLMs via `llama-api-server`
For the information about `llama-api-server`, visit https://github.com/second-state/LlamaEdge
"""
request_timeout: int = 60
"""request timeout for chat http requests"""
service_url: Optional[str] = None
"""URL of WasmChat service"""
model: str = "NA"
"""model name, default is `NA`."""
streaming: bool = False
"""Whether to stream the results or not."""
model_config = ConfigDict(
populate_by_name=True,
)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
res = self._chat(messages, **kwargs)
if res.status_code != 200:
raise ValueError(f"Error code: {res.status_code}, reason: {res.reason}")
response = res.json()
return self._create_chat_result(response)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
res = self._chat(messages, **kwargs)
default_chunk_class = AIMessageChunk
substring = '"object":"chat.completion.chunk"}'
for line in res.iter_lines():
chunks = []
if line:
json_string = line.decode("utf-8")
# Find all positions of the substring
positions = [m.start() for m in re.finditer(substring, json_string)]
positions = [-1 * len(substring)] + positions
for i in range(len(positions) - 1):
chunk = json.loads(
json_string[
positions[i] + len(substring) : positions[i + 1]
+ len(substring)
]
)
chunks.append(chunk)
for chunk in chunks:
if not isinstance(chunk, dict):
chunk = chunk.dict()
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
chunk = _convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
)
if (
choice.get("finish_reason") is not None
and choice.get("finish_reason") == "stop"
):
break
finish_reason = choice.get("finish_reason")
generation_info = (
dict(finish_reason=finish_reason)
if finish_reason is not None
else None
)
default_chunk_class = chunk.__class__
cg_chunk = ChatGenerationChunk(
message=chunk, generation_info=generation_info
)
if run_manager:
run_manager.on_llm_new_token(cg_chunk.text, chunk=cg_chunk)
yield cg_chunk
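    # Worked example (comments only): a single streamed line from llama-api-server
    # may contain several JSON objects concatenated back to back, each ending with
    # '"object":"chat.completion.chunk"}'. The substring search in _stream() splits
    # the line at each of those terminators so every piece can be json.loads()-ed
    # into its own chunk dict before being converted to a message chunk.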
def _chat(self, messages: List[BaseMessage], **kwargs: Any) -> requests.Response:
if self.service_url is None:
res = requests.models.Response()
res.status_code = 503
res.reason = "The IP address or port of the chat service is incorrect."
return res
service_url = f"{self.service_url}/v1/chat/completions"
if self.streaming:
payload = {
"model": self.model,
"messages": [_convert_message_to_dict(m) for m in messages],
"stream": self.streaming,
}
else:
payload = {
"model": self.model,
"messages": [_convert_message_to_dict(m) for m in messages],
}
res = requests.post(
url=service_url,
timeout=self.request_timeout,
headers={
"accept": "application/json",
"Content-Type": "application/json",
},
data=json.dumps(payload),
)
return res
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
message = _convert_dict_to_message(response["choices"][0].get("message"))
generations = [ChatGeneration(message=message)]
token_usage = response["usage"]
llm_output = {"token_usage": token_usage, "model": self.model}
return ChatResult(generations=generations, llm_output=llm_output)
@property
def _llm_type(self) -> str:
return "wasm-chat"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/sparkllm.py | import base64
import hashlib
import hmac
import json
import logging
import queue
import threading
from datetime import datetime
from queue import Queue
from time import mktime
from typing import Any, Dict, Generator, Iterator, List, Mapping, Optional, Type, cast
from urllib.parse import urlencode, urlparse, urlunparse
from wsgiref.handlers import format_date_time
from langchain_core.callbacks import (
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
FunctionMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
ToolMessageChunk,
)
from langchain_core.output_parsers.openai_tools import (
make_invalid_tool_call,
parse_tool_call,
)
from langchain_core.outputs import (
ChatGeneration,
ChatGenerationChunk,
ChatResult,
)
from langchain_core.utils import (
get_from_dict_or_env,
get_pydantic_field_names,
)
from langchain_core.utils.pydantic import get_fields
from pydantic import ConfigDict, Field, model_validator
logger = logging.getLogger(__name__)
SPARK_API_URL = "wss://spark-api.xf-yun.com/v3.5/chat"
SPARK_LLM_DOMAIN = "generalv3.5"
def convert_message_to_dict(message: BaseMessage) -> dict:
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "function_call" in message.additional_kwargs:
message_dict["function_call"] = message.additional_kwargs["function_call"]
# If function call only, content is None not empty string
if message_dict["content"] == "":
message_dict["content"] = None
if "tool_calls" in message.additional_kwargs:
message_dict["tool_calls"] = message.additional_kwargs["tool_calls"]
# If tool calls only, content is None not empty string
if message_dict["content"] == "":
message_dict["content"] = None
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
else:
raise ValueError(f"Got unknown type {message}")
return message_dict
def convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
msg_role = _dict["role"]
msg_content = _dict["content"]
if msg_role == "user":
return HumanMessage(content=msg_content)
elif msg_role == "assistant":
invalid_tool_calls = []
additional_kwargs: Dict = {}
if function_call := _dict.get("function_call"):
additional_kwargs["function_call"] = dict(function_call)
tool_calls = []
if raw_tool_calls := _dict.get("tool_calls"):
additional_kwargs["tool_calls"] = raw_tool_calls
for raw_tool_call in _dict["tool_calls"]:
try:
tool_calls.append(parse_tool_call(raw_tool_call, return_id=True))
except Exception as e:
invalid_tool_calls.append(
make_invalid_tool_call(raw_tool_call, str(e))
)
else:
additional_kwargs = {}
content = msg_content or ""
return AIMessage(
content=content,
additional_kwargs=additional_kwargs,
tool_calls=tool_calls,
invalid_tool_calls=invalid_tool_calls,
)
elif msg_role == "system":
return SystemMessage(content=msg_content)
else:
return ChatMessage(content=msg_content, role=msg_role)
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
msg_role = cast(str, _dict.get("role"))
msg_content = cast(str, _dict.get("content") or "")
additional_kwargs: Dict = {}
if _dict.get("function_call"):
function_call = dict(_dict["function_call"])
if "name" in function_call and function_call["name"] is None:
function_call["name"] = ""
additional_kwargs["function_call"] = function_call
if _dict.get("tool_calls"):
additional_kwargs["tool_calls"] = _dict["tool_calls"]
if msg_role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=msg_content)
elif msg_role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=msg_content, additional_kwargs=additional_kwargs)
elif msg_role == "function" or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=msg_content, name=_dict["name"])
elif msg_role == "tool" or default_class == ToolMessageChunk:
return ToolMessageChunk(content=msg_content, tool_call_id=_dict["tool_call_id"])
elif msg_role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=msg_content, role=msg_role)
else:
return default_class(content=msg_content) # type: ignore[call-arg]
class ChatSparkLLM(BaseChatModel):
"""IFlyTek Spark chat model integration.
Setup:
        To use, you should have the environment variables ``IFLYTEK_SPARK_API_KEY``,
        ``IFLYTEK_SPARK_API_SECRET`` and ``IFLYTEK_SPARK_APP_ID`` set.
Key init args — completion params:
model: Optional[str]
Name of IFLYTEK SPARK model to use.
temperature: Optional[float]
Sampling temperature.
        top_k: Optional[int]
            Top-k sampling parameter: sample from the k most likely next tokens.
streaming: Optional[bool]
Whether to stream the results or not.
Key init args — client params:
api_key: Optional[str]
IFLYTEK SPARK API KEY. If not passed in will be read from env var IFLYTEK_SPARK_API_KEY.
api_secret: Optional[str]
IFLYTEK SPARK API SECRET. If not passed in will be read from env var IFLYTEK_SPARK_API_SECRET.
api_url: Optional[str]
Base URL for API requests.
timeout: Optional[int]
Timeout for requests.
See full list of supported init args and their descriptions in the params section.
Instantiate:
.. code-block:: python
from langchain_community.chat_models import ChatSparkLLM
chat = ChatSparkLLM(
api_key="your-api-key",
api_secret="your-api-secret",
model='Spark4.0 Ultra',
# temperature=...,
# other params...
)
Invoke:
.. code-block:: python
messages = [
("system", "你是一名专业的翻译家,可以将用户的中文翻译为英文。"),
("human", "我喜欢编程。"),
]
chat.invoke(messages)
.. code-block:: python
AIMessage(
content='I like programming.',
response_metadata={
'token_usage': {
'question_tokens': 3,
'prompt_tokens': 16,
'completion_tokens': 4,
'total_tokens': 20
}
},
id='run-af8b3531-7bf7-47f0-bfe8-9262cb2a9d47-0'
)
Stream:
.. code-block:: python
for chunk in chat.stream(messages):
print(chunk)
.. code-block:: python
content='I' id='run-fdbb57c2-2d32-4516-b894-6c5a67605d83'
content=' like programming' id='run-fdbb57c2-2d32-4516-b894-6c5a67605d83'
content='.' id='run-fdbb57c2-2d32-4516-b894-6c5a67605d83'
.. code-block:: python
stream = chat.stream(messages)
full = next(stream)
for chunk in stream:
full += chunk
full
.. code-block:: python
AIMessageChunk(
content='I like programming.',
id='run-aca2fa82-c2e4-4835-b7e2-865ddd3c46cb'
)
Response metadata
.. code-block:: python
ai_msg = chat.invoke(messages)
ai_msg.response_metadata
.. code-block:: python
{
'token_usage': {
'question_tokens': 3,
'prompt_tokens': 16,
'completion_tokens': 4,
'total_tokens': 20
}
}
""" # noqa: E501
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return False
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"spark_app_id": "IFLYTEK_SPARK_APP_ID",
"spark_api_key": "IFLYTEK_SPARK_API_KEY",
"spark_api_secret": "IFLYTEK_SPARK_API_SECRET",
"spark_api_url": "IFLYTEK_SPARK_API_URL",
"spark_llm_domain": "IFLYTEK_SPARK_LLM_DOMAIN",
}
client: Any = None #: :meta private:
spark_app_id: Optional[str] = Field(default=None, alias="app_id")
"""Automatically inferred from env var `IFLYTEK_SPARK_APP_ID`
if not provided."""
spark_api_key: Optional[str] = Field(default=None, alias="api_key")
"""Automatically inferred from env var `IFLYTEK_SPARK_API_KEY`
if not provided."""
spark_api_secret: Optional[str] = Field(default=None, alias="api_secret")
"""Automatically inferred from env var `IFLYTEK_SPARK_API_SECRET`
if not provided."""
spark_api_url: Optional[str] = Field(default=None, alias="api_url")
"""Base URL path for API requests, leave blank if not using a proxy or service
emulator."""
spark_llm_domain: Optional[str] = Field(default=None, alias="model")
"""Model name to use."""
spark_user_id: str = "lc_user"
streaming: bool = False
"""Whether to stream the results or not."""
request_timeout: int = Field(30, alias="timeout")
"""request timeout for chat http requests"""
temperature: float = Field(default=0.5)
"""What sampling temperature to use."""
top_k: int = 4
"""What search sampling control to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for API call not explicitly specified."""
model_config = ConfigDict(
populate_by_name=True,
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
values["spark_app_id"] = get_from_dict_or_env(
values,
["spark_app_id", "app_id"],
"IFLYTEK_SPARK_APP_ID",
)
values["spark_api_key"] = get_from_dict_or_env(
values,
["spark_api_key", "api_key"],
"IFLYTEK_SPARK_API_KEY",
)
values["spark_api_secret"] = get_from_dict_or_env(
values,
["spark_api_secret", "api_secret"],
"IFLYTEK_SPARK_API_SECRET",
)
values["spark_api_url"] = get_from_dict_or_env(
values,
"spark_api_url",
"IFLYTEK_SPARK_API_URL",
SPARK_API_URL,
)
values["spark_llm_domain"] = get_from_dict_or_env(
values,
"spark_llm_domain",
"IFLYTEK_SPARK_LLM_DOMAIN",
SPARK_LLM_DOMAIN,
)
        # put default temperature and top_k into model_kwargs
default_values = {
name: field.default
for name, field in get_fields(cls).items()
if field.default is not None
}
values["model_kwargs"]["temperature"] = default_values.get("temperature")
values["model_kwargs"]["top_k"] = default_values.get("top_k")
values["client"] = _SparkLLMClient(
app_id=values["spark_app_id"],
api_key=values["spark_api_key"],
api_secret=values["spark_api_secret"],
api_url=values["spark_api_url"],
spark_domain=values["spark_llm_domain"],
model_kwargs=values["model_kwargs"],
)
return values
# When using Pydantic V2
# The execution order of multiple @model_validator decorators is opposite to
# their declaration order. https://github.com/pydantic/pydantic/discussions/7434
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
default_chunk_class = AIMessageChunk
self.client.arun(
[convert_message_to_dict(m) for m in messages],
self.spark_user_id,
self.model_kwargs,
streaming=True,
)
for content in self.client.subscribe(timeout=self.request_timeout):
if "data" not in content:
continue
delta = content["data"]
chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
cg_chunk = ChatGenerationChunk(message=chunk)
if run_manager:
run_manager.on_llm_new_token(str(chunk.content), chunk=cg_chunk)
yield cg_chunk
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
if stream or self.streaming:
stream_iter = self._stream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
self.client.arun(
[convert_message_to_dict(m) for m in messages],
self.spark_user_id,
self.model_kwargs,
False,
)
completion = {}
llm_output = {}
for content in self.client.subscribe(timeout=self.request_timeout):
if "usage" in content:
llm_output["token_usage"] = content["usage"]
if "data" not in content:
continue
completion = content["data"]
message = convert_dict_to_message(completion)
generations = [ChatGeneration(message=message)]
return ChatResult(generations=generations, llm_output=llm_output)
@property
def _llm_type(self) -> str:
return "spark-llm-chat"
class _SparkLLMClient:
"""
    Use websocket-client to call the SparkLLM interface provided by Xfyun,
    iFlyTek's open platform for AI capabilities.
"""
def __init__(
self,
app_id: str,
api_key: str,
api_secret: str,
api_url: Optional[str] = None,
spark_domain: Optional[str] = None,
model_kwargs: Optional[dict] = None,
):
try:
import websocket
self.websocket_client = websocket
except ImportError:
raise ImportError(
"Could not import websocket client python package. "
"Please install it with `pip install websocket-client`."
)
self.api_url = SPARK_API_URL if not api_url else api_url
self.app_id = app_id
self.model_kwargs = model_kwargs
self.spark_domain = spark_domain or SPARK_LLM_DOMAIN
self.queue: Queue[Dict] = Queue()
self.blocking_message = {"content": "", "role": "assistant"}
self.api_key = api_key
self.api_secret = api_secret
@staticmethod
def _create_url(api_url: str, api_key: str, api_secret: str) -> str:
"""
Generate a request url with an api key and an api secret.
"""
# generate timestamp by RFC1123
date = format_date_time(mktime(datetime.now().timetuple()))
# urlparse
parsed_url = urlparse(api_url)
host = parsed_url.netloc
path = parsed_url.path
signature_origin = f"host: {host}\ndate: {date}\nGET {path} HTTP/1.1"
# encrypt using hmac-sha256
signature_sha = hmac.new(
api_secret.encode("utf-8"),
signature_origin.encode("utf-8"),
digestmod=hashlib.sha256,
).digest()
signature_sha_base64 = base64.b64encode(signature_sha).decode(encoding="utf-8")
authorization_origin = f'api_key="{api_key}", algorithm="hmac-sha256", \
headers="host date request-line", signature="{signature_sha_base64}"'
authorization = base64.b64encode(authorization_origin.encode("utf-8")).decode(
encoding="utf-8"
)
# generate url
params_dict = {"authorization": authorization, "date": date, "host": host}
encoded_params = urlencode(params_dict)
url = urlunparse(
(
parsed_url.scheme,
parsed_url.netloc,
parsed_url.path,
parsed_url.params,
encoded_params,
parsed_url.fragment,
)
)
return url
def run(
self,
messages: List[Dict],
user_id: str,
model_kwargs: Optional[dict] = None,
streaming: bool = False,
) -> None:
self.websocket_client.enableTrace(False)
ws = self.websocket_client.WebSocketApp(
_SparkLLMClient._create_url(
self.api_url,
self.api_key,
self.api_secret,
),
on_message=self.on_message,
on_error=self.on_error,
on_close=self.on_close,
on_open=self.on_open,
)
ws.messages = messages # type: ignore[attr-defined]
ws.user_id = user_id # type: ignore[attr-defined]
ws.model_kwargs = self.model_kwargs if model_kwargs is None else model_kwargs # type: ignore[attr-defined]
ws.streaming = streaming # type: ignore[attr-defined]
ws.run_forever()
def arun(
self,
messages: List[Dict],
user_id: str,
model_kwargs: Optional[dict] = None,
streaming: bool = False,
) -> threading.Thread:
ws_thread = threading.Thread(
target=self.run,
args=(
messages,
user_id,
model_kwargs,
streaming,
),
)
ws_thread.start()
return ws_thread
def on_error(self, ws: Any, error: Optional[Any]) -> None:
self.queue.put({"error": error})
ws.close()
def on_close(self, ws: Any, close_status_code: int, close_reason: str) -> None:
logger.debug(
{
"log": {
"close_status_code": close_status_code,
"close_reason": close_reason,
}
}
)
self.queue.put({"done": True})
def on_open(self, ws: Any) -> None:
self.blocking_message = {"content": "", "role": "assistant"}
data = json.dumps(
self.gen_params(
messages=ws.messages, user_id=ws.user_id, model_kwargs=ws.model_kwargs
)
)
ws.send(data)
def on_message(self, ws: Any, message: str) -> None:
data = json.loads(message)
code = data["header"]["code"]
if code != 0:
self.queue.put(
{"error": f"Code: {code}, Error: {data['header']['message']}"}
)
ws.close()
else:
choices = data["payload"]["choices"]
status = choices["status"]
content = choices["text"][0]["content"]
if ws.streaming:
self.queue.put({"data": choices["text"][0]})
else:
self.blocking_message["content"] += content
if status == 2:
if not ws.streaming:
self.queue.put({"data": self.blocking_message})
usage_data = (
data.get("payload", {}).get("usage", {}).get("text", {})
if data
else {}
)
self.queue.put({"usage": usage_data})
ws.close()
def gen_params(
self, messages: list, user_id: str, model_kwargs: Optional[dict] = None
) -> dict:
data: Dict = {
"header": {"app_id": self.app_id, "uid": user_id},
"parameter": {"chat": {"domain": self.spark_domain}},
"payload": {"message": {"text": messages}},
}
if model_kwargs:
data["parameter"]["chat"].update(model_kwargs)
logger.debug(f"Spark Request Parameters: {data}")
return data
def subscribe(self, timeout: Optional[int] = 30) -> Generator[Dict, None, None]:
while True:
try:
content = self.queue.get(timeout=timeout)
except queue.Empty as _:
raise TimeoutError(
f"SparkLLMClient wait LLM api response timeout {timeout} seconds"
)
if "error" in content:
raise ConnectionError(content["error"])
if "usage" in content:
yield content
continue
if "done" in content:
break
if "data" not in content:
break
yield content
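# A minimal, hedged sketch (not part of the library) showing how the signed
# websocket URL is built; the credentials below are placeholders, not real keys.
if __name__ == "__main__":
    signed_url = _SparkLLMClient._create_url(
        api_url=SPARK_API_URL,
        api_key="placeholder-api-key",
        api_secret="placeholder-api-secret",
    )
    # The query string carries `authorization` (base64 of an HMAC-SHA256 signature
    # over the host, date and request line), plus `date` and `host` parameters.
    print(signed_url)
    # With real iFlyTek credentials (and `websocket-client` installed) the chat
    # model itself could be exercised like this:
    # chat = ChatSparkLLM(app_id="...", api_key="...", api_secret="...")
    # print(chat.invoke("Hello"))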
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py | import json
import logging
import uuid
from operator import itemgetter
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Type,
Union,
cast,
)
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
ToolMessage,
)
from langchain_core.messages.ai import UsageMetadata
from langchain_core.messages.tool import tool_call_chunk
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
PydanticToolsParser,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain_core.utils.pydantic import get_fields, is_basemodel_subclass
from pydantic import (
BaseModel,
ConfigDict,
Field,
SecretStr,
model_validator,
)
logger = logging.getLogger(__name__)
def convert_message_to_dict(message: BaseMessage) -> dict:
"""Convert a message to a dictionary that can be passed to the API."""
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if len(message.tool_calls) != 0:
tool_call = message.tool_calls[0]
message_dict["function_call"] = {
"name": tool_call["name"],
"arguments": json.dumps(tool_call["args"], ensure_ascii=False),
}
# If function call only, content is None not empty string
message_dict["content"] = None
elif isinstance(message, (FunctionMessage, ToolMessage)):
message_dict = {
"role": "function",
"content": _create_tool_content(message.content),
"name": message.name or message.additional_kwargs.get("name"),
}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
def _create_tool_content(content: Union[str, List[Union[str, Dict[Any, Any]]]]) -> str:
"""Convert tool content to dict scheme."""
if isinstance(content, str):
try:
if isinstance(json.loads(content), dict):
return content
else:
return json.dumps({"tool_result": content})
except json.JSONDecodeError:
return json.dumps({"tool_result": content})
else:
return json.dumps({"tool_result": content})
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> AIMessage:
content = _dict.get("result", "") or ""
additional_kwargs: Mapping[str, Any] = {}
if _dict.get("function_call"):
additional_kwargs = {"function_call": dict(_dict["function_call"])}
if "thoughts" in additional_kwargs["function_call"]:
# align to api sample, which affects the llm function_call output
additional_kwargs["function_call"].pop("thoughts")
    # DO NOT ADD ANY NUMERIC OBJECT TO `msg_additional_kwargs` OR `additional_kwargs`
    # (OR TO ANY OF THEIR SUB-CONTAINERS)!
    # OTHERWISE `merge_dict` WILL RAISE AN EXCEPTION AND THE CODE WILL NOT RUN.
additional_kwargs = {**_dict.get("body", {}), **additional_kwargs}
msg_additional_kwargs = dict(
finish_reason=additional_kwargs.get("finish_reason", ""),
request_id=additional_kwargs["id"],
object=additional_kwargs.get("object", ""),
search_info=additional_kwargs.get("search_info", []),
)
if additional_kwargs.get("function_call", {}):
msg_additional_kwargs["function_call"] = additional_kwargs.get(
"function_call", {}
)
msg_additional_kwargs["tool_calls"] = [
{
"type": "function",
"function": additional_kwargs.get("function_call", {}),
"id": str(uuid.uuid4()),
}
]
ret = AIMessage(
content=content,
additional_kwargs=msg_additional_kwargs,
)
if usage := additional_kwargs.get("usage", None):
ret.usage_metadata = UsageMetadata(
input_tokens=usage.get("prompt_tokens", 0),
output_tokens=usage.get("completion_tokens", 0),
total_tokens=usage.get("total_tokens", 0),
)
return ret
class QianfanChatEndpoint(BaseChatModel):
"""Baidu Qianfan chat model integration.
Setup:
Install ``qianfan`` and set environment variables ``QIANFAN_AK``, ``QIANFAN_SK``.
.. code-block:: bash
pip install qianfan
export QIANFAN_AK="your-api-key"
export QIANFAN_SK="your-secret_key"
Key init args — completion params:
model: str
Name of Qianfan model to use.
temperature: Optional[float]
Sampling temperature.
endpoint: Optional[str]
Endpoint of the Qianfan LLM
top_p: Optional[float]
What probability mass to use.
Key init args — client params:
timeout: Optional[int]
Timeout for requests.
api_key: Optional[str]
Qianfan API KEY. If not passed in will be read from env var QIANFAN_AK.
secret_key: Optional[str]
Qianfan SECRET KEY. If not passed in will be read from env var QIANFAN_SK.
See full list of supported init args and their descriptions in the params section.
Instantiate:
.. code-block:: python
from langchain_community.chat_models import QianfanChatEndpoint
qianfan_chat = QianfanChatEndpoint(
model="ERNIE-3.5-8K",
temperature=0.2,
timeout=30,
# api_key="...",
# secret_key="...",
# top_p="...",
# other params...
)
Invoke:
.. code-block:: python
messages = [
("system", "你是一名专业的翻译家,可以将用户的中文翻译为英文。"),
("human", "我喜欢编程。"),
]
qianfan_chat.invoke(messages)
.. code-block:: python
AIMessage(content='I enjoy programming.', additional_kwargs={'finish_reason': 'normal', 'request_id': 'as-7848zeqn1c', 'object': 'chat.completion', 'search_info': []}, response_metadata={'token_usage': {'prompt_tokens': 16, 'completion_tokens': 4, 'total_tokens': 20}, 'model_name': 'ERNIE-3.5-8K', 'finish_reason': 'normal', 'id': 'as-7848zeqn1c', 'object': 'chat.completion', 'created': 1719153606, 'result': 'I enjoy programming.', 'is_truncated': False, 'need_clear_history': False, 'usage': {'prompt_tokens': 16, 'completion_tokens': 4, 'total_tokens': 20}}, id='run-4bca0c10-5043-456b-a5be-2f62a980f3f0-0')
Stream:
.. code-block:: python
for chunk in qianfan_chat.stream(messages):
print(chunk)
.. code-block:: python
content='I enjoy' response_metadata={'finish_reason': 'normal', 'request_id': 'as-yz0yz1w1rq', 'object': 'chat.completion', 'search_info': []} id='run-0fa9da50-003e-4a26-ba16-dbfe96249b8b' role='assistant'
content=' programming.' response_metadata={'finish_reason': 'normal', 'request_id': 'as-yz0yz1w1rq', 'object': 'chat.completion', 'search_info': []} id='run-0fa9da50-003e-4a26-ba16-dbfe96249b8b' role='assistant'
.. code-block:: python
            stream = qianfan_chat.stream(messages)
full = next(stream)
for chunk in stream:
full += chunk
full
.. code-block::
AIMessageChunk(content='I enjoy programming.', response_metadata={'finish_reason': 'normalnormal', 'request_id': 'as-p63cnn3ppnas-p63cnn3ppn', 'object': 'chat.completionchat.completion', 'search_info': []}, id='run-09a8cbbd-5ded-4529-981d-5bc9d1206404')
Async:
.. code-block:: python
await qianfan_chat.ainvoke(messages)
# stream:
# async for chunk in qianfan_chat.astream(messages):
# print(chunk)
# batch:
# await qianfan_chat.abatch([messages])
.. code-block:: python
[AIMessage(content='I enjoy programming.', additional_kwargs={'finish_reason': 'normal', 'request_id': 'as-mpqa8qa1qb', 'object': 'chat.completion', 'search_info': []}, response_metadata={'token_usage': {'prompt_tokens': 16, 'completion_tokens': 4, 'total_tokens': 20}, 'model_name': 'ERNIE-3.5-8K', 'finish_reason': 'normal', 'id': 'as-mpqa8qa1qb', 'object': 'chat.completion', 'created': 1719155120, 'result': 'I enjoy programming.', 'is_truncated': False, 'need_clear_history': False, 'usage': {'prompt_tokens': 16, 'completion_tokens': 4, 'total_tokens': 20}}, id='run-443b2231-08f9-4725-b807-b77d0507ad44-0')]
Tool calling:
.. code-block:: python
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
location: str = Field(
..., description="The city and state, e.g. San Francisco, CA"
)
class GetPopulation(BaseModel):
'''Get the current population in a given location'''
location: str = Field(
..., description="The city and state, e.g. San Francisco, CA"
)
chat_with_tools = qianfan_chat.bind_tools([GetWeather, GetPopulation])
ai_msg = chat_with_tools.invoke(
"Which city is hotter today and which is bigger: LA or NY?"
)
ai_msg.tool_calls
.. code-block:: python
[
{
'name': 'GetWeather',
'args': {'location': 'Los Angeles, CA'},
'id': '533e5f63-a3dc-40f2-9d9c-22b1feee62e0'
}
]
Structured output:
.. code-block:: python
from typing import Optional
from pydantic import BaseModel, Field
class Joke(BaseModel):
'''Joke to tell user.'''
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")
structured_chat = qianfan_chat.with_structured_output(Joke)
structured_chat.invoke("Tell me a joke about cats")
.. code-block:: python
Joke(
setup='A cat is sitting in front of a mirror and sees another cat. What does the cat think?',
punchline="The cat doesn't think it's another cat, it thinks it's another mirror.",
rating=None
)
Response metadata
.. code-block:: python
ai_msg = qianfan_chat.invoke(messages)
ai_msg.response_metadata
.. code-block:: python
{
'token_usage': {
'prompt_tokens': 16,
'completion_tokens': 4,
'total_tokens': 20},
'model_name': 'ERNIE-3.5-8K',
'finish_reason': 'normal',
'id': 'as-qbzwtydqmi',
'object': 'chat.completion',
'created': 1719158153,
'result': 'I enjoy programming.',
'is_truncated': False,
'need_clear_history': False,
'usage': {
'prompt_tokens': 16,
'completion_tokens': 4,
'total_tokens': 20
}
}
""" # noqa: E501
init_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""init kwargs for qianfan client init, such as `query_per_second` which is
associated with qianfan resource object to limit QPS"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""extra params for model invoke using with `do`."""
client: Any = None #: :meta private:
    # These may be empty when the Console API is used,
    # and they are not listed as required here.
qianfan_ak: Optional[SecretStr] = Field(default=None, alias="api_key")
"""Qianfan API KEY"""
qianfan_sk: Optional[SecretStr] = Field(default=None, alias="secret_key")
"""Qianfan SECRET KEY"""
streaming: Optional[bool] = False
"""Whether to stream the results or not."""
request_timeout: Optional[int] = Field(60, alias="timeout")
"""request timeout for chat http requests"""
top_p: Optional[float] = 0.8
"""What probability mass to use."""
temperature: Optional[float] = 0.95
"""What sampling temperature to use."""
penalty_score: Optional[float] = 1
"""Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo.
In the case of other model, passing these params will not affect the result.
"""
model: Optional[str] = Field(default=None)
"""Model name.
    You can find available models at
    https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu
    Preset models are mapped to an endpoint.
    `model` will be ignored if `endpoint` is set.
    The default is set by the `qianfan` SDK, not here.
"""
endpoint: Optional[str] = None
"""Endpoint of the Qianfan LLM, required if custom model used."""
model_config = ConfigDict(
populate_by_name=True,
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
values["qianfan_ak"] = convert_to_secret_str(
get_from_dict_or_env(
values, ["qianfan_ak", "api_key"], "QIANFAN_AK", default=""
)
)
values["qianfan_sk"] = convert_to_secret_str(
get_from_dict_or_env(
values, ["qianfan_sk", "secret_key"], "QIANFAN_SK", default=""
)
)
default_values = {
name: field.default
for name, field in get_fields(cls).items()
if field.default is not None
}
default_values.update(values)
params = {
**values.get("init_kwargs", {}),
"model": default_values.get("model"),
"stream": default_values.get("streaming"),
}
if values["qianfan_ak"].get_secret_value() != "":
params["ak"] = values["qianfan_ak"].get_secret_value()
if values["qianfan_sk"].get_secret_value() != "":
params["sk"] = values["qianfan_sk"].get_secret_value()
if (
default_values.get("endpoint") is not None
and default_values["endpoint"] != ""
):
params["endpoint"] = default_values["endpoint"]
try:
import qianfan
values["client"] = qianfan.ChatCompletion(**params)
except ImportError:
raise ImportError(
"qianfan package not found, please install it with "
"`pip install qianfan`"
)
return values
@property
def _identifying_params(self) -> Dict[str, Any]:
return {
**{"endpoint": self.endpoint, "model": self.model},
**super()._identifying_params,
}
@property
def _llm_type(self) -> str:
"""Return type of chat_model."""
return "baidu-qianfan-chat"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Qianfan API."""
normal_params = {
"model": self.model,
"endpoint": self.endpoint,
"stream": self.streaming,
"request_timeout": self.request_timeout,
"top_p": self.top_p,
"temperature": self.temperature,
"penalty_score": self.penalty_score,
}
return {**normal_params, **self.model_kwargs}
def _convert_prompt_msg_params(
self,
messages: List[BaseMessage],
**kwargs: Any,
) -> Dict[str, Any]:
"""
Converts a list of messages into a dictionary containing the message content
and default parameters.
Args:
messages (List[BaseMessage]): The list of messages.
**kwargs (Any): Optional arguments to add additional parameters to the
resulting dictionary.
Returns:
Dict[str, Any]: A dictionary containing the message content and default
parameters.
"""
messages_dict: Dict[str, Any] = {
"messages": [
convert_message_to_dict(m)
for m in messages
if not isinstance(m, SystemMessage)
]
}
for i in [i for i, m in enumerate(messages) if isinstance(m, SystemMessage)]:
if "system" not in messages_dict:
messages_dict["system"] = ""
messages_dict["system"] += cast(str, messages[i].content) + "\n"
return {
**messages_dict,
**self._default_params,
**kwargs,
}
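    # Worked example (comments only) for _convert_prompt_msg_params: given
    #   [SystemMessage("You are helpful."), HumanMessage("Hi")]
    # it returns roughly
    #   {"messages": [{"role": "user", "content": "Hi"}],
    #    "system": "You are helpful.\n",
    #    ...default params and any **kwargs}
    # i.e. system messages are dropped from "messages" and concatenated, newline
    # separated, into the single top-level "system" field expected by the API.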
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Call out to an qianfan models endpoint for each generation with a prompt.
Args:
messages: The messages to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = qianfan_model.invoke("Tell me a joke.")
"""
if self.streaming:
completion = ""
chat_generation_info: Dict = {}
usage_metadata: Optional[UsageMetadata] = None
for chunk in self._stream(messages, stop, run_manager, **kwargs):
chat_generation_info = (
chunk.generation_info
if chunk.generation_info is not None
else chat_generation_info
)
completion += chunk.text
if isinstance(chunk.message, AIMessageChunk):
usage_metadata = chunk.message.usage_metadata
lc_msg = AIMessage(
content=completion,
additional_kwargs={},
usage_metadata=usage_metadata,
)
gen = ChatGeneration(
message=lc_msg,
generation_info=dict(finish_reason="stop"),
)
return ChatResult(
generations=[gen],
llm_output={
"token_usage": usage_metadata or {},
"model_name": self.model,
},
)
params = self._convert_prompt_msg_params(messages, **kwargs)
params["stop"] = stop
response_payload = self.client.do(**params)
lc_msg = _convert_dict_to_message(response_payload)
gen = ChatGeneration(
message=lc_msg,
generation_info={
"finish_reason": "stop",
**response_payload.get("body", {}),
},
)
token_usage = response_payload.get("usage", {})
llm_output = {"token_usage": token_usage, "model_name": self.model}
return ChatResult(generations=[gen], llm_output=llm_output)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
completion = ""
chat_generation_info: Dict = {}
usage_metadata: Optional[UsageMetadata] = None
async for chunk in self._astream(messages, stop, run_manager, **kwargs):
chat_generation_info = (
chunk.generation_info
if chunk.generation_info is not None
else chat_generation_info
)
completion += chunk.text
if isinstance(chunk.message, AIMessageChunk):
usage_metadata = chunk.message.usage_metadata
lc_msg = AIMessage(
content=completion,
additional_kwargs={},
usage_metadata=usage_metadata,
)
gen = ChatGeneration(
message=lc_msg,
generation_info=dict(finish_reason="stop"),
)
return ChatResult(
generations=[gen],
llm_output={
"token_usage": usage_metadata or {},
"model_name": self.model,
},
)
params = self._convert_prompt_msg_params(messages, **kwargs)
params["stop"] = stop
response_payload = await self.client.ado(**params)
lc_msg = _convert_dict_to_message(response_payload)
generations = []
gen = ChatGeneration(
message=lc_msg,
generation_info={
"finish_reason": "stop",
**response_payload.get("body", {}),
},
)
generations.append(gen)
token_usage = response_payload.get("usage", {})
llm_output = {"token_usage": token_usage, "model_name": self.model}
return ChatResult(generations=generations, llm_output=llm_output)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
params = self._convert_prompt_msg_params(messages, **kwargs)
params["stop"] = stop
params["stream"] = True
for res in self.client.do(**params):
if res:
msg = _convert_dict_to_message(res)
additional_kwargs = msg.additional_kwargs.get("function_call", {})
chunk = ChatGenerationChunk(
text=res["result"],
message=AIMessageChunk( # type: ignore[call-arg]
content=msg.content,
role="assistant",
additional_kwargs=additional_kwargs,
usage_metadata=msg.usage_metadata,
tool_call_chunks=[
tool_call_chunk(
name=tc["name"],
args=json.dumps(tc["args"]),
id=tc["id"],
index=None,
)
for tc in msg.tool_calls
],
),
generation_info=msg.additional_kwargs,
)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
params = self._convert_prompt_msg_params(messages, **kwargs)
params["stop"] = stop
params["stream"] = True
async for res in await self.client.ado(**params):
if res:
msg = _convert_dict_to_message(res)
additional_kwargs = msg.additional_kwargs.get("function_call", {})
chunk = ChatGenerationChunk(
text=res["result"],
message=AIMessageChunk( # type: ignore[call-arg]
content=msg.content,
role="assistant",
additional_kwargs=additional_kwargs,
usage_metadata=msg.usage_metadata,
tool_call_chunks=[
tool_call_chunk(
name=tc["name"],
args=json.dumps(tc["args"]),
id=tc["id"],
index=None,
)
for tc in msg.tool_calls
],
),
generation_info=msg.additional_kwargs,
)
if run_manager:
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
def bind_tools(
self,
tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
"""Bind tool-like objects to this chat model.
Assumes model is compatible with OpenAI tool-calling API.
Args:
tools: A list of tool definitions to bind to this chat model.
Can be a dictionary, pydantic model, callable, or BaseTool. Pydantic
models, callables, and BaseTools will be automatically converted to
their schema dictionary representation.
**kwargs: Any additional parameters to pass to the
:class:`~langchain.runnable.Runnable` constructor.
"""
formatted_tools = [convert_to_openai_tool(tool)["function"] for tool in tools]
return super().bind(functions=formatted_tools, **kwargs)
def with_structured_output(
self,
schema: Union[Dict, Type[BaseModel]],
*,
include_raw: bool = False,
**kwargs: Any,
) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
"""Model wrapper that returns outputs formatted to match the given schema.
Args:
schema: The output schema as a dict or a Pydantic class. If a Pydantic class
then the model output will be an object of that class. If a dict then
the model output will be a dict. With a Pydantic class the returned
attributes will be validated, whereas with a dict they will not be. If
`method` is "function_calling" and `schema` is a dict, then the dict
must match the OpenAI function-calling spec.
include_raw: If False then only the parsed structured output is returned. If
an error occurs during model output parsing it will be raised. If True
then both the raw model response (a BaseMessage) and the parsed model
response will be returned. If an error occurs during output parsing it
will be caught and returned as well. The final output is always a dict
with keys "raw", "parsed", and "parsing_error".
Returns:
A Runnable that takes any ChatModel input and returns as output:
If include_raw is True then a dict with keys:
raw: BaseMessage
parsed: Optional[_DictOrPydantic]
parsing_error: Optional[BaseException]
If include_raw is False then just _DictOrPydantic is returned,
where _DictOrPydantic depends on the schema:
If schema is a Pydantic class then _DictOrPydantic is the Pydantic
class.
If schema is a dict then _DictOrPydantic is a dict.
Example: Function-calling, Pydantic schema (method="function_calling", include_raw=False):
.. code-block:: python
                from langchain_community.chat_models import QianfanChatEndpoint
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
llm = QianfanChatEndpoint(endpoint="ernie-3.5-8k-0329")
structured_llm = llm.with_structured_output(AnswerWithJustification)
structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> AnswerWithJustification(
# answer='They weigh the same',
# justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
# )
Example: Function-calling, Pydantic schema (method="function_calling", include_raw=True):
.. code-block:: python
                from langchain_community.chat_models import QianfanChatEndpoint
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
llm = QianfanChatEndpoint(endpoint="ernie-3.5-8k-0329")
structured_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True)
structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> {
# 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
# 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
# 'parsing_error': None
# }
Example: Function-calling, dict schema (method="function_calling", include_raw=False):
.. code-block:: python
                from langchain_community.chat_models import QianfanChatEndpoint
from pydantic import BaseModel
from langchain_core.utils.function_calling import convert_to_openai_tool
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
dict_schema = convert_to_openai_tool(AnswerWithJustification)
llm = QianfanChatEndpoint(endpoint="ernie-3.5-8k-0329")
structured_llm = llm.with_structured_output(dict_schema)
structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> {
# 'answer': 'They weigh the same',
# 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
# }
""" # noqa: E501
if kwargs:
raise ValueError(f"Received unsupported arguments {kwargs}")
is_pydantic_schema = isinstance(schema, type) and is_basemodel_subclass(schema)
llm = self.bind_tools([schema])
if is_pydantic_schema:
output_parser: OutputParserLike = PydanticToolsParser(
tools=[schema], # type: ignore[list-item]
first_tool_only=True, # type: ignore[list-item]
)
else:
key_name = convert_to_openai_tool(schema)["function"]["name"]
output_parser = JsonOutputKeyToolsParser(
key_name=key_name, first_tool_only=True
)
if include_raw:
parser_assign = RunnablePassthrough.assign(
parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
)
parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
parser_with_fallback = parser_assign.with_fallbacks(
[parser_none], exception_key="parsing_error"
)
return RunnableMap(raw=llm) | parser_with_fallback
else:
return llm | output_parser
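# A minimal, hedged usage sketch (not part of the library). It assumes the
# `qianfan` package is installed and QIANFAN_AK / QIANFAN_SK are set; the model
# name and prompts are illustrative only.
if __name__ == "__main__":
    chat = QianfanChatEndpoint(model="ERNIE-3.5-8K", temperature=0.2)
    # Blocking call: messages go through convert_message_to_dict() and the response
    # payload is mapped back to an AIMessage by _convert_dict_to_message().
    print(chat.invoke("Introduce yourself in one sentence."))
    # Streaming call: each chunk is an AIMessageChunk assembled in _stream().
    for chunk in chat.stream("Tell me a short joke about cats."):
        print(chunk.content, end="", flush=True)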
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/maritalk.py | import json
from http import HTTPStatus
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Union
import requests
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from pydantic import Field
from requests import Response
from requests.exceptions import HTTPError
class MaritalkHTTPError(HTTPError):
def __init__(self, request_obj: Response) -> None:
self.request_obj = request_obj
try:
response_json = request_obj.json()
if "detail" in response_json:
api_message = response_json["detail"]
elif "message" in response_json:
api_message = response_json["message"]
else:
api_message = response_json
except Exception:
api_message = request_obj.text
self.message = api_message
self.status_code = request_obj.status_code
def __str__(self) -> str:
status_code_meaning = HTTPStatus(self.status_code).phrase
formatted_message = f"HTTP Error: {self.status_code} - {status_code_meaning}"
formatted_message += f"\nDetail: {self.message}"
return formatted_message
class ChatMaritalk(BaseChatModel):
"""`MariTalk` Chat models API.
This class allows interacting with the MariTalk chatbot API.
    To use it, you must provide an API key through the constructor.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatMaritalk
chat = ChatMaritalk(api_key="your_api_key_here")
"""
api_key: str
"""Your MariTalk API key."""
model: str
"""Chose one of the available models:
- `sabia-2-medium`
- `sabia-2-small`
- `sabia-2-medium-2024-03-13`
- `sabia-2-small-2024-03-13`
- `maritalk-2024-01-08` (deprecated)"""
temperature: float = Field(default=0.7, gt=0.0, lt=1.0)
"""Run inference with this temperature.
    Must be in the open interval (0.0, 1.0)."""
max_tokens: int = Field(default=512, gt=0)
"""The maximum number of tokens to generate in the reply."""
do_sample: bool = Field(default=True)
"""Whether or not to use sampling; use `True` to enable."""
top_p: float = Field(default=0.95, gt=0.0, lt=1.0)
"""Nucleus sampling parameter controlling the size of
the probability mass considered for sampling."""
@property
def _llm_type(self) -> str:
"""Identifies the LLM type as 'maritalk'."""
return "maritalk"
def parse_messages_for_model(
self, messages: List[BaseMessage]
) -> List[Dict[str, Union[str, List[Union[str, Dict[Any, Any]]]]]]:
"""
Parses messages from LangChain's format to the format expected by
the MariTalk API.
Parameters:
messages (List[BaseMessage]): A list of messages in LangChain
format to be parsed.
Returns:
A list of messages formatted for the MariTalk API.
"""
parsed_messages = []
for message in messages:
if isinstance(message, HumanMessage):
role = "user"
elif isinstance(message, AIMessage):
role = "assistant"
elif isinstance(message, SystemMessage):
role = "system"
parsed_messages.append({"role": role, "content": message.content})
return parsed_messages
def _call(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""
Sends the parsed messages to the MariTalk API and returns the generated
response or an error message.
This method makes an HTTP POST request to the MariTalk API with the
provided messages and other parameters.
If the request is successful and the API returns a response,
this method returns a string containing the answer.
If the request is rate-limited or encounters another error,
it returns a string with the error message.
Parameters:
messages (List[BaseMessage]): Messages to send to the model.
stop (Optional[List[str]]): Tokens that will signal the model
to stop generating further tokens.
Returns:
str: If the API call is successful, returns the answer.
If an error occurs (e.g., rate limiting), returns a string
describing the error.
"""
url = "https://chat.maritaca.ai/api/chat/inference"
headers = {"authorization": f"Key {self.api_key}"}
stopping_tokens = stop if stop is not None else []
parsed_messages = self.parse_messages_for_model(messages)
data = {
"messages": parsed_messages,
"model": self.model,
"do_sample": self.do_sample,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
"stopping_tokens": stopping_tokens,
**kwargs,
}
response = requests.post(url, json=data, headers=headers)
if response.ok:
return response.json().get("answer", "No answer found")
else:
raise MaritalkHTTPError(response)
async def _acall(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""
Asynchronously sends the parsed messages to the MariTalk API and returns
the generated response or an error message.
This method makes an HTTP POST request to the MariTalk API with the
provided messages and other parameters using async I/O.
If the request is successful and the API returns a response,
this method returns a string containing the answer.
If the request is rate-limited or encounters another error,
it returns a string with the error message.
"""
try:
import httpx
url = "https://chat.maritaca.ai/api/chat/inference"
headers = {"authorization": f"Key {self.api_key}"}
stopping_tokens = stop if stop is not None else []
parsed_messages = self.parse_messages_for_model(messages)
data = {
"messages": parsed_messages,
"model": self.model,
"do_sample": self.do_sample,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
"stopping_tokens": stopping_tokens,
**kwargs,
}
async with httpx.AsyncClient() as client:
response = await client.post(
url, json=data, headers=headers, timeout=None
)
if response.status_code == 200:
return response.json().get("answer", "No answer found")
else:
raise MaritalkHTTPError(response) # type: ignore[arg-type]
except ImportError:
raise ImportError(
"Could not import httpx python package. "
"Please install it with `pip install httpx`."
)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
headers = {"Authorization": f"Key {self.api_key}"}
stopping_tokens = stop if stop is not None else []
parsed_messages = self.parse_messages_for_model(messages)
data = {
"messages": parsed_messages,
"model": self.model,
"do_sample": self.do_sample,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
"stopping_tokens": stopping_tokens,
"stream": True,
**kwargs,
}
response = requests.post(
"https://chat.maritaca.ai/api/chat/inference",
data=json.dumps(data),
headers=headers,
stream=True,
)
if response.ok:
for line in response.iter_lines():
if line.startswith(b"data: "):
response_data = line.replace(b"data: ", b"").decode("utf-8")
if response_data:
parsed_data = json.loads(response_data)
if "text" in parsed_data:
delta = parsed_data["text"]
chunk = ChatGenerationChunk(
message=AIMessageChunk(content=delta)
)
if run_manager:
run_manager.on_llm_new_token(delta, chunk=chunk)
yield chunk
else:
raise MaritalkHTTPError(response)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
try:
import httpx
headers = {"Authorization": f"Key {self.api_key}"}
stopping_tokens = stop if stop is not None else []
parsed_messages = self.parse_messages_for_model(messages)
data = {
"messages": parsed_messages,
"model": self.model,
"do_sample": self.do_sample,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
"stopping_tokens": stopping_tokens,
"stream": True,
**kwargs,
}
async with httpx.AsyncClient() as client:
async with client.stream(
"POST",
"https://chat.maritaca.ai/api/chat/inference",
data=json.dumps(data), # type: ignore[arg-type]
headers=headers,
timeout=None,
) as response:
if response.status_code == 200:
async for line in response.aiter_lines():
if line.startswith("data: "):
response_data = line.replace("data: ", "")
if response_data:
parsed_data = json.loads(response_data)
if "text" in parsed_data:
delta = parsed_data["text"]
chunk = ChatGenerationChunk(
message=AIMessageChunk(content=delta)
)
if run_manager:
await run_manager.on_llm_new_token(
delta, chunk=chunk
)
yield chunk
else:
raise MaritalkHTTPError(response) # type: ignore[arg-type]
except ImportError:
raise ImportError(
"Could not import httpx python package. "
"Please install it with `pip install httpx`."
)
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
output_str = self._call(messages, stop=stop, run_manager=run_manager, **kwargs)
message = AIMessage(content=output_str)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
output_str = await self._acall(
messages, stop=stop, run_manager=run_manager, **kwargs
)
message = AIMessage(content=output_str)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
@property
def _identifying_params(self) -> Dict[str, Any]:
"""
Identifies the key parameters of the chat model for logging
or tracking purposes.
Returns:
A dictionary of the key configuration parameters.
"""
return {
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
"max_tokens": self.max_tokens,
}
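# A minimal, hedged usage sketch (not part of the library). The API key is a
# placeholder; a real MariTalk key is required for these calls to succeed.
if __name__ == "__main__":
    chat = ChatMaritalk(api_key="placeholder-api-key", model="sabia-2-medium")
    # _generate() delegates to _call(), which POSTs to the MariTalk inference API.
    print(chat.invoke("What is the capital of Brazil?"))
    # _stream() parses the "data: " server-sent-event lines into AIMessageChunks.
    for chunk in chat.stream("Tell me a short joke."):
        print(chunk.content, end="", flush=True)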
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/hunyuan.py | import json
import logging
from typing import Any, Dict, Iterator, List, Mapping, Optional, Type
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import (
BaseChatModel,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.utils import (
convert_to_secret_str,
get_from_dict_or_env,
get_pydantic_field_names,
pre_init,
)
from pydantic import ConfigDict, Field, SecretStr, model_validator
logger = logging.getLogger(__name__)
def _convert_message_to_dict(message: BaseMessage) -> dict:
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"Role": message.role, "Content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"Role": "system", "Content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"Role": "user", "Content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"Role": "assistant", "Content": message.content}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["Role"]
if role == "system":
return SystemMessage(content=_dict.get("Content", "") or "")
elif role == "user":
return HumanMessage(content=_dict["Content"])
elif role == "assistant":
return AIMessage(content=_dict.get("Content", "") or "")
else:
return ChatMessage(content=_dict["Content"], role=role)
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
role = _dict.get("Role")
content = _dict.get("Content") or ""
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role) # type: ignore[arg-type]
else:
return default_class(content=content) # type: ignore[call-arg]
def _create_chat_result(response: Mapping[str, Any]) -> ChatResult:
generations = []
for choice in response["Choices"]:
message = _convert_dict_to_message(choice["Message"])
message.id = response.get("Id", "")
generations.append(ChatGeneration(message=message))
token_usage = response["Usage"]
llm_output = {"token_usage": token_usage}
return ChatResult(generations=generations, llm_output=llm_output)
class ChatHunyuan(BaseChatModel):
"""Tencent Hunyuan chat models API by Tencent.
For more information, see https://cloud.tencent.com/document/product/1729
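Example (a minimal usage sketch, assuming the ``HUNYUAN_APP_ID``,
``HUNYUAN_SECRET_ID`` and ``HUNYUAN_SECRET_KEY`` environment variables are set
and ``tencentcloud-sdk-python`` is installed):
.. code-block:: python
    from langchain_community.chat_models.hunyuan import ChatHunyuan
    chat = ChatHunyuan(model="hunyuan-lite")
    chat.invoke("What is Hunyuan?")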
"""
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"hunyuan_app_id": "HUNYUAN_APP_ID",
"hunyuan_secret_id": "HUNYUAN_SECRET_ID",
"hunyuan_secret_key": "HUNYUAN_SECRET_KEY",
}
@property
def lc_serializable(self) -> bool:
return True
hunyuan_app_id: Optional[int] = None
"""Hunyuan App ID"""
hunyuan_secret_id: Optional[str] = None
"""Hunyuan Secret ID"""
hunyuan_secret_key: Optional[SecretStr] = None
"""Hunyuan Secret Key"""
streaming: bool = False
"""Whether to stream the results or not."""
request_timeout: int = 60
"""Timeout for requests to Hunyuan API. Default is 60 seconds."""
temperature: float = 1.0
"""What sampling temperature to use."""
top_p: float = 1.0
"""What probability mass to use."""
model: str = "hunyuan-lite"
"""What Model to use.
Optional model:
- hunyuan-lite
- hunyuan-standard
- hunyuan-standard-256K
- hunyuan-pro
- hunyuan-code
- hunyuan-role
- hunyuan-functioncall
- hunyuan-vision
"""
stream_moderation: bool = False
"""Whether to review the results or not when streaming is true."""
enable_enhancement: bool = True
"""Whether to enhancement the results or not."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for API call not explicitly specified."""
model_config = ConfigDict(
populate_by_name=True,
)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
values["hunyuan_app_id"] = get_from_dict_or_env(
values,
"hunyuan_app_id",
"HUNYUAN_APP_ID",
)
values["hunyuan_secret_id"] = get_from_dict_or_env(
values,
"hunyuan_secret_id",
"HUNYUAN_SECRET_ID",
)
values["hunyuan_secret_key"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"hunyuan_secret_key",
"HUNYUAN_SECRET_KEY",
)
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Hunyuan API."""
normal_params = {
"Temperature": self.temperature,
"TopP": self.top_p,
"Model": self.model,
"Stream": self.streaming,
"StreamModeration": self.stream_moderation,
"EnableEnhancement": self.enable_enhancement,
}
return {**normal_params, **self.model_kwargs}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
res = self._chat(messages, **kwargs)
return _create_chat_result(json.loads(res.to_json_string()))
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
res = self._chat(messages, **kwargs)
default_chunk_class = AIMessageChunk
for chunk in res:
chunk = chunk.get("data", "")
if len(chunk) == 0:
continue
response = json.loads(chunk)
if "error" in response:
raise ValueError(f"Error from Hunyuan api response: {response}")
for choice in response["Choices"]:
chunk = _convert_delta_to_message_chunk(
choice["Delta"], default_chunk_class
)
chunk.id = response.get("Id", "")
default_chunk_class = chunk.__class__
cg_chunk = ChatGenerationChunk(message=chunk)
if run_manager:
run_manager.on_llm_new_token(chunk.content, chunk=cg_chunk)
yield cg_chunk
def _chat(self, messages: List[BaseMessage], **kwargs: Any) -> Any:
if self.hunyuan_secret_key is None:
raise ValueError("Hunyuan secret key is not set.")
try:
from tencentcloud.common import credential
from tencentcloud.hunyuan.v20230901 import hunyuan_client, models
except ImportError:
raise ImportError(
"Could not import tencentcloud python package. "
"Please install it with `pip install tencentcloud-sdk-python`."
)
parameters = {**self._default_params, **kwargs}
cred = credential.Credential(
self.hunyuan_secret_id, str(self.hunyuan_secret_key.get_secret_value())
)
client = hunyuan_client.HunyuanClient(cred, "")
req = models.ChatCompletionsRequest()
params = {
"Messages": [_convert_message_to_dict(m) for m in messages],
**parameters,
}
req.from_json_string(json.dumps(params))
resp = client.ChatCompletions(req)
return resp
@property
def _llm_type(self) -> str:
return "hunyuan-chat"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/promptlayer_openai.py | """PromptLayer wrapper."""
import datetime
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.messages import BaseMessage
from langchain_core.outputs import ChatResult
from langchain_community.chat_models import ChatOpenAI
class PromptLayerChatOpenAI(ChatOpenAI):
"""`PromptLayer` and `OpenAI` Chat large language models API.
To use, you should have the ``openai`` and ``promptlayer`` Python
packages installed, and the environment variables ``OPENAI_API_KEY``
and ``PROMPTLAYER_API_KEY`` set with your OpenAI API key and
PromptLayer API key, respectively.
All parameters that can be passed to the OpenAI LLM can also
be passed here. The PromptLayerChatOpenAI adds two optional
parameters:
``pl_tags``: List of strings to tag the request with.
``return_pl_id``: If True, the PromptLayer request ID will be
returned in the ``generation_info`` field of the
``Generation`` object.
Example:
.. code-block:: python
from langchain_community.chat_models import PromptLayerChatOpenAI
openai = PromptLayerChatOpenAI(model="gpt-3.5-turbo")
"""
pl_tags: Optional[List[str]]
return_pl_id: Optional[bool] = False
@classmethod
def is_lc_serializable(cls) -> bool:
return False
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
"""Call ChatOpenAI generate and then call PromptLayer API to log the request."""
from promptlayer.utils import get_api_key, promptlayer_api_request
request_start_time = datetime.datetime.now().timestamp()
generated_responses = super()._generate(
messages, stop, run_manager, stream=stream, **kwargs
)
request_end_time = datetime.datetime.now().timestamp()
message_dicts, params = super()._create_message_dicts(messages, stop)
for i, generation in enumerate(generated_responses.generations):
response_dict, params = super()._create_message_dicts(
[generation.message], stop
)
params = {**params, **kwargs}
pl_request_id = promptlayer_api_request(
"langchain.PromptLayerChatOpenAI",
"langchain",
message_dicts,
params,
self.pl_tags,
response_dict,
request_start_time,
request_end_time,
get_api_key(),
return_pl_id=self.return_pl_id,
)
if self.return_pl_id:
if generation.generation_info is None or not isinstance(
generation.generation_info, dict
):
generation.generation_info = {}
generation.generation_info["pl_request_id"] = pl_request_id
return generated_responses
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
"""Call ChatOpenAI agenerate and then call PromptLayer to log."""
from promptlayer.utils import get_api_key, promptlayer_api_request_async
request_start_time = datetime.datetime.now().timestamp()
generated_responses = await super()._agenerate(
messages, stop, run_manager, stream=stream, **kwargs
)
request_end_time = datetime.datetime.now().timestamp()
message_dicts, params = super()._create_message_dicts(messages, stop)
for i, generation in enumerate(generated_responses.generations):
response_dict, params = super()._create_message_dicts(
[generation.message], stop
)
params = {**params, **kwargs}
pl_request_id = await promptlayer_api_request_async(
"langchain.PromptLayerChatOpenAI.async",
"langchain",
message_dicts,
params,
self.pl_tags,
response_dict,
request_start_time,
request_end_time,
get_api_key(),
return_pl_id=self.return_pl_id,
)
if self.return_pl_id:
if generation.generation_info is None or not isinstance(
generation.generation_info, dict
):
generation.generation_info = {}
generation.generation_info["pl_request_id"] = pl_request_id
return generated_responses
@property
def _llm_type(self) -> str:
return "promptlayer-openai-chat"
@property
def _identifying_params(self) -> Dict[str, Any]:
return {
**super()._identifying_params,
"pl_tags": self.pl_tags,
"return_pl_id": self.return_pl_id,
}
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/huggingface.py | """Hugging Face Chat Wrapper."""
from typing import Any, AsyncIterator, Iterator, List, Optional
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import (
ChatGeneration,
ChatGenerationChunk,
ChatResult,
LLMResult,
)
from pydantic import model_validator
from typing_extensions import Self
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
from langchain_community.llms.huggingface_hub import HuggingFaceHub
from langchain_community.llms.huggingface_text_gen_inference import (
HuggingFaceTextGenInference,
)
DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful, and honest assistant."""
@deprecated(
since="0.0.37",
removal="1.0",
alternative_import="langchain_huggingface.ChatHuggingFace",
)
class ChatHuggingFace(BaseChatModel):
"""
Wrapper for using Hugging Face LLMs as ChatModels.
Works with `HuggingFaceTextGenInference`, `HuggingFaceEndpoint`,
and `HuggingFaceHub` LLMs.
Upon instantiating this class, the model_id is resolved from the url
provided to the LLM, and the appropriate tokenizer is loaded from
the HuggingFace Hub.
Adapted from: https://python.langchain.com/docs/integrations/chat/llama2_chat
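Example (a minimal sketch; the repo id is illustrative and a configured
Hugging Face token plus the ``transformers`` package are assumed):
.. code-block:: python
    from langchain_community.chat_models.huggingface import ChatHuggingFace
    from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
    llm = HuggingFaceEndpoint(repo_id="HuggingFaceH4/zephyr-7b-beta")
    chat = ChatHuggingFace(llm=llm)
    chat.invoke("What is Hugging Face?")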
"""
llm: Any
"""LLM, must be of type HuggingFaceTextGenInference, HuggingFaceEndpoint, or
HuggingFaceHub."""
system_message: SystemMessage = SystemMessage(content=DEFAULT_SYSTEM_PROMPT)
tokenizer: Any = None
model_id: Optional[str] = None
streaming: bool = False
def __init__(self, **kwargs: Any):
super().__init__(**kwargs)
from transformers import AutoTokenizer
self._resolve_model_id()
self.tokenizer = (
AutoTokenizer.from_pretrained(self.model_id)
if self.tokenizer is None
else self.tokenizer
)
@model_validator(mode="after")
def validate_llm(self) -> Self:
if not isinstance(
self.llm,
(HuggingFaceTextGenInference, HuggingFaceEndpoint, HuggingFaceHub),
):
raise TypeError(
"Expected llm to be one of HuggingFaceTextGenInference, "
f"HuggingFaceEndpoint, HuggingFaceHub, received {type(self.llm)}"
)
return self
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
request = self._to_chat_prompt(messages)
for data in self.llm.stream(request, **kwargs):
delta = data
chunk = ChatGenerationChunk(message=AIMessageChunk(content=delta))
if run_manager:
run_manager.on_llm_new_token(delta, chunk=chunk)
yield chunk
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
request = self._to_chat_prompt(messages)
async for data in self.llm.astream(request, **kwargs):
delta = data
chunk = ChatGenerationChunk(message=AIMessageChunk(content=delta))
if run_manager:
await run_manager.on_llm_new_token(delta, chunk=chunk)
yield chunk
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
llm_input = self._to_chat_prompt(messages)
llm_result = self.llm._generate(
prompts=[llm_input], stop=stop, run_manager=run_manager, **kwargs
)
return self._to_chat_result(llm_result)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
llm_input = self._to_chat_prompt(messages)
llm_result = await self.llm._agenerate(
prompts=[llm_input], stop=stop, run_manager=run_manager, **kwargs
)
return self._to_chat_result(llm_result)
def _to_chat_prompt(
self,
messages: List[BaseMessage],
) -> str:
"""Convert a list of messages into a prompt format expected by wrapped LLM."""
if not messages:
raise ValueError("At least one HumanMessage must be provided!")
if not isinstance(messages[-1], HumanMessage):
raise ValueError("Last message must be a HumanMessage!")
messages_dicts = [self._to_chatml_format(m) for m in messages]
return self.tokenizer.apply_chat_template(
messages_dicts, tokenize=False, add_generation_prompt=True
)
def _to_chatml_format(self, message: BaseMessage) -> dict:
"""Convert LangChain message to ChatML format."""
if isinstance(message, SystemMessage):
role = "system"
elif isinstance(message, AIMessage):
role = "assistant"
elif isinstance(message, HumanMessage):
role = "user"
else:
raise ValueError(f"Unknown message type: {type(message)}")
return {"role": role, "content": message.content}
@staticmethod
def _to_chat_result(llm_result: LLMResult) -> ChatResult:
chat_generations = []
for g in llm_result.generations[0]:
chat_generation = ChatGeneration(
message=AIMessage(content=g.text), generation_info=g.generation_info
)
chat_generations.append(chat_generation)
return ChatResult(
generations=chat_generations, llm_output=llm_result.llm_output
)
def _resolve_model_id(self) -> None:
"""Resolve the model_id from the LLM's inference_server_url"""
from huggingface_hub import list_inference_endpoints
available_endpoints = list_inference_endpoints("*")
if isinstance(self.llm, HuggingFaceHub) or (
hasattr(self.llm, "repo_id") and self.llm.repo_id
):
self.model_id = self.llm.repo_id
return
elif isinstance(self.llm, HuggingFaceTextGenInference):
endpoint_url: Optional[str] = self.llm.inference_server_url
else:
endpoint_url = self.llm.endpoint_url
for endpoint in available_endpoints:
if endpoint.url == endpoint_url:
self.model_id = endpoint.repository
if not self.model_id:
raise ValueError(
"Failed to resolve model_id:"
f"Could not find model id for inference server: {endpoint_url}"
"Make sure that your Hugging Face token has access to the endpoint."
)
@property
def _llm_type(self) -> str:
return "huggingface-chat-wrapper"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/deepinfra.py | """deepinfra.com chat models wrapper"""
from __future__ import annotations
import json
import logging
from json import JSONDecodeError
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
)
import aiohttp
import requests
from langchain_core.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.language_models.llms import create_base_retry_decorator
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
FunctionMessage,
FunctionMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
ToolMessage,
)
from langchain_core.messages.tool import ToolCall
from langchain_core.messages.tool import tool_call as create_tool_call
from langchain_core.outputs import (
ChatGeneration,
ChatGenerationChunk,
ChatResult,
)
from langchain_core.runnables import Runnable
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from langchain_core.utils.function_calling import convert_to_openai_tool
from pydantic import BaseModel, ConfigDict, Field, model_validator
from typing_extensions import Self
from langchain_community.utilities.requests import Requests
logger = logging.getLogger(__name__)
class ChatDeepInfraException(Exception):
"""Exception raised when the DeepInfra API returns an error."""
pass
def _create_retry_decorator(
llm: ChatDeepInfra,
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
"""Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions."""
return create_base_retry_decorator(
error_types=[requests.exceptions.ConnectTimeout, ChatDeepInfraException],
max_retries=llm.max_retries,
run_manager=run_manager,
)
def _parse_tool_calling(tool_call: dict) -> ToolCall:
"""
Convert a tool call response from the server to a ToolCall object.
Args:
    tool_call: The raw tool call dict returned by the server.
Returns:
    The parsed ToolCall.
"""
name = tool_call["function"].get("name", "")
try:
args = json.loads(tool_call["function"]["arguments"])
except (JSONDecodeError, TypeError):
args = {}
id = tool_call.get("id")
return create_tool_call(name=name, args=args, id=id)
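# For illustration (a hedged sketch, not part of the original module): a server
# payload like {"id": "call_1", "function": {"name": "add", "arguments": '{"a": 1}'}}
# parses to a ToolCall with name "add", args {"a": 1} and id "call_1".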
def _convert_to_tool_calling(tool_call: ToolCall) -> Dict[str, Any]:
"""
Convert a ToolCall object to a tool call request for the server.
Args:
    tool_call: The ToolCall to convert.
Returns:
    The request dict to send to the server.
"""
return {
"type": "function",
"function": {
"arguments": json.dumps(tool_call["args"]),
"name": tool_call["name"],
},
"id": tool_call.get("id"),
}
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
content = _dict.get("content", "") or ""
tool_calls_content = _dict.get("tool_calls", []) or []
tool_calls = [
_parse_tool_calling(tool_call) for tool_call in tool_calls_content
]
return AIMessage(content=content, tool_calls=tool_calls)
elif role == "system":
return SystemMessage(content=_dict["content"])
elif role == "function":
return FunctionMessage(content=_dict["content"], name=_dict["name"])
else:
return ChatMessage(content=_dict["content"], role=role)
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
role = _dict.get("role")
content = _dict.get("content") or ""
tool_calls = _dict.get("tool_calls") or []
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
tool_calls = [_parse_tool_calling(tool_call) for tool_call in tool_calls]
return AIMessageChunk(content=content, tool_calls=tool_calls)
elif role == "system" or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role == "function" or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=content, name=_dict["name"])
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role) # type: ignore[arg-type]
else:
return default_class(content=content) # type: ignore[call-arg]
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
tool_calls = [
_convert_to_tool_calling(tool_call) for tool_call in message.tool_calls
]
message_dict = {
"role": "assistant",
"content": message.content,
"tool_calls": tool_calls, # type: ignore[dict-item]
}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"content": message.content,
"name": message.name,
}
elif isinstance(message, ToolMessage):
message_dict = {
"role": "tool",
"content": message.content,
"name": message.name, # type: ignore[dict-item]
"tool_call_id": message.tool_call_id,
}
else:
raise ValueError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
class ChatDeepInfra(BaseChatModel):
"""A chat model that uses the DeepInfra API."""
# client: Any #: :meta private:
model_name: str = Field(default="meta-llama/Llama-2-70b-chat-hf", alias="model")
"""Model name to use."""
deepinfra_api_token: Optional[str] = None
request_timeout: Optional[float] = Field(default=None, alias="timeout")
temperature: Optional[float] = 1
"""Run inference with this temperature. Must be in the closed
interval [0.0, 1.0]."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for the API call not explicitly specified."""
top_p: Optional[float] = None
"""Decode using nucleus sampling: consider the smallest set of tokens whose
probability sum is at least top_p. Must be in the closed interval [0.0, 1.0]."""
top_k: Optional[int] = None
"""Decode using top-k sampling: consider the set of top_k most probable tokens.
Must be positive."""
n: int = 1
"""Number of chat completions to generate for each prompt. Note that the API may
not return the full n completions if duplicates are generated."""
max_tokens: int = 256
streaming: bool = False
max_retries: int = 1
model_config = ConfigDict(
populate_by_name=True,
)
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
"model": self.model_name,
"max_tokens": self.max_tokens,
"stream": self.streaming,
"n": self.n,
"temperature": self.temperature,
"request_timeout": self.request_timeout,
**self.model_kwargs,
}
@property
def _client_params(self) -> Dict[str, Any]:
"""Get the parameters used for the openai client."""
return {**self._default_params}
def completion_with_retry(
self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(self, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
try:
request_timeout = kwargs.pop("request_timeout")
request = Requests(headers=self._headers())
response = request.post(
url=self._url(), data=self._body(kwargs), timeout=request_timeout
)
self._handle_status(response.status_code, response.text)
return response
except Exception as e:
print("EX", e) # noqa: T201
raise
return _completion_with_retry(**kwargs)
async def acompletion_with_retry(
self,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(self, run_manager=run_manager)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
try:
request_timeout = kwargs.pop("request_timeout")
request = Requests(headers=self._headers())
async with request.apost(
url=self._url(), data=self._body(kwargs), timeout=request_timeout
) as response:
self._handle_status(response.status, response.text)
return await response.json()
except Exception as e:
print("EX", e) # noqa: T201
raise
return await _completion_with_retry(**kwargs)
@model_validator(mode="before")
@classmethod
def init_defaults(cls, values: Dict) -> Any:
"""Validate api key, python package exists, temperature, top_p, and top_k."""
# For compatibility with LiteLLM
api_key = get_from_dict_or_env(
values,
"deepinfra_api_key",
"DEEPINFRA_API_KEY",
default="",
)
values["deepinfra_api_token"] = get_from_dict_or_env(
values,
"deepinfra_api_token",
"DEEPINFRA_API_TOKEN",
default=api_key,
)
return values
@model_validator(mode="after")
def validate_environment(self) -> Self:
if self.temperature is not None and not 0 <= self.temperature <= 1:
raise ValueError("temperature must be in the range [0.0, 1.0]")
if self.top_p is not None and not 0 <= self.top_p <= 1:
raise ValueError("top_p must be in the range [0.0, 1.0]")
if self.top_k is not None and self.top_k <= 0:
raise ValueError("top_k must be positive")
return self
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
)
return self._create_chat_result(response.json())
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
gen = ChatGeneration(
message=message,
generation_info=dict(finish_reason=res.get("finish_reason")),
)
generations.append(gen)
token_usage = response.get("usage", {})
llm_output = {"token_usage": token_usage, "model": self.model_name}
res = ChatResult(generations=generations, llm_output=llm_output)
return res
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = self._client_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
response = self.completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
)
for line in _parse_stream(response.iter_lines()):
chunk = _handle_sse_line(line)
if chunk:
cg_chunk = ChatGenerationChunk(message=chunk, generation_info=None)
if run_manager:
run_manager.on_llm_new_token(str(chunk.content), chunk=cg_chunk)
yield cg_chunk
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {"messages": message_dicts, "stream": True, **params, **kwargs}
request_timeout = params.pop("request_timeout")
request = Requests(headers=self._headers())
async with request.apost(
url=self._url(), data=self._body(params), timeout=request_timeout
) as response:
async for line in _parse_stream_async(response.content):
chunk = _handle_sse_line(line)
if chunk:
cg_chunk = ChatGenerationChunk(message=chunk, generation_info=None)
if run_manager:
await run_manager.on_llm_new_token(
str(chunk.content), chunk=cg_chunk
)
yield cg_chunk
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {"messages": message_dicts, **params, **kwargs}
res = await self.acompletion_with_retry(run_manager=run_manager, **params)
return self._create_chat_result(res)
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model_name,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"n": self.n,
}
@property
def _llm_type(self) -> str:
return "deepinfra-chat"
def _handle_status(self, code: int, text: Any) -> None:
if code >= 500:
raise ChatDeepInfraException(
f"DeepInfra Server error status {code}: {text}"
)
elif code >= 400:
raise ValueError(f"DeepInfra received an invalid payload: {text}")
elif code != 200:
raise Exception(
f"DeepInfra returned an unexpected response with status "
f"{code}: {text}"
)
def _url(self) -> str:
return "https://stage.api.deepinfra.com/v1/openai/chat/completions"
def _headers(self) -> Dict:
return {
"Authorization": f"bearer {self.deepinfra_api_token}",
"Content-Type": "application/json",
}
def _body(self, kwargs: Any) -> Dict:
return kwargs
def bind_tools(
self,
tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
"""Bind tool-like objects to this chat model.
Assumes model is compatible with OpenAI tool-calling API.
Args:
tools: A list of tool definitions to bind to this chat model.
Can be a dictionary, pydantic model, callable, or BaseTool. Pydantic
models, callables, and BaseTools will be automatically converted to
their schema dictionary representation.
**kwargs: Any additional parameters to pass to the
:class:`~langchain.runnable.Runnable` constructor.
"""
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
return super().bind(tools=formatted_tools, **kwargs)
def _parse_stream(rbody: Iterator[bytes]) -> Iterator[str]:
for line in rbody:
_line = _parse_stream_helper(line)
if _line is not None:
yield _line
async def _parse_stream_async(rbody: aiohttp.StreamReader) -> AsyncIterator[str]:
async for line in rbody:
_line = _parse_stream_helper(line)
if _line is not None:
yield _line
def _parse_stream_helper(line: bytes) -> Optional[str]:
if line and line.startswith(b"data:"):
if line.startswith(b"data: "):
# An SSE event may be valid when it contains whitespace
line = line[len(b"data: ") :]
else:
line = line[len(b"data:") :]
if line.strip() == b"[DONE]":
# return here will cause GeneratorExit exception in urllib3
# and it will close http connection with TCP Reset
return None
else:
return line.decode("utf-8")
return None
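# For illustration (a hedged sketch, not part of the original module):
#   _parse_stream_helper(b'data: {"choices": []}') -> '{"choices": []}'
#   _parse_stream_helper(b"data: [DONE]") -> None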
def _handle_sse_line(line: str) -> Optional[BaseMessageChunk]:
try:
obj = json.loads(line)
default_chunk_class = AIMessageChunk
delta = obj.get("choices", [{}])[0].get("delta", {})
return _convert_delta_to_message_chunk(delta, default_chunk_class)
except Exception:
return None
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/friendli.py | from __future__ import annotations
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_community.llms.friendli import BaseFriendli
def get_role(message: BaseMessage) -> str:
"""Get role of the message.
Args:
message (BaseMessage): The message object.
Raises:
ValueError: Raised when the message is of an unknown type.
Returns:
str: The role of the message.
"""
if isinstance(message, (ChatMessage, HumanMessage)):
return "user"
if isinstance(message, AIMessage):
return "assistant"
if isinstance(message, SystemMessage):
return "system"
raise ValueError(f"Got unknown type {message}")
def get_chat_request(messages: List[BaseMessage]) -> Dict[str, Any]:
"""Get a request of the Friendli chat API.
Args:
messages (List[BaseMessage]): Messages comprising the conversation so far.
Returns:
Dict[str, Any]: The request for the Friendli chat API.
"""
return {
"messages": [
{"role": get_role(message), "content": message.content}
for message in messages
]
}
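# For illustration (a hedged sketch, not part of the original module):
#   get_chat_request([HumanMessage(content="Hi")])
#   -> {"messages": [{"role": "user", "content": "Hi"}]}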
class ChatFriendli(BaseChatModel, BaseFriendli):
"""Friendli LLM for chat.
The ``friendli-client`` package should be installed with ``pip install friendli-client``.
You must set the ``FRIENDLI_TOKEN`` environment variable or provide your
personal access token via the ``friendli_token`` argument.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatFriendli
chat = ChatFriendli(
model="llama-2-13b-chat", friendli_token="YOUR FRIENDLI TOKEN"
)
chat.invoke("What is generative AI?")
"""
model: str = "llama-2-13b-chat"
@property
def lc_secrets(self) -> Dict[str, str]:
return {"friendli_token": "FRIENDLI_TOKEN"}
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Friendli completions API."""
return {
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"max_tokens": self.max_tokens,
"stop": self.stop,
"temperature": self.temperature,
"top_p": self.top_p,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {"model": self.model, **self._default_params}
@property
def _llm_type(self) -> str:
return "friendli-chat"
def _get_invocation_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
params = self._default_params
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
params["stop"] = self.stop
else:
params["stop"] = stop
return {**params, **kwargs}
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
params = self._get_invocation_params(stop=stop, **kwargs)
stream = self.client.chat.completions.create(
**get_chat_request(messages), stream=True, model=self.model, **params
)
for chunk in stream:
delta = chunk.choices[0].delta.content
if delta:
if run_manager:
run_manager.on_llm_new_token(delta)
yield ChatGenerationChunk(message=AIMessageChunk(content=delta))
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
params = self._get_invocation_params(stop=stop, **kwargs)
stream = await self.async_client.chat.completions.create(
**get_chat_request(messages), stream=True, model=self.model, **params
)
async for chunk in stream:
delta = chunk.choices[0].delta.content
if delta:
if run_manager:
await run_manager.on_llm_new_token(delta)
yield ChatGenerationChunk(message=AIMessageChunk(content=delta))
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
params = self._get_invocation_params(stop=stop, **kwargs)
response = self.client.chat.completions.create(
messages=[
{
"role": get_role(message),
"content": message.content,
}
for message in messages
],
stream=False,
model=self.model,
**params,
)
message = AIMessage(content=response.choices[0].message.content)
return ChatResult(generations=[ChatGeneration(message=message)])
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
params = self._get_invocation_params(stop=stop, **kwargs)
response = await self.async_client.chat.completions.create(
messages=[
{
"role": get_role(message),
"content": message.content,
}
for message in messages
],
stream=False,
model=self.model,
**params,
)
message = AIMessage(content=response.choices[0].message.content)
return ChatResult(generations=[ChatGeneration(message=message)])
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/everlyai.py | """EverlyAI Endpoints chat wrapper. Relies heavily on ChatOpenAI."""
from __future__ import annotations
import logging
import sys
import warnings
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Optional,
Sequence,
Set,
Type,
Union,
)
from langchain_core.messages import BaseMessage
from langchain_core.tools import BaseTool
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import Field, model_validator
from langchain_community.adapters.openai import convert_message_to_dict
from langchain_community.chat_models.openai import (
ChatOpenAI,
_import_tiktoken,
)
if TYPE_CHECKING:
import tiktoken
logger = logging.getLogger(__name__)
DEFAULT_API_BASE = "https://everlyai.xyz/hosted"
DEFAULT_MODEL = "meta-llama/Llama-2-7b-chat-hf"
class ChatEverlyAI(ChatOpenAI):
"""`EverlyAI` Chat large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``EVERLYAI_API_KEY`` set with your API key.
Alternatively, you can use the everlyai_api_key keyword argument.
Any parameters that are valid to be passed to the `openai.create` call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatEverlyAI
chat = ChatEverlyAI(model_name="meta-llama/Llama-2-7b-chat-hf")
"""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "everlyai-chat"
@property
def lc_secrets(self) -> Dict[str, str]:
return {"everlyai_api_key": "EVERLYAI_API_KEY"}
@classmethod
def is_lc_serializable(cls) -> bool:
return False
everlyai_api_key: Optional[str] = None
"""EverlyAI Endpoints API keys."""
model_name: str = Field(default=DEFAULT_MODEL, alias="model")
"""Model name to use."""
everlyai_api_base: str = DEFAULT_API_BASE
"""Base URL path for API requests."""
available_models: Optional[Set[str]] = None
"""Available models from EverlyAI API."""
@staticmethod
def get_available_models() -> Set[str]:
"""Get available models from EverlyAI API."""
# EverlyAI doesn't yet support dynamically querying for available models.
return set(
[
"meta-llama/Llama-2-7b-chat-hf",
"meta-llama/Llama-2-13b-chat-hf-quantized",
]
)
@model_validator(mode="before")
@classmethod
def validate_environment_override(cls, values: dict) -> Any:
"""Validate that api key and python package exists in environment."""
values["openai_api_key"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"everlyai_api_key",
"EVERLYAI_API_KEY",
)
)
values["openai_api_base"] = DEFAULT_API_BASE
try:
import openai
except ImportError as e:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`.",
) from e
try:
values["client"] = openai.ChatCompletion # type: ignore[attr-defined]
except AttributeError as exc:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`.",
) from exc
if "model_name" not in values.keys():
values["model_name"] = DEFAULT_MODEL
model_name = values["model_name"]
available_models = cls.get_available_models()
if model_name not in available_models:
raise ValueError(
f"Model name {model_name} not found in available models: "
f"{available_models}.",
)
values["available_models"] = available_models
return values
def _get_encoding_model(self) -> tuple[str, tiktoken.Encoding]:
tiktoken_ = _import_tiktoken()
if self.tiktoken_model_name is not None:
model = self.tiktoken_model_name
else:
model = self.model_name
# Resolve the tiktoken encoding used for token counting.
try:
encoding = tiktoken_.encoding_for_model("gpt-3.5-turbo-0301")
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
model = "cl100k_base"
encoding = tiktoken_.get_encoding(model)
return model, encoding
def get_num_tokens_from_messages(
self,
messages: list[BaseMessage],
tools: Optional[
Sequence[Union[Dict[str, Any], Type, Callable, BaseTool]]
] = None,
) -> int:
"""Calculate num tokens with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/
main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
if tools is not None:
warnings.warn(
"Counting tokens in tool schemas is not yet supported. Ignoring tools."
)
if sys.version_info[1] <= 7:
return super().get_num_tokens_from_messages(messages)
model, encoding = self._get_encoding_model()
tokens_per_message = 3
tokens_per_name = 1
num_tokens = 0
messages_dict = [convert_message_to_dict(m) for m in messages]
for message in messages_dict:
num_tokens += tokens_per_message
for key, value in message.items():
# Cast str(value) in case the message value is not a string
# This occurs with function messages
num_tokens += len(encoding.encode(str(value)))
if key == "name":
num_tokens += tokens_per_name
# every reply is primed with <im_start>assistant
num_tokens += 3
return num_tokens
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/gpt_router.py | from __future__ import annotations
import logging
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
AsyncIterator,
Callable,
Dict,
Generator,
Iterator,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
)
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.language_models.llms import create_base_retry_decorator
from langchain_core.messages import AIMessageChunk, BaseMessage, BaseMessageChunk
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import BaseModel, Field, SecretStr, model_validator
from typing_extensions import Self
from langchain_community.adapters.openai import (
convert_dict_to_message,
convert_message_to_dict,
)
from langchain_community.chat_models.openai import _convert_delta_to_message_chunk
if TYPE_CHECKING:
from gpt_router.models import ChunkedGenerationResponse, GenerationResponse
logger = logging.getLogger(__name__)
DEFAULT_API_BASE_URL = "https://gpt-router-preview.writesonic.com"
class GPTRouterException(Exception):
"""Error with the `GPTRouter APIs`"""
class GPTRouterModel(BaseModel):
"""GPTRouter model."""
name: str
provider_name: str
def get_ordered_generation_requests(
models_priority_list: List[GPTRouterModel], **kwargs: Any
) -> List:
"""
Return the body for the model router input.
"""
from gpt_router.models import GenerationParams, ModelGenerationRequest
return [
ModelGenerationRequest(
model_name=model.name,
provider_name=model.provider_name,
order=index + 1,
prompt_params=GenerationParams(**kwargs),
)
for index, model in enumerate(models_priority_list)
]
def _create_retry_decorator(
llm: GPTRouter,
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
from gpt_router import exceptions
errors = [
exceptions.GPTRouterApiTimeoutError,
exceptions.GPTRouterInternalServerError,
exceptions.GPTRouterNotAvailableError,
exceptions.GPTRouterTooManyRequestsError,
]
return create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
)
def completion_with_retry(
llm: GPTRouter,
models_priority_list: List[GPTRouterModel],
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Union[GenerationResponse, Generator[ChunkedGenerationResponse, None, None]]:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
ordered_generation_requests = get_ordered_generation_requests(
models_priority_list, **kwargs
)
return llm.client.generate(
ordered_generation_requests=ordered_generation_requests,
is_stream=kwargs.get("stream", False),
)
return _completion_with_retry(**kwargs)
async def acompletion_with_retry(
llm: GPTRouter,
models_priority_list: List[GPTRouterModel],
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Union[GenerationResponse, AsyncGenerator[ChunkedGenerationResponse, None]]:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
ordered_generation_requests = get_ordered_generation_requests(
models_priority_list, **kwargs
)
return await llm.client.agenerate(
ordered_generation_requests=ordered_generation_requests,
is_stream=kwargs.get("stream", False),
)
return await _completion_with_retry(**kwargs)
class GPTRouter(BaseChatModel):
"""GPTRouter by Writesonic Inc.
For more information, see https://gpt-router.writesonic.com/docs
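Example (a minimal usage sketch; the model and provider names are illustrative
and the ``GPT_ROUTER_API_KEY`` environment variable is assumed to be set):
.. code-block:: python
    from langchain_community.chat_models.gpt_router import GPTRouter, GPTRouterModel
    claude = GPTRouterModel(name="claude-instant-1.2", provider_name="anthropic")
    chat = GPTRouter(models_priority_list=[claude])
    chat.invoke("What is GPTRouter?")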
"""
client: Any = Field(default=None, exclude=True) #: :meta private:
models_priority_list: List[GPTRouterModel] = Field(min_length=1)
gpt_router_api_base: str = Field(default="")
"""WriteSonic GPTRouter custom endpoint"""
gpt_router_api_key: Optional[SecretStr] = None
"""WriteSonic GPTRouter API Key"""
temperature: float = 0.7
"""What sampling temperature to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
max_retries: int = 4
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
n: int = 1
"""Number of chat completions to generate for each prompt."""
max_tokens: int = 256
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
values["gpt_router_api_base"] = get_from_dict_or_env(
values,
"gpt_router_api_base",
"GPT_ROUTER_API_BASE",
DEFAULT_API_BASE_URL,
)
values["gpt_router_api_key"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"gpt_router_api_key",
"GPT_ROUTER_API_KEY",
)
)
return values
@model_validator(mode="after")
def post_init(self) -> Self:
try:
from gpt_router.client import GPTRouterClient
except ImportError:
raise GPTRouterException(
"Could not import GPTRouter python package. "
"Please install it with `pip install GPTRouter`."
)
gpt_router_client = GPTRouterClient(
self.gpt_router_api_base,
self.gpt_router_api_key.get_secret_value()
if self.gpt_router_api_key
else None,
)
self.client = gpt_router_client
return self
@property
def lc_secrets(self) -> Dict[str, str]:
return {"gpt_router_api_key": "GPT_ROUTER_API_KEY"}
@property
def lc_serializable(self) -> bool:
return True
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "gpt-router-chat"
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
**{"models_priority_list": self.models_priority_list},
**self._default_params,
}
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling GPTRouter API."""
return {
"max_tokens": self.max_tokens,
"stream": self.streaming,
"n": self.n,
"temperature": self.temperature,
**self.model_kwargs,
}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": False}
response = completion_with_retry(
self,
messages=message_dicts,
models_priority_list=self.models_priority_list,
run_manager=run_manager,
**params,
)
return self._create_chat_result(response)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": False}
response = await acompletion_with_retry(
self,
messages=message_dicts,
models_priority_list=self.models_priority_list,
run_manager=run_manager,
**params,
)
return self._create_chat_result(response)
def _create_chat_generation_chunk(
self, data: Mapping[str, Any], default_chunk_class: Type[BaseMessageChunk]
) -> Tuple[ChatGenerationChunk, Type[BaseMessageChunk]]:
chunk = _convert_delta_to_message_chunk(
{"content": data.get("text", "")}, default_chunk_class
)
finish_reason = data.get("finish_reason")
generation_info = (
dict(finish_reason=finish_reason) if finish_reason is not None else None
)
default_chunk_class = chunk.__class__
gen_chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info)
return gen_chunk, default_chunk_class
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class: Type[BaseMessageChunk] = AIMessageChunk
generator_response = completion_with_retry(
self,
messages=message_dicts,
models_priority_list=self.models_priority_list,
run_manager=run_manager,
**params,
)
for chunk in generator_response:
if chunk.event != "update":
continue
chunk, default_chunk_class = self._create_chat_generation_chunk(
chunk.data, default_chunk_class
)
if run_manager:
run_manager.on_llm_new_token(
token=chunk.message.content, chunk=chunk.message
)
yield chunk
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class: Type[BaseMessageChunk] = AIMessageChunk
generator_response = acompletion_with_retry(
self,
messages=message_dicts,
models_priority_list=self.models_priority_list,
run_manager=run_manager,
**params,
)
async for chunk in await generator_response:
if chunk.event != "update":
continue
chunk, default_chunk_class = self._create_chat_generation_chunk(
chunk.data, default_chunk_class
)
if run_manager:
await run_manager.on_llm_new_token(
token=chunk.message.content, chunk=chunk.message
)
yield chunk
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = self._default_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _create_chat_result(self, response: GenerationResponse) -> ChatResult:
generations = []
for res in response.choices:
message = convert_dict_to_message(
{
"role": "assistant",
"content": res.text,
}
)
gen = ChatGeneration(
message=message,
generation_info=dict(finish_reason=res.finish_reason),
)
generations.append(gen)
llm_output = {"token_usage": response.meta, "model": response.model}
return ChatResult(generations=generations, llm_output=llm_output)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/llamacpp.py | import json
from operator import itemgetter
from pathlib import Path
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Type,
Union,
cast,
)
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import (
BaseChatModel,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
FunctionMessage,
FunctionMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
ToolMessage,
ToolMessageChunk,
)
from langchain_core.messages.tool import InvalidToolCall, ToolCall, ToolCallChunk
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
PydanticToolsParser,
make_invalid_tool_call,
parse_tool_call,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import (
BaseModel,
Field,
model_validator,
)
from typing_extensions import Self
class ChatLlamaCpp(BaseChatModel):
"""llama.cpp model.
To use, you should have the llama-cpp-python library installed, and provide the
path to the Llama model as a named parameter to the constructor.
Check out: https://github.com/abetlen/llama-cpp-python
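Example (a minimal usage sketch; the GGUF path is a placeholder for a model
file on local disk and ``llama-cpp-python`` must be installed):
.. code-block:: python
    from langchain_community.chat_models.llamacpp import ChatLlamaCpp
    chat = ChatLlamaCpp(model_path="/path/to/model.gguf", temperature=0.5)
    chat.invoke("Describe llama.cpp in one sentence.")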
"""
client: Any = None #: :meta private:
model_path: str
"""The path to the Llama model file."""
lora_base: Optional[str] = None
"""The path to the Llama LoRA base model."""
lora_path: Optional[str] = None
"""The path to the Llama LoRA. If None, no LoRa is loaded."""
n_ctx: int = 512
"""Token context window."""
n_parts: int = -1
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = -1
"""Seed. If -1, a random seed is used."""
f16_kv: bool = True
"""Use half-precision for key/value cache."""
logits_all: bool = False
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = False
"""Only load the vocabulary, no weights."""
use_mlock: bool = False
"""Force system to keep model in RAM."""
n_threads: Optional[int] = None
"""Number of threads to use.
If None, the number of threads is automatically determined."""
n_batch: int = 8
"""Number of tokens to process in parallel.
Should be a number between 1 and n_ctx."""
n_gpu_layers: Optional[int] = None
"""Number of layers to be loaded into gpu memory. Default None."""
suffix: Optional[str] = None
"""A suffix to append to the generated text. If None, no suffix is appended."""
max_tokens: int = 256
"""The maximum number of tokens to generate."""
temperature: float = 0.8
"""The temperature to use for sampling."""
top_p: float = 0.95
"""The top-p value to use for sampling."""
logprobs: Optional[int] = None
"""The number of logprobs to return. If None, no logprobs are returned."""
echo: bool = False
"""Whether to echo the prompt."""
stop: Optional[List[str]] = None
"""A list of strings to stop generation when encountered."""
repeat_penalty: float = 1.1
"""The penalty to apply to repeated tokens."""
top_k: int = 40
"""The top-k value to use for sampling."""
last_n_tokens_size: int = 64
"""The number of tokens to look back when applying the repeat_penalty."""
use_mmap: bool = True
"""Whether to keep the model loaded in RAM"""
rope_freq_scale: float = 1.0
"""Scale factor for rope sampling."""
rope_freq_base: float = 10000.0
"""Base frequency for rope sampling."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Any additional parameters to pass to llama_cpp.Llama."""
streaming: bool = True
"""Whether to stream the results, token by token."""
grammar_path: Optional[Union[str, Path]] = None
"""
grammar_path: Path to the .gbnf file that defines formal grammars
for constraining model outputs. For instance, the grammar can be used
to force the model to generate valid JSON or to speak exclusively in emojis. At most
one of grammar_path and grammar should be passed in.
"""
grammar: Any = None
"""
grammar: formal grammar for constraining model outputs. For instance, the grammar
can be used to force the model to generate valid JSON or to speak exclusively in
emojis. At most one of grammar_path and grammar should be passed in.
"""
verbose: bool = True
"""Print verbose output to stderr."""
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate that llama-cpp-python library is installed."""
try:
from llama_cpp import Llama, LlamaGrammar
except ImportError:
raise ImportError(
"Could not import llama-cpp-python library. "
"Please install the llama-cpp-python library to "
"use this embedding model: pip install llama-cpp-python"
)
model_path = self.model_path
model_param_names = [
"rope_freq_scale",
"rope_freq_base",
"lora_path",
"lora_base",
"n_ctx",
"n_parts",
"seed",
"f16_kv",
"logits_all",
"vocab_only",
"use_mlock",
"n_threads",
"n_batch",
"use_mmap",
"last_n_tokens_size",
"verbose",
]
model_params = {k: getattr(self, k) for k in model_param_names}
# For backwards compatibility, only include if non-null.
if self.n_gpu_layers is not None:
model_params["n_gpu_layers"] = self.n_gpu_layers
model_params.update(self.model_kwargs)
try:
self.client = Llama(model_path, **model_params)
except Exception as e:
raise ValueError(
f"Could not load Llama model from path: {model_path}. "
f"Received error {e}"
)
if self.grammar and self.grammar_path:
grammar = self.grammar
grammar_path = self.grammar_path
raise ValueError(
"Can only pass in one of grammar and grammar_path. Received "
f"{grammar=} and {grammar_path=}."
)
elif isinstance(self.grammar, str):
self.grammar = LlamaGrammar.from_string(self.grammar)
elif self.grammar_path:
self.grammar = LlamaGrammar.from_file(self.grammar_path)
else:
pass
return self
def _get_parameters(self, stop: Optional[List[str]]) -> Dict[str, Any]:
"""
        Performs a sanity check and prepares parameters in the format needed by llama_cpp.
Returns:
Dictionary containing the combined parameters.
"""
params = self._default_params
        # llama_cpp expects a "stop" key rather than "stop_sequences", so remove it:
stop_sequences = params.pop("stop_sequences")
        # then set "stop" from the call argument, the configured default, or an empty list:
params["stop"] = stop or stop_sequences or self.stop or []
return params
def _create_message_dicts(
self, messages: List[BaseMessage]
) -> List[Dict[str, Any]]:
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts
def _create_chat_result(self, response: dict) -> ChatResult:
generations = []
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
generation_info = dict(finish_reason=res.get("finish_reason"))
if "logprobs" in res:
generation_info["logprobs"] = res["logprobs"]
gen = ChatGeneration(message=message, generation_info=generation_info)
generations.append(gen)
token_usage = response.get("usage", {})
llm_output = {
"token_usage": token_usage,
# "system_fingerprint": response.get("system_fingerprint", ""),
}
return ChatResult(generations=generations, llm_output=llm_output)
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
params = {**self._get_parameters(stop), **kwargs}
        # Stream only when no tool_choice is set; tool calling runs as a single
        # non-streaming request
if self.streaming and not params.get("tool_choice"):
stream_iter = self._stream(messages, run_manager=run_manager, **kwargs)
return generate_from_stream(stream_iter)
message_dicts = self._create_message_dicts(messages)
response = self.client.create_chat_completion(messages=message_dicts, **params)
return self._create_chat_result(response)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
params = {**self._get_parameters(stop), **kwargs}
message_dicts = self._create_message_dicts(messages)
result = self.client.create_chat_completion(
messages=message_dicts, stream=True, **params
)
default_chunk_class = AIMessageChunk
count = 0
for chunk in result:
count += 1
if not isinstance(chunk, dict):
chunk = chunk.model_dump()
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
if choice["delta"] is None:
continue
chunk = _convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
)
generation_info = {}
if finish_reason := choice.get("finish_reason"):
generation_info["finish_reason"] = finish_reason
logprobs = choice.get("logprobs")
if logprobs:
generation_info["logprobs"] = logprobs
default_chunk_class = chunk.__class__
chunk = ChatGenerationChunk(
message=chunk, generation_info=generation_info or None
)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk, logprobs=logprobs)
yield chunk
def bind_tools(
self,
tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
*,
tool_choice: Optional[Union[dict, bool, str]] = None,
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
"""Bind tool-like objects to this chat model
        tool_choice: does not currently support "any" or "auto" choices like the
            OpenAI tool-calling API. To force a specific tool, pass a dict of the
            form {"type": "function", "function": {"name": <<tool_name>>}}.
"""
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
tool_names = [ft["function"]["name"] for ft in formatted_tools]
if tool_choice:
if isinstance(tool_choice, dict):
if not any(
tool_choice["function"]["name"] == name for name in tool_names
):
raise ValueError(
f"Tool choice {tool_choice=} was specified, but the only "
f"provided tools were {tool_names}."
)
elif isinstance(tool_choice, str):
chosen = [
f for f in formatted_tools if f["function"]["name"] == tool_choice
]
if not chosen:
raise ValueError(
f"Tool choice {tool_choice=} was specified, but the only "
f"provided tools were {tool_names}."
)
elif isinstance(tool_choice, bool):
if len(formatted_tools) > 1:
raise ValueError(
"tool_choice=True can only be specified when a single tool is "
f"passed in. Received {len(tools)} tools."
)
tool_choice = formatted_tools[0]
else:
raise ValueError(
"""Unrecognized tool_choice type. Expected dict having format like
this {"type": "function", "function": {"name": <<tool_name>>}}"""
f"Received: {tool_choice}"
)
kwargs["tool_choice"] = tool_choice
return super().bind(tools=formatted_tools, **kwargs)
def with_structured_output(
self,
schema: Optional[Union[Dict, Type[BaseModel]]] = None,
*,
include_raw: bool = False,
**kwargs: Any,
) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
"""Model wrapper that returns outputs formatted to match the given schema.
Args:
schema: The output schema as a dict or a Pydantic class. If a Pydantic class
then the model output will be an object of that class. If a dict then
the model output will be a dict. With a Pydantic class the returned
attributes will be validated, whereas with a dict they will not be. If
                `schema` is a dict, then the dict
must match the OpenAI function-calling spec or be a valid JSON schema
with top level 'title' and 'description' keys specified.
include_raw: If False then only the parsed structured output is returned. If
an error occurs during model output parsing it will be raised. If True
then both the raw model response (a BaseMessage) and the parsed model
response will be returned. If an error occurs during output parsing it
will be caught and returned as well. The final output is always a dict
with keys "raw", "parsed", and "parsing_error".
kwargs: Any other args to bind to model, ``self.bind(..., **kwargs)``.
Returns:
A Runnable that takes any ChatModel input and returns as output:
If include_raw is True then a dict with keys:
raw: BaseMessage
parsed: Optional[_DictOrPydantic]
parsing_error: Optional[BaseException]
If include_raw is False then just _DictOrPydantic is returned,
where _DictOrPydantic depends on the schema:
If schema is a Pydantic class then _DictOrPydantic is the Pydantic
class.
If schema is a dict then _DictOrPydantic is a dict.
Example: Pydantic schema (include_raw=False):
.. code-block:: python
                import multiprocessing

                from langchain_community.chat_models import ChatLlamaCpp
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
llm = ChatLlamaCpp(
temperature=0.,
model_path="./SanctumAI-meta-llama-3-8b-instruct.Q8_0.gguf",
n_ctx=10000,
n_gpu_layers=4,
n_batch=200,
max_tokens=512,
n_threads=multiprocessing.cpu_count() - 1,
repeat_penalty=1.5,
top_p=0.5,
stop=["<|end_of_text|>", "<|eot_id|>"],
)
structured_llm = llm.with_structured_output(AnswerWithJustification)
structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> AnswerWithJustification(
# answer='They weigh the same',
# justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
# )
Example: Pydantic schema (include_raw=True):
.. code-block:: python
                import multiprocessing

                from langchain_community.chat_models import ChatLlamaCpp
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
llm = ChatLlamaCpp(
temperature=0.,
model_path="./SanctumAI-meta-llama-3-8b-instruct.Q8_0.gguf",
n_ctx=10000,
n_gpu_layers=4,
n_batch=200,
max_tokens=512,
n_threads=multiprocessing.cpu_count() - 1,
repeat_penalty=1.5,
top_p=0.5,
stop=["<|end_of_text|>", "<|eot_id|>"],
)
structured_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True)
structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> {
# 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
# 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
# 'parsing_error': None
# }
Example: dict schema (include_raw=False):
.. code-block:: python
                import multiprocessing

                from langchain_community.chat_models import ChatLlamaCpp
from pydantic import BaseModel
from langchain_core.utils.function_calling import convert_to_openai_tool
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
dict_schema = convert_to_openai_tool(AnswerWithJustification)
llm = ChatLlamaCpp(
temperature=0.,
model_path="./SanctumAI-meta-llama-3-8b-instruct.Q8_0.gguf",
n_ctx=10000,
n_gpu_layers=4,
n_batch=200,
max_tokens=512,
n_threads=multiprocessing.cpu_count() - 1,
repeat_penalty=1.5,
top_p=0.5,
stop=["<|end_of_text|>", "<|eot_id|>"],
)
structured_llm = llm.with_structured_output(dict_schema)
structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> {
# 'answer': 'They weigh the same',
# 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
# }
""" # noqa: E501
if kwargs:
raise ValueError(f"Received unsupported arguments {kwargs}")
is_pydantic_schema = isinstance(schema, type) and is_basemodel_subclass(schema)
if schema is None:
raise ValueError(
"schema must be specified when method is 'function_calling'. "
"Received None."
)
tool_name = convert_to_openai_tool(schema)["function"]["name"]
tool_choice = {"type": "function", "function": {"name": tool_name}}
llm = self.bind_tools([schema], tool_choice=tool_choice)
if is_pydantic_schema:
output_parser: OutputParserLike = PydanticToolsParser(
tools=[cast(Type, schema)], first_tool_only=True
)
else:
output_parser = JsonOutputKeyToolsParser(
key_name=tool_name, first_tool_only=True
)
if include_raw:
parser_assign = RunnablePassthrough.assign(
parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
)
parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
parser_with_fallback = parser_assign.with_fallbacks(
[parser_none], exception_key="parsing_error"
)
return RunnableMap(raw=llm) | parser_with_fallback
else:
return llm | output_parser
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Return a dictionary of identifying parameters.
        This information is used by the LangChain callback system, which
        is used for tracing purposes and makes it possible to monitor LLMs.
"""
return {
# The model name allows users to specify custom token counting
# rules in LLM monitoring applications (e.g., in LangSmith users
# can provide per token pricing for their model and monitor
# costs for the given LLM.)
**{"model_path": self.model_path},
**self._default_params,
}
@property
def _llm_type(self) -> str:
"""Get the type of language model used by this chat model."""
return "llama-cpp-python"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling create_chat_completion."""
params: Dict = {
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"logprobs": self.logprobs,
"stop_sequences": self.stop, # key here is convention among LLM classes
"repeat_penalty": self.repeat_penalty,
}
if self.grammar:
params["grammar"] = self.grammar
return params
def _lc_tool_call_to_openai_tool_call(tool_call: ToolCall) -> dict:
return {
"type": "function",
"id": tool_call["id"],
"function": {
"name": tool_call["name"],
"arguments": json.dumps(tool_call["args"]),
},
}
def _lc_invalid_tool_call_to_openai_tool_call(
invalid_tool_call: InvalidToolCall,
) -> dict:
return {
"type": "function",
"id": invalid_tool_call["id"],
"function": {
"name": invalid_tool_call["name"],
"arguments": invalid_tool_call["args"],
},
}
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
"""Convert a dictionary to a LangChain message.
Args:
_dict: The dictionary.
Returns:
The LangChain message.
"""
role = _dict.get("role")
name = _dict.get("name")
id_ = _dict.get("id")
if role == "user":
return HumanMessage(content=_dict.get("content", ""), id=id_, name=name)
elif role == "assistant":
# Fix for azure
# Also OpenAI returns None for tool invocations
content = _dict.get("content", "") or ""
additional_kwargs: Dict = {}
if function_call := _dict.get("function_call"):
additional_kwargs["function_call"] = dict(function_call)
tool_calls = []
invalid_tool_calls = []
if raw_tool_calls := _dict.get("tool_calls"):
additional_kwargs["tool_calls"] = raw_tool_calls
for raw_tool_call in raw_tool_calls:
try:
tc = parse_tool_call(raw_tool_call, return_id=True)
except Exception as e:
invalid_tc = make_invalid_tool_call(raw_tool_call, str(e))
invalid_tool_calls.append(invalid_tc)
else:
if not tc:
continue
else:
tool_calls.append(tc)
return AIMessage(
content=content,
additional_kwargs=additional_kwargs,
name=name,
id=id_,
tool_calls=tool_calls, # type: ignore[arg-type]
invalid_tool_calls=invalid_tool_calls,
)
elif role == "system":
return SystemMessage(content=_dict.get("content", ""), name=name, id=id_)
elif role == "function":
return FunctionMessage(
content=_dict.get("content", ""), name=cast(str, _dict.get("name")), id=id_
)
elif role == "tool":
additional_kwargs = {}
if "name" in _dict:
additional_kwargs["name"] = _dict["name"]
return ToolMessage(
content=_dict.get("content", ""),
tool_call_id=cast(str, _dict.get("tool_call_id")),
additional_kwargs=additional_kwargs,
name=name,
id=id_,
)
else:
return ChatMessage(
content=_dict.get("content", ""), role=cast(str, role), id=id_
)
def _format_message_content(content: Any) -> Any:
"""Format message content."""
if content and isinstance(content, list):
# Remove unexpected block types
formatted_content = []
for block in content:
if (
isinstance(block, dict)
and "type" in block
and block["type"] == "tool_use"
):
continue
else:
formatted_content.append(block)
else:
formatted_content = content
return formatted_content
def _convert_message_to_dict(message: BaseMessage) -> dict:
"""Convert a LangChain message to a dictionary.
Args:
message: The LangChain message.
Returns:
The dictionary.
"""
message_dict: Dict[str, Any] = {
"content": _format_message_content(message.content),
}
if (name := message.name or message.additional_kwargs.get("name")) is not None:
message_dict["name"] = name
# populate role and additional message data
if isinstance(message, ChatMessage):
message_dict["role"] = message.role
elif isinstance(message, HumanMessage):
message_dict["role"] = "user"
elif isinstance(message, AIMessage):
message_dict["role"] = "assistant"
if "function_call" in message.additional_kwargs:
message_dict["function_call"] = message.additional_kwargs["function_call"]
if message.tool_calls or message.invalid_tool_calls:
message_dict["tool_calls"] = [
_lc_tool_call_to_openai_tool_call(tc) for tc in message.tool_calls
] + [
_lc_invalid_tool_call_to_openai_tool_call(tc)
for tc in message.invalid_tool_calls
]
elif "tool_calls" in message.additional_kwargs:
message_dict["tool_calls"] = message.additional_kwargs["tool_calls"]
tool_call_supported_props = {"id", "type", "function"}
message_dict["tool_calls"] = [
{k: v for k, v in tool_call.items() if k in tool_call_supported_props}
for tool_call in message_dict["tool_calls"]
]
else:
pass
# If tool calls present, content null value should be None not empty string.
if "function_call" in message_dict or "tool_calls" in message_dict:
message_dict["content"] = message_dict["content"] or None
elif isinstance(message, SystemMessage):
message_dict["role"] = "system"
elif isinstance(message, FunctionMessage):
message_dict["role"] = "function"
elif isinstance(message, ToolMessage):
message_dict["role"] = "tool"
message_dict["tool_call_id"] = message.tool_call_id
supported_props = {"content", "role", "tool_call_id"}
message_dict = {k: v for k, v in message_dict.items() if k in supported_props}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
id_ = _dict.get("id")
role = cast(str, _dict.get("role"))
content = cast(str, _dict.get("content") or "")
additional_kwargs: Dict = {}
if _dict.get("function_call"):
function_call = dict(_dict["function_call"])
if "name" in function_call and function_call["name"] is None:
function_call["name"] = ""
additional_kwargs["function_call"] = function_call
tool_call_chunks = []
if raw_tool_calls := _dict.get("tool_calls"):
additional_kwargs["tool_calls"] = raw_tool_calls
for rtc in raw_tool_calls:
try:
tool_call = ToolCallChunk(
name=rtc["function"].get("name"),
args=rtc["function"].get("arguments"),
id=rtc.get("id"),
index=rtc["index"],
)
tool_call_chunks.append(tool_call)
except KeyError:
pass
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content, id=id_)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(
content=content,
additional_kwargs=additional_kwargs,
id=id_,
tool_call_chunks=tool_call_chunks,
)
elif role == "system" or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content, id=id_)
elif role == "function" or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=content, name=_dict["name"], id=id_)
elif role == "tool" or default_class == ToolMessageChunk:
return ToolMessageChunk(
content=content, tool_call_id=_dict["tool_call_id"], id=id_
)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role, id=id_)
else:
return default_class(content=content, id=id_) # type: ignore
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/cohere.py | from typing import Any, AsyncIterator, Dict, Iterator, List, Optional
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from pydantic import ConfigDict
from langchain_community.llms.cohere import BaseCohere
def get_role(message: BaseMessage) -> str:
"""Get the role of the message.
Args:
message: The message.
Returns:
The role of the message.
Raises:
ValueError: If the message is of an unknown type.
"""
if isinstance(message, ChatMessage) or isinstance(message, HumanMessage):
return "User"
elif isinstance(message, AIMessage):
return "Chatbot"
elif isinstance(message, SystemMessage):
return "System"
else:
raise ValueError(f"Got unknown type {message}")
def get_cohere_chat_request(
messages: List[BaseMessage],
*,
connectors: Optional[List[Dict[str, str]]] = None,
**kwargs: Any,
) -> Dict[str, Any]:
"""Get the request for the Cohere chat API.
Args:
messages: The messages.
connectors: The connectors.
**kwargs: The keyword arguments.
Returns:
The request for the Cohere chat API.
"""
documents = (
None
if "source_documents" not in kwargs
else [
{
"snippet": doc.page_content,
"id": doc.metadata.get("id") or f"doc-{str(i)}",
}
for i, doc in enumerate(kwargs["source_documents"])
]
)
kwargs.pop("source_documents", None)
maybe_connectors = connectors if documents is None else None
# by enabling automatic prompt truncation, the probability of request failure is
# reduced with minimal impact on response quality
prompt_truncation = (
"AUTO" if documents is not None or connectors is not None else None
)
req = {
"message": messages[-1].content,
"chat_history": [
{"role": get_role(x), "message": x.content} for x in messages[:-1]
],
"documents": documents,
"connectors": maybe_connectors,
"prompt_truncation": prompt_truncation,
**kwargs,
}
return {k: v for k, v in req.items() if v is not None}
@deprecated(
since="0.0.30", removal="1.0", alternative_import="langchain_cohere.ChatCohere"
)
class ChatCohere(BaseChatModel, BaseCohere):
"""`Cohere` chat large language models.
To use, you should have the ``cohere`` python package installed, and the
environment variable ``COHERE_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatCohere
from langchain_core.messages import HumanMessage
chat = ChatCohere(max_tokens=256, temperature=0.75)
messages = [HumanMessage(content="knock knock")]
chat.invoke(messages)
"""
model_config = ConfigDict(
populate_by_name=True,
arbitrary_types_allowed=True,
)
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "cohere-chat"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Cohere API."""
return {
"temperature": self.temperature,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
request = get_cohere_chat_request(messages, **self._default_params, **kwargs)
if hasattr(self.client, "chat_stream"): # detect and support sdk v5
stream = self.client.chat_stream(**request)
else:
stream = self.client.chat(**request, stream=True)
for data in stream:
if data.event_type == "text-generation":
delta = data.text
chunk = ChatGenerationChunk(message=AIMessageChunk(content=delta))
if run_manager:
run_manager.on_llm_new_token(delta, chunk=chunk)
yield chunk
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
request = get_cohere_chat_request(messages, **self._default_params, **kwargs)
if hasattr(self.async_client, "chat_stream"): # detect and support sdk v5
stream = await self.async_client.chat_stream(**request)
else:
stream = await self.async_client.chat(**request, stream=True)
async for data in stream:
if data.event_type == "text-generation":
delta = data.text
chunk = ChatGenerationChunk(message=AIMessageChunk(content=delta))
if run_manager:
await run_manager.on_llm_new_token(delta, chunk=chunk)
yield chunk
def _get_generation_info(self, response: Any) -> Dict[str, Any]:
"""Get the generation info from cohere API response."""
return {
"documents": response.documents,
"citations": response.citations,
"search_results": response.search_results,
"search_queries": response.search_queries,
"token_count": response.token_count,
}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
request = get_cohere_chat_request(messages, **self._default_params, **kwargs)
response = self.client.chat(**request)
message = AIMessage(content=response.text)
generation_info = None
if hasattr(response, "documents"):
generation_info = self._get_generation_info(response)
return ChatResult(
generations=[
ChatGeneration(message=message, generation_info=generation_info)
]
)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
request = get_cohere_chat_request(messages, **self._default_params, **kwargs)
        response = await self.async_client.chat(**request)
message = AIMessage(content=response.text)
generation_info = None
if hasattr(response, "documents"):
generation_info = self._get_generation_info(response)
return ChatResult(
generations=[
ChatGeneration(message=message, generation_info=generation_info)
]
)
def get_num_tokens(self, text: str) -> int:
"""Calculate number of tokens."""
return len(self.client.tokenize(text=text).tokens)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/google_palm.py | """Wrapper around Google's PaLM Chat API."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, cast
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import (
ChatGeneration,
ChatResult,
)
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import BaseModel, SecretStr
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
if TYPE_CHECKING:
import google.generativeai as genai
logger = logging.getLogger(__name__)
class ChatGooglePalmError(Exception):
"""Error with the `Google PaLM` API."""
def _truncate_at_stop_tokens(
text: str,
stop: Optional[List[str]],
) -> str:
"""Truncates text at the earliest stop token found."""
if stop is None:
return text
for stop_token in stop:
stop_token_idx = text.find(stop_token)
if stop_token_idx != -1:
text = text[:stop_token_idx]
return text
def _response_to_result(
response: genai.types.ChatResponse,
stop: Optional[List[str]],
) -> ChatResult:
"""Converts a PaLM API response into a LangChain ChatResult."""
if not response.candidates:
raise ChatGooglePalmError("ChatResponse must have at least one candidate.")
generations: List[ChatGeneration] = []
for candidate in response.candidates:
author = candidate.get("author")
if author is None:
raise ChatGooglePalmError(f"ChatResponse must have an author: {candidate}")
content = _truncate_at_stop_tokens(candidate.get("content", ""), stop)
if content is None:
raise ChatGooglePalmError(f"ChatResponse must have a content: {candidate}")
if author == "ai":
generations.append(
ChatGeneration(text=content, message=AIMessage(content=content))
)
elif author == "human":
generations.append(
ChatGeneration(
text=content,
message=HumanMessage(content=content),
)
)
else:
generations.append(
ChatGeneration(
text=content,
message=ChatMessage(role=author, content=content),
)
)
return ChatResult(generations=generations)
def _messages_to_prompt_dict(
input_messages: List[BaseMessage],
) -> genai.types.MessagePromptDict:
"""Converts a list of LangChain messages into a PaLM API MessagePrompt structure."""
import google.generativeai as genai
context: str = ""
examples: List[genai.types.MessageDict] = []
messages: List[genai.types.MessageDict] = []
remaining = list(enumerate(input_messages))
while remaining:
index, input_message = remaining.pop(0)
if isinstance(input_message, SystemMessage):
if index != 0:
raise ChatGooglePalmError("System message must be first input message.")
context = cast(str, input_message.content)
elif isinstance(input_message, HumanMessage) and input_message.example:
if messages:
raise ChatGooglePalmError(
"Message examples must come before other messages."
)
_, next_input_message = remaining.pop(0)
if isinstance(next_input_message, AIMessage) and next_input_message.example:
examples.extend(
[
genai.types.MessageDict(
author="human", content=input_message.content
),
genai.types.MessageDict(
author="ai", content=next_input_message.content
),
]
)
else:
raise ChatGooglePalmError(
"Human example message must be immediately followed by an "
" AI example response."
)
elif isinstance(input_message, AIMessage) and input_message.example:
raise ChatGooglePalmError(
"AI example message must be immediately preceded by a Human "
"example message."
)
elif isinstance(input_message, AIMessage):
messages.append(
genai.types.MessageDict(author="ai", content=input_message.content)
)
elif isinstance(input_message, HumanMessage):
messages.append(
genai.types.MessageDict(author="human", content=input_message.content)
)
elif isinstance(input_message, ChatMessage):
messages.append(
genai.types.MessageDict(
author=input_message.role, content=input_message.content
)
)
else:
raise ChatGooglePalmError(
"Messages without an explicit role not supported by PaLM API."
)
return genai.types.MessagePromptDict(
context=context,
examples=examples,
messages=messages,
)
def _create_retry_decorator() -> Callable[[Any], Any]:
"""Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions"""
import google.api_core.exceptions
multiplier = 2
min_seconds = 1
max_seconds = 60
max_retries = 10
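    # Retry transient PaLM API failures with exponential backoff between
    # min_seconds and max_seconds, for at most max_retries attempts.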
return retry(
reraise=True,
stop=stop_after_attempt(max_retries),
wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(google.api_core.exceptions.ResourceExhausted)
| retry_if_exception_type(google.api_core.exceptions.ServiceUnavailable)
| retry_if_exception_type(google.api_core.exceptions.GoogleAPIError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def chat_with_retry(llm: ChatGooglePalm, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator()
@retry_decorator
def _chat_with_retry(**kwargs: Any) -> Any:
return llm.client.chat(**kwargs)
return _chat_with_retry(**kwargs)
async def achat_with_retry(llm: ChatGooglePalm, **kwargs: Any) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator()
@retry_decorator
async def _achat_with_retry(**kwargs: Any) -> Any:
        # Call the PaLM SDK's async chat API.
return await llm.client.chat_async(**kwargs)
return await _achat_with_retry(**kwargs)
class ChatGooglePalm(BaseChatModel, BaseModel):
"""`Google PaLM` Chat models API.
To use you must have the google.generativeai Python package installed and
either:
1. The ``GOOGLE_API_KEY`` environment variable set with your API key, or
    2. Pass your API key using the google_api_key kwarg to the ChatGooglePalm
       constructor.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatGooglePalm
chat = ChatGooglePalm()
"""
client: Any #: :meta private:
model_name: str = "models/chat-bison-001"
"""Model name to use."""
google_api_key: Optional[SecretStr] = None
temperature: Optional[float] = None
"""Run inference with this temperature. Must be in the closed
interval [0.0, 1.0]."""
top_p: Optional[float] = None
"""Decode using nucleus sampling: consider the smallest set of tokens whose
probability sum is at least top_p. Must be in the closed interval [0.0, 1.0]."""
top_k: Optional[int] = None
"""Decode using top-k sampling: consider the set of top_k most probable tokens.
Must be positive."""
n: int = 1
"""Number of chat completions to generate for each prompt. Note that the API may
not return the full n completions if duplicates are generated."""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"google_api_key": "GOOGLE_API_KEY"}
@classmethod
    def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "chat_models", "google_palm"]
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate api key, python package exists, temperature, top_p, and top_k."""
google_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "google_api_key", "GOOGLE_API_KEY")
)
try:
import google.generativeai as genai
genai.configure(api_key=google_api_key.get_secret_value())
except ImportError:
raise ChatGooglePalmError(
"Could not import google.generativeai python package. "
"Please install it with `pip install google-generativeai`"
)
values["client"] = genai
if values["temperature"] is not None and not 0 <= values["temperature"] <= 1:
raise ValueError("temperature must be in the range [0.0, 1.0]")
if values["top_p"] is not None and not 0 <= values["top_p"] <= 1:
raise ValueError("top_p must be in the range [0.0, 1.0]")
if values["top_k"] is not None and values["top_k"] <= 0:
raise ValueError("top_k must be positive")
return values
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
prompt = _messages_to_prompt_dict(messages)
response: genai.types.ChatResponse = chat_with_retry(
self,
model=self.model_name,
prompt=prompt,
temperature=self.temperature,
top_p=self.top_p,
top_k=self.top_k,
candidate_count=self.n,
**kwargs,
)
return _response_to_result(response, stop)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
prompt = _messages_to_prompt_dict(messages)
response: genai.types.ChatResponse = await achat_with_retry(
self,
model=self.model_name,
prompt=prompt,
temperature=self.temperature,
top_p=self.top_p,
top_k=self.top_k,
candidate_count=self.n,
)
return _response_to_result(response, stop)
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"model_name": self.model_name,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"n": self.n,
}
@property
def _llm_type(self) -> str:
return "google-palm-chat"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/oci_generative_ai.py | import json
import re
import uuid
from abc import ABC, abstractmethod
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Type,
Union,
)
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import (
BaseChatModel,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
ToolCall,
ToolMessage,
)
from langchain_core.messages.tool import ToolCallChunk
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
PydanticToolsParser,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_function
from pydantic import BaseModel, ConfigDict
from langchain_community.llms.oci_generative_ai import OCIGenAIBase
from langchain_community.llms.utils import enforce_stop_tokens
CUSTOM_ENDPOINT_PREFIX = "ocid1.generativeaiendpoint"
JSON_TO_PYTHON_TYPES = {
"string": "str",
"number": "float",
"boolean": "bool",
"integer": "int",
"array": "List",
"object": "Dict",
}
def _remove_signature_from_tool_description(name: str, description: str) -> str:
"""
Removes the `{name}{signature} - ` prefix and Args: section from tool description.
The signature is usually present for tools created with the @tool decorator,
whereas the Args: section may be present in function doc blocks.
"""
description = re.sub(rf"^{name}\(.*?\) -(?:> \w+? -)? ", "", description)
description = re.sub(r"(?s)(?:\n?\n\s*?)?Args:.*$", "", description)
return description
def _format_oci_tool_calls(
tool_calls: Optional[List[Any]] = None,
) -> List[Dict]:
"""
    Formats an OCI GenAI API response into the tool-call format used by LangChain.
"""
if not tool_calls:
return []
formatted_tool_calls = []
for tool_call in tool_calls:
formatted_tool_calls.append(
{
"id": uuid.uuid4().hex[:],
"function": {
"name": tool_call.name,
"arguments": json.dumps(tool_call.parameters),
},
"type": "function",
}
)
return formatted_tool_calls
def _convert_oci_tool_call_to_langchain(tool_call: Any) -> ToolCall:
"""Convert a OCI GenAI tool call into langchain_core.messages.ToolCall"""
_id = uuid.uuid4().hex[:]
return ToolCall(name=tool_call.name, args=tool_call.parameters, id=_id)
class Provider(ABC):
@property
@abstractmethod
def stop_sequence_key(self) -> str: ...
@abstractmethod
def chat_response_to_text(self, response: Any) -> str: ...
@abstractmethod
def chat_stream_to_text(self, event_data: Dict) -> str: ...
@abstractmethod
def is_chat_stream_end(self, event_data: Dict) -> bool: ...
@abstractmethod
def chat_generation_info(self, response: Any) -> Dict[str, Any]: ...
@abstractmethod
def chat_stream_generation_info(self, event_data: Dict) -> Dict[str, Any]: ...
@abstractmethod
def get_role(self, message: BaseMessage) -> str: ...
@abstractmethod
def messages_to_oci_params(
self, messages: Any, **kwargs: Any
) -> Dict[str, Any]: ...
@abstractmethod
def convert_to_oci_tool(
self,
tool: Union[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
) -> Dict[str, Any]: ...
class CohereProvider(Provider):
stop_sequence_key: str = "stop_sequences"
def __init__(self) -> None:
from oci.generative_ai_inference import models
self.oci_chat_request = models.CohereChatRequest
self.oci_tool = models.CohereTool
self.oci_tool_param = models.CohereParameterDefinition
self.oci_tool_result = models.CohereToolResult
self.oci_tool_call = models.CohereToolCall
self.oci_chat_message = {
"USER": models.CohereUserMessage,
"CHATBOT": models.CohereChatBotMessage,
"SYSTEM": models.CohereSystemMessage,
"TOOL": models.CohereToolMessage,
}
self.chat_api_format = models.BaseChatRequest.API_FORMAT_COHERE
def chat_response_to_text(self, response: Any) -> str:
return response.data.chat_response.text
def chat_stream_to_text(self, event_data: Dict) -> str:
if "text" in event_data:
if "finishedReason" in event_data or "toolCalls" in event_data:
return ""
else:
return event_data["text"]
else:
return ""
def is_chat_stream_end(self, event_data: Dict) -> bool:
return "finishReason" in event_data
def chat_generation_info(self, response: Any) -> Dict[str, Any]:
generation_info: Dict[str, Any] = {
"documents": response.data.chat_response.documents,
"citations": response.data.chat_response.citations,
"search_queries": response.data.chat_response.search_queries,
"is_search_required": response.data.chat_response.is_search_required,
"finish_reason": response.data.chat_response.finish_reason,
}
if response.data.chat_response.tool_calls:
# Only populate tool_calls when 1) present on the response and
# 2) has one or more calls.
generation_info["tool_calls"] = _format_oci_tool_calls(
response.data.chat_response.tool_calls
)
return generation_info
def chat_stream_generation_info(self, event_data: Dict) -> Dict[str, Any]:
generation_info: Dict[str, Any] = {
"documents": event_data.get("documents"),
"citations": event_data.get("citations"),
"finish_reason": event_data.get("finishReason"),
}
if "toolCalls" in event_data:
generation_info["tool_calls"] = []
for tool_call in event_data["toolCalls"]:
generation_info["tool_calls"].append(
{
"id": uuid.uuid4().hex[:],
"function": {
"name": tool_call["name"],
"arguments": json.dumps(tool_call["parameters"]),
},
"type": "function",
}
)
generation_info = {k: v for k, v in generation_info.items() if v is not None}
return generation_info
def get_role(self, message: BaseMessage) -> str:
if isinstance(message, HumanMessage):
return "USER"
elif isinstance(message, AIMessage):
return "CHATBOT"
elif isinstance(message, SystemMessage):
return "SYSTEM"
elif isinstance(message, ToolMessage):
return "TOOL"
else:
raise ValueError(f"Got unknown type {message}")
def messages_to_oci_params(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> Dict[str, Any]:
is_force_single_step = kwargs.get("is_force_single_step") or False
oci_chat_history = []
for msg in messages[:-1]:
if self.get_role(msg) == "USER" or self.get_role(msg) == "SYSTEM":
oci_chat_history.append(
self.oci_chat_message[self.get_role(msg)](message=msg.content)
)
elif isinstance(msg, AIMessage):
if msg.tool_calls and is_force_single_step:
continue
tool_calls = (
[
self.oci_tool_call(name=tc["name"], parameters=tc["args"])
for tc in msg.tool_calls
]
if msg.tool_calls
else None
)
msg_content = msg.content if msg.content else " "
oci_chat_history.append(
self.oci_chat_message[self.get_role(msg)](
message=msg_content, tool_calls=tool_calls
)
)
# Get the messages for the current chat turn
current_chat_turn_messages = []
for message in messages[::-1]:
current_chat_turn_messages.append(message)
if isinstance(message, HumanMessage):
break
current_chat_turn_messages = current_chat_turn_messages[::-1]
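        # Build Cohere tool results for ToolMessages in the current turn by matching
        # each one to the corresponding tool call on the most recent AI message.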
oci_tool_results: Union[List[Any], None] = []
for message in current_chat_turn_messages:
if isinstance(message, ToolMessage):
tool_message = message
previous_ai_msgs = [
message
for message in current_chat_turn_messages
if isinstance(message, AIMessage) and message.tool_calls
]
if previous_ai_msgs:
previous_ai_msg = previous_ai_msgs[-1]
for lc_tool_call in previous_ai_msg.tool_calls:
if lc_tool_call["id"] == tool_message.tool_call_id:
tool_result = self.oci_tool_result()
tool_result.call = self.oci_tool_call(
name=lc_tool_call["name"],
parameters=lc_tool_call["args"],
)
tool_result.outputs = [{"output": tool_message.content}]
oci_tool_results.append(tool_result)
if not oci_tool_results:
oci_tool_results = None
message_str = "" if oci_tool_results else messages[-1].content
oci_params = {
"message": message_str,
"chat_history": oci_chat_history,
"tool_results": oci_tool_results,
"api_format": self.chat_api_format,
}
return {k: v for k, v in oci_params.items() if v is not None}
def convert_to_oci_tool(
self,
tool: Union[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
) -> Dict[str, Any]:
"""
Convert a BaseTool instance, JSON schema dict, or BaseModel type to a OCI tool.
"""
if isinstance(tool, BaseTool):
return self.oci_tool(
name=tool.name,
description=_remove_signature_from_tool_description(
tool.name, tool.description
),
parameter_definitions={
p_name: self.oci_tool_param(
description=p_def.get("description")
if "description" in p_def
else "",
type=JSON_TO_PYTHON_TYPES.get(
p_def.get("type"), p_def.get("type")
),
is_required="default" not in p_def,
)
for p_name, p_def in tool.args.items()
},
)
elif isinstance(tool, dict):
if not all(k in tool for k in ("title", "description", "properties")):
raise ValueError(
"Unsupported dict type. Tool must be passed in as a BaseTool instance, JSON schema dict, or BaseModel type." # noqa: E501
)
return self.oci_tool(
name=tool.get("title"),
description=tool.get("description"),
parameter_definitions={
p_name: self.oci_tool_param(
description=p_def.get("description"),
type=JSON_TO_PYTHON_TYPES.get(
p_def.get("type"), p_def.get("type")
),
is_required="default" not in p_def,
)
for p_name, p_def in tool.get("properties", {}).items()
},
)
elif (isinstance(tool, type) and issubclass(tool, BaseModel)) or callable(tool):
as_json_schema_function = convert_to_openai_function(tool)
parameters = as_json_schema_function.get("parameters", {})
properties = parameters.get("properties", {})
return self.oci_tool(
name=as_json_schema_function.get("name"),
description=as_json_schema_function.get(
"description",
as_json_schema_function.get("name"),
),
parameter_definitions={
p_name: self.oci_tool_param(
description=p_def.get("description"),
type=JSON_TO_PYTHON_TYPES.get(
p_def.get("type"), p_def.get("type")
),
is_required=p_name in parameters.get("required", []),
)
for p_name, p_def in properties.items()
},
)
else:
raise ValueError(
f"Unsupported tool type {type(tool)}. Tool must be passed in as a BaseTool instance, JSON schema dict, or BaseModel type." # noqa: E501
)
class MetaProvider(Provider):
stop_sequence_key: str = "stop"
def __init__(self) -> None:
from oci.generative_ai_inference import models
self.oci_chat_request = models.GenericChatRequest
self.oci_chat_message = {
"USER": models.UserMessage,
"SYSTEM": models.SystemMessage,
"ASSISTANT": models.AssistantMessage,
}
self.oci_chat_message_content = models.TextContent
self.chat_api_format = models.BaseChatRequest.API_FORMAT_GENERIC
def chat_response_to_text(self, response: Any) -> str:
return response.data.chat_response.choices[0].message.content[0].text
def chat_stream_to_text(self, event_data: Dict) -> str:
return event_data["message"]["content"][0]["text"]
def is_chat_stream_end(self, event_data: Dict) -> bool:
return "message" not in event_data
def chat_generation_info(self, response: Any) -> Dict[str, Any]:
return {
"finish_reason": response.data.chat_response.choices[0].finish_reason,
"time_created": str(response.data.chat_response.time_created),
}
def chat_stream_generation_info(self, event_data: Dict) -> Dict[str, Any]:
return {
"finish_reason": event_data["finishReason"],
}
def get_role(self, message: BaseMessage) -> str:
# meta only supports alternating user/assistant roles
if isinstance(message, HumanMessage):
return "USER"
elif isinstance(message, AIMessage):
return "ASSISTANT"
elif isinstance(message, SystemMessage):
return "SYSTEM"
else:
raise ValueError(f"Got unknown type {message}")
def messages_to_oci_params(
self, messages: List[BaseMessage], **kwargs: Any
) -> Dict[str, Any]:
oci_messages = [
self.oci_chat_message[self.get_role(msg)](
content=[self.oci_chat_message_content(text=msg.content)]
)
for msg in messages
]
oci_params = {
"messages": oci_messages,
"api_format": self.chat_api_format,
"top_k": -1,
}
return oci_params
def convert_to_oci_tool(
self,
tool: Union[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
) -> Dict[str, Any]:
raise NotImplementedError("Tools not supported for Meta models")
class ChatOCIGenAI(BaseChatModel, OCIGenAIBase):
"""ChatOCIGenAI chat model integration.
Setup:
Install ``langchain-community`` and the ``oci`` sdk.
.. code-block:: bash
pip install -U langchain-community oci
Key init args — completion params:
model_id: str
Id of the OCIGenAI chat model to use, e.g., cohere.command-r-16k.
is_stream: bool
Whether to stream back partial progress
model_kwargs: Optional[Dict]
Keyword arguments to pass to the specific model used, e.g., temperature, max_tokens.
Key init args — client params:
service_endpoint: str
The endpoint URL for the OCIGenAI service, e.g., https://inference.generativeai.us-chicago-1.oci.oraclecloud.com.
compartment_id: str
The compartment OCID.
auth_type: str
The authentication type to use, e.g., API_KEY (default), SECURITY_TOKEN, INSTANCE_PRINCIPAL, RESOURCE_PRINCIPAL.
auth_profile: Optional[str]
            The name of the profile in ~/.oci/config. If not specified, DEFAULT will be used.
provider: str
            Provider name of the model. Defaults to None; if not set, it is derived from the model_id, and otherwise must be supplied by the user.
See full list of supported init args and their descriptions in the params section.
Instantiate:
.. code-block:: python
from langchain_community.chat_models import ChatOCIGenAI
chat = ChatOCIGenAI(
model_id="cohere.command-r-16k",
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
compartment_id="MY_OCID",
model_kwargs={"temperature": 0.7, "max_tokens": 500},
)
Invoke:
.. code-block:: python
messages = [
SystemMessage(content="your are an AI assistant."),
AIMessage(content="Hi there human!"),
HumanMessage(content="tell me a joke."),
]
response = chat.invoke(messages)
Stream:
.. code-block:: python
for r in chat.stream(messages):
print(r.content, end="", flush=True)
    Response metadata:
.. code-block:: python
response = chat.invoke(messages)
print(response.response_metadata)
""" # noqa: E501
model_config = ConfigDict(
extra="forbid",
arbitrary_types_allowed=True,
)
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "oci_generative_ai_chat"
@property
def _provider_map(self) -> Mapping[str, Any]:
"""Get the provider map"""
return {
"cohere": CohereProvider(),
"meta": MetaProvider(),
}
@property
def _provider(self) -> Any:
"""Get the internal provider object"""
return self._get_provider(provider_map=self._provider_map)
def _prepare_request(
self,
messages: List[BaseMessage],
stop: Optional[List[str]],
stream: bool,
**kwargs: Any,
) -> Dict[str, Any]:
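        """Assemble the serving mode, model kwargs, and provider-specific chat
        parameters into an OCI ``ChatDetails`` request."""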
try:
from oci.generative_ai_inference import models
except ImportError as ex:
raise ModuleNotFoundError(
"Could not import oci python package. "
"Please make sure you have the oci package installed."
) from ex
oci_params = self._provider.messages_to_oci_params(messages, **kwargs)
oci_params["is_stream"] = stream
_model_kwargs = self.model_kwargs or {}
if stop is not None:
_model_kwargs[self._provider.stop_sequence_key] = stop
chat_params = {**_model_kwargs, **kwargs, **oci_params}
if not self.model_id:
raise ValueError("Model ID is required to chat")
if self.model_id.startswith(CUSTOM_ENDPOINT_PREFIX):
serving_mode = models.DedicatedServingMode(endpoint_id=self.model_id)
else:
serving_mode = models.OnDemandServingMode(model_id=self.model_id)
request = models.ChatDetails(
compartment_id=self.compartment_id,
serving_mode=serving_mode,
chat_request=self._provider.oci_chat_request(**chat_params),
)
return request
def bind_tools(
self,
tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
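        """Bind tool-like objects to this chat model.

        Tools are converted to the provider-specific OCI tool format before being
        bound to the underlying model call.
        """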
formatted_tools = [self._provider.convert_to_oci_tool(tool) for tool in tools]
return super().bind(tools=formatted_tools, **kwargs)
def with_structured_output(
self,
schema: Union[Dict[Any, Any], Type[BaseModel]],
**kwargs: Any,
) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
"""Model wrapper that returns outputs formatted to match the given schema.
Args:
schema: The output schema as a dict or a Pydantic class. If a Pydantic class
then the model output will be an object of that class. If a dict then
the model output will be a dict.
Returns:
A Runnable that takes any ChatModel input and returns either a dict or
Pydantic class as output.
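
        Example (illustrative sketch; assumes ``chat`` is a configured ChatOCIGenAI
        instance and ``Joke`` is a hypothetical schema defined only for this example):

        .. code-block:: python

            from pydantic import BaseModel

            class Joke(BaseModel):
                '''A joke with its setup and punchline.'''

                setup: str
                punchline: str

            structured_chat = chat.with_structured_output(Joke)
            structured_chat.invoke("Tell me a joke about cloud computing.")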
"""
llm = self.bind_tools([schema], **kwargs)
if isinstance(schema, type) and issubclass(schema, BaseModel):
output_parser: OutputParserLike = PydanticToolsParser(
tools=[schema], first_tool_only=True
)
else:
key_name = getattr(self._provider.convert_to_oci_tool(schema), "name")
output_parser = JsonOutputKeyToolsParser(
key_name=key_name, first_tool_only=True
)
return llm | output_parser
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Call out to a OCIGenAI chat model.
Args:
messages: list of LangChain messages
stop: Optional list of stop words to use.
Returns:
LangChain ChatResult
Example:
.. code-block:: python
messages = [
HumanMessage(content="hello!"),
AIMessage(content="Hi there human!"),
HumanMessage(content="Meow!")
]
response = llm.invoke(messages)
"""
if self.is_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
request = self._prepare_request(messages, stop=stop, stream=False, **kwargs)
response = self.client.chat(request)
content = self._provider.chat_response_to_text(response)
if stop is not None:
content = enforce_stop_tokens(content, stop)
generation_info = self._provider.chat_generation_info(response)
llm_output = {
"model_id": response.data.model_id,
"model_version": response.data.model_version,
"request_id": response.request_id,
"content-length": response.headers["content-length"],
}
if "tool_calls" in generation_info:
tool_calls = [
_convert_oci_tool_call_to_langchain(tool_call)
for tool_call in response.data.chat_response.tool_calls
]
else:
tool_calls = []
message = AIMessage(
content=content,
additional_kwargs=generation_info,
tool_calls=tool_calls,
)
return ChatResult(
generations=[
ChatGeneration(message=message, generation_info=generation_info)
],
llm_output=llm_output,
)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
request = self._prepare_request(messages, stop=stop, stream=True, **kwargs)
response = self.client.chat(request)
for event in response.data.events():
event_data = json.loads(event.data)
if not self._provider.is_chat_stream_end(event_data): # still streaming
delta = self._provider.chat_stream_to_text(event_data)
chunk = ChatGenerationChunk(message=AIMessageChunk(content=delta))
if run_manager:
run_manager.on_llm_new_token(delta, chunk=chunk)
yield chunk
else: # stream end
generation_info = self._provider.chat_stream_generation_info(event_data)
tool_call_chunks = []
if tool_calls := generation_info.get("tool_calls"):
content = self._provider.chat_stream_to_text(event_data)
try:
tool_call_chunks = [
ToolCallChunk(
name=tool_call["function"].get("name"),
args=tool_call["function"].get("arguments"),
id=tool_call.get("id"),
index=tool_call.get("index"),
)
for tool_call in tool_calls
]
except KeyError:
pass
else:
content = ""
message = AIMessageChunk(
content=content,
additional_kwargs=generation_info,
tool_call_chunks=tool_call_chunks,
)
yield ChatGenerationChunk(
message=message,
generation_info=generation_info,
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/javelin_ai_gateway.py | import logging
from typing import Any, Dict, List, Mapping, Optional, cast
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
BaseMessage,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import (
ChatGeneration,
ChatResult,
)
from pydantic import BaseModel, ConfigDict, Field, SecretStr
logger = logging.getLogger(__name__)
# Ignoring type because below is valid pydantic code
# Unexpected keyword argument "extra" for "__init_subclass__" of "object" [call-arg]
class ChatParams(BaseModel, extra="allow"):
"""Parameters for the `Javelin AI Gateway` LLM."""
temperature: float = 0.0
stop: Optional[List[str]] = None
max_tokens: Optional[int] = None
class ChatJavelinAIGateway(BaseChatModel):
"""`Javelin AI Gateway` chat models API.
To use, you should have the ``javelin_sdk`` python package installed.
For more information, see https://docs.getjavelin.io
Example:
.. code-block:: python
from langchain_community.chat_models import ChatJavelinAIGateway
chat = ChatJavelinAIGateway(
gateway_uri="<javelin-ai-gateway-uri>",
route="<javelin-ai-gateway-chat-route>",
params={
"temperature": 0.1
}
)
"""
route: str
"""The route to use for the Javelin AI Gateway API."""
gateway_uri: Optional[str] = None
"""The URI for the Javelin AI Gateway API."""
params: Optional[ChatParams] = None
"""Parameters for the Javelin AI Gateway LLM."""
client: Any = None
"""javelin client."""
javelin_api_key: Optional[SecretStr] = Field(None, alias="api_key")
"""The API key for the Javelin AI Gateway."""
model_config = ConfigDict(
populate_by_name=True,
)
def __init__(self, **kwargs: Any):
try:
from javelin_sdk import (
JavelinClient,
UnauthorizedError,
)
except ImportError:
raise ImportError(
"Could not import javelin_sdk python package. "
"Please install it with `pip install javelin_sdk`."
)
super().__init__(**kwargs)
if self.gateway_uri:
try:
self.client = JavelinClient(
base_url=self.gateway_uri,
api_key=cast(SecretStr, self.javelin_api_key).get_secret_value(),
)
except UnauthorizedError as e:
raise ValueError("Javelin: Incorrect API Key.") from e
@property
def _default_params(self) -> Dict[str, Any]:
params: Dict[str, Any] = {
"gateway_uri": self.gateway_uri,
"javelin_api_key": cast(SecretStr, self.javelin_api_key).get_secret_value(),
"route": self.route,
**(self.params.dict() if self.params else {}),
}
return params
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts = [
ChatJavelinAIGateway._convert_message_to_dict(message)
for message in messages
]
data: Dict[str, Any] = {
"messages": message_dicts,
**(self.params.dict() if self.params else {}),
}
resp = self.client.query_route(self.route, query_body=data)
return ChatJavelinAIGateway._create_chat_result(resp.dict())
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts = [
ChatJavelinAIGateway._convert_message_to_dict(message)
for message in messages
]
data: Dict[str, Any] = {
"messages": message_dicts,
**(self.params.dict() if self.params else {}),
}
resp = await self.client.aquery_route(self.route, query_body=data)
return ChatJavelinAIGateway._create_chat_result(resp.dict())
@property
def _identifying_params(self) -> Dict[str, Any]:
return self._default_params
def _get_invocation_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> Dict[str, Any]:
"""Get the parameters used to invoke the model FOR THE CALLBACKS."""
return {
**self._default_params,
**super()._get_invocation_params(stop=stop, **kwargs),
}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "javelin-ai-gateway-chat"
@staticmethod
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
content = _dict["content"]
if role == "user":
return HumanMessage(content=content)
elif role == "assistant":
return AIMessage(content=content)
elif role == "system":
return SystemMessage(content=content)
else:
return ChatMessage(content=content, role=role)
@staticmethod
def _raise_functions_not_supported() -> None:
raise ValueError(
"Function messages are not supported by the Javelin AI Gateway. Please"
" create a feature request at https://docs.getjavelin.io"
)
@staticmethod
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
raise ValueError(
"Function messages are not supported by the Javelin AI Gateway. Please"
" create a feature request at https://docs.getjavelin.io"
)
else:
raise ValueError(f"Got unknown message type: {message}")
if "function_call" in message.additional_kwargs:
ChatJavelinAIGateway._raise_functions_not_supported()
if message.additional_kwargs:
logger.warning(
"Additional message arguments are unsupported by Javelin AI Gateway "
" and will be ignored: %s",
message.additional_kwargs,
)
return message_dict
@staticmethod
def _create_chat_result(response: Mapping[str, Any]) -> ChatResult:
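"""Build a ``ChatResult`` from the gateway response: one generation per entry
in ``llm_response.choices`` (with per-choice metadata as ``generation_info``)
and the response-level metadata as ``llm_output``."""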
generations = []
for candidate in response["llm_response"]["choices"]:
message = ChatJavelinAIGateway._convert_dict_to_message(
candidate["message"]
)
message_metadata = candidate.get("metadata", {})
gen = ChatGeneration(
message=message,
generation_info=dict(message_metadata),
)
generations.append(gen)
response_metadata = response.get("metadata", {})
return ChatResult(generations=generations, llm_output=response_metadata)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/dappier.py | from typing import Any, Dict, List, Optional, Union
from aiohttp import ClientSession
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
)
from langchain_core.messages import (
AIMessage,
BaseMessage,
)
from langchain_core.outputs import ChatGeneration, ChatResult
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import ConfigDict, Field, SecretStr, model_validator
from langchain_community.utilities.requests import Requests
def _format_dappier_messages(
messages: List[BaseMessage],
) -> List[Dict[str, Union[str, List[Union[str, Dict[Any, Any]]]]]]:
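"""Convert LangChain messages into Dappier conversation dicts, mapping human
messages to the ``user`` role and system messages to ``system``; other message
types are ignored."""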
formatted_messages = []
for message in messages:
if message.type == "human":
formatted_messages.append({"role": "user", "content": message.content})
elif message.type == "system":
formatted_messages.append({"role": "system", "content": message.content})
return formatted_messages
class ChatDappierAI(BaseChatModel):
"""`Dappier` chat large language models.
`Dappier` is a platform enabling access to diverse, real-time data models.
Enhance your AI applications with Dappier's pre-trained, LLM-ready data models
and ensure accurate, current responses with reduced inaccuracies.
To use one of our Dappier AI Data Models, you will need an API key.
Please visit Dappier Platform (https://platform.dappier.com/) to log in
and create an API key in your profile.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatDappierAI
from langchain_core.messages import HumanMessage
# Initialize `ChatDappierAI` with the desired configuration
chat = ChatDappierAI(
dappier_endpoint="https://api.dappier.com/app/datamodel/dm_01hpsxyfm2fwdt2zet9cg6fdxt",
dappier_api_key="<YOUR_KEY>")
# Create a list of messages to interact with the model
messages = [HumanMessage(content="hello")]
# Invoke the model with the provided messages
chat.invoke(messages)
You can find more details here: https://docs.dappier.com/introduction"""
dappier_endpoint: str = "https://api.dappier.com/app/datamodelconversation"
dappier_model: str = "dm_01hpsxyfm2fwdt2zet9cg6fdxt"
dappier_api_key: Optional[SecretStr] = Field(None, description="Dappier API Token")
model_config = ConfigDict(
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key exists in environment."""
values["dappier_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "dappier_api_key", "DAPPIER_API_KEY")
)
return values
@staticmethod
def get_user_agent() -> str:
from langchain_community import __version__
return f"langchain/{__version__}"
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "dappier-realtimesearch-chat"
@property
def _api_key(self) -> str:
if self.dappier_api_key:
return self.dappier_api_key.get_secret_value()
return ""
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
url = f"{self.dappier_endpoint}"
headers = {
"Authorization": f"Bearer {self._api_key}",
"User-Agent": self.get_user_agent(),
}
user_query = _format_dappier_messages(messages=messages)
payload: Dict[str, Any] = {
"model": self.dappier_model,
"conversation": user_query,
}
request = Requests(headers=headers)
response = request.post(url=url, data=payload)
response.raise_for_status()
data = response.json()
message_response = data["message"]
return ChatResult(
generations=[ChatGeneration(message=AIMessage(content=message_response))]
)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
url = f"{self.dappier_endpoint}"
headers = {
"Authorization": f"Bearer {self._api_key}",
"User-Agent": self.get_user_agent(),
}
user_query = _format_dappier_messages(messages=messages)
payload: Dict[str, Any] = {
"model": self.dappier_model,
"conversation": user_query,
}
async with ClientSession() as session:
async with session.post(url, json=payload, headers=headers) as response:
response.raise_for_status()
data = await response.json()
message_response = data["message"]
return ChatResult(
generations=[
ChatGeneration(message=AIMessage(content=message_response))
]
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/fake.py | """Fake ChatModel for testing purposes."""
import asyncio
import time
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Union
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import BaseChatModel, SimpleChatModel
from langchain_core.messages import AIMessageChunk, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
class FakeMessagesListChatModel(BaseChatModel):
"""Fake ChatModel for testing purposes."""
responses: List[BaseMessage]
sleep: Optional[float] = None
i: int = 0
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
response = self.responses[self.i]
if self.i < len(self.responses) - 1:
self.i += 1
else:
self.i = 0
generation = ChatGeneration(message=response)
return ChatResult(generations=[generation])
@property
def _llm_type(self) -> str:
return "fake-messages-list-chat-model"
class FakeListChatModel(SimpleChatModel):
"""Fake ChatModel for testing purposes."""
responses: List
sleep: Optional[float] = None
i: int = 0
@property
def _llm_type(self) -> str:
return "fake-list-chat-model"
def _call(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""First try to lookup in queries, else return 'foo' or 'bar'."""
response = self.responses[self.i]
if self.i < len(self.responses) - 1:
self.i += 1
else:
self.i = 0
return response
def _stream(
self,
messages: List[BaseMessage],
stop: Union[List[str], None] = None,
run_manager: Union[CallbackManagerForLLMRun, None] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
response = self.responses[self.i]
if self.i < len(self.responses) - 1:
self.i += 1
else:
self.i = 0
for c in response:
if self.sleep is not None:
time.sleep(self.sleep)
yield ChatGenerationChunk(message=AIMessageChunk(content=c))
async def _astream(
self,
messages: List[BaseMessage],
stop: Union[List[str], None] = None,
run_manager: Union[AsyncCallbackManagerForLLMRun, None] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
response = self.responses[self.i]
if self.i < len(self.responses) - 1:
self.i += 1
else:
self.i = 0
for c in response:
if self.sleep is not None:
await asyncio.sleep(self.sleep)
yield ChatGenerationChunk(message=AIMessageChunk(content=c))
@property
def _identifying_params(self) -> Dict[str, Any]:
return {"responses": self.responses}
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/tongyi.py | from __future__ import annotations
import asyncio
import functools
import json
import logging
from operator import itemgetter
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Type,
Union,
cast,
)
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
FunctionMessage,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
ToolMessage,
ToolMessageChunk,
)
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
PydanticToolsParser,
make_invalid_tool_call,
parse_tool_call,
)
from langchain_core.outputs import (
ChatGeneration,
ChatGenerationChunk,
ChatResult,
)
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import (
BaseModel,
ConfigDict,
Field,
SecretStr,
)
from requests.exceptions import HTTPError
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain_community.llms.tongyi import (
agenerate_with_last_element_mark,
check_response,
generate_with_last_element_mark,
)
logger = logging.getLogger(__name__)
def convert_dict_to_message(
_dict: Mapping[str, Any], is_chunk: bool = False
) -> Union[BaseMessage, BaseMessageChunk]:
"""Convert a dict to a message."""
role = _dict["role"]
content = _dict["content"]
if role == "user":
return (
HumanMessageChunk(content=content)
if is_chunk
else HumanMessage(content=content)
)
elif role == "assistant":
tool_calls = []
invalid_tool_calls = []
if "tool_calls" in _dict:
additional_kwargs = {"tool_calls": _dict["tool_calls"]}
for index, value in enumerate(_dict["tool_calls"]):
if is_chunk:
try:
tool_calls.append(
{
"name": value["function"].get("name"),
"args": value["function"].get("arguments"),
"id": value.get("id"),
# Tongyi does not respond with index,
# use index in the list instead
"index": index,
}
)
except KeyError:
pass
else:
try:
parsed_tool = parse_tool_call(value, return_id=True)
if parsed_tool:
tool_calls.append(parsed_tool)
except Exception as e:
invalid_tool_calls.append(make_invalid_tool_call(value, str(e)))
else:
additional_kwargs = {}
return (
AIMessageChunk(
content=content,
additional_kwargs=additional_kwargs,
tool_call_chunks=tool_calls, # type: ignore[arg-type]
id=_dict.get("id"),
)
if is_chunk
else AIMessage(
content=content,
additional_kwargs=additional_kwargs,
tool_calls=tool_calls, # type: ignore[arg-type]
invalid_tool_calls=invalid_tool_calls,
)
)
elif role == "system":
return (
SystemMessageChunk(content=content)
if is_chunk
else SystemMessage(content=content)
)
elif role == "tool":
additional_kwargs = {}
if "name" in _dict:
additional_kwargs["name"] = _dict["name"]
return (
ToolMessageChunk(
content=_dict.get("content", ""),
tool_call_id=_dict.get("tool_call_id"), # type: ignore[arg-type]
additional_kwargs=additional_kwargs,
)
if is_chunk
else ToolMessage(
content=_dict.get("content", ""),
tool_call_id=_dict.get("tool_call_id"), # type: ignore[arg-type]
additional_kwargs=additional_kwargs,
)
)
else:
return (
ChatMessageChunk(role=role, content=content)
if is_chunk
else ChatMessage(role=role, content=content)
)
def convert_message_chunk_to_message(message_chunk: BaseMessageChunk) -> BaseMessage:
"""Convert a message chunk to a message.
Args:
message_chunk: Message chunk to convert.
Returns:
Message.
"""
if not isinstance(message_chunk, BaseMessageChunk):
return message_chunk
# chunk classes always have the equivalent non-chunk class as their first parent
ignore_keys = ["type"]
if isinstance(message_chunk, AIMessageChunk):
ignore_keys.append("tool_call_chunks")
return message_chunk.__class__.__mro__[1](
**{k: v for k, v in message_chunk.__dict__.items() if k not in ignore_keys}
)
def convert_message_to_dict(message: BaseMessage) -> dict:
"""Convert a message to a dict."""
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "tool_calls" in message.additional_kwargs:
message_dict["tool_calls"] = message.additional_kwargs["tool_calls"]
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, ToolMessage):
message_dict = {
"role": "tool",
"tool_call_id": message.tool_call_id,
"content": message.content,
"name": message.name or message.additional_kwargs.get("name"),
}
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "tool",
"tool_call_id": "",
"content": message.content,
"name": message.name,
}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
def _create_retry_decorator(llm: ChatTongyi) -> Callable[[Any], Any]:
min_seconds = 1
max_seconds = 4
# Wait 2^x * 1 second between each retry, starting at
# 1 second and capping the wait at 4 seconds per attempt
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(retry_if_exception_type(HTTPError)),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
class ChatTongyi(BaseChatModel):
"""Alibaba Tongyi Qwen chat model integration.
Setup:
Install ``dashscope`` and set environment variables ``DASHSCOPE_API_KEY``.
.. code-block:: bash
pip install dashscope
export DASHSCOPE_API_KEY="your-api-key"
Key init args — completion params:
model: str
Name of Tongyi Qwen model to use.
top_p: float
Total probability mass of tokens to consider at each step.
streaming: bool
Whether to stream the results or not.
Key init args — client params:
api_key: Optional[str]
Dashscope API KEY. If not passed in will be read from env var DASHSCOPE_API_KEY.
max_retries: int
Maximum number of retries to make when generating.
See full list of supported init args and their descriptions in the params section.
Instantiate:
.. code-block:: python
from langchain_community.chat_models import ChatTongyi
tongyi_chat = ChatTongyi(
model="qwen-max",
# top_p="...",
# api_key="...",
# other params...
)
Invoke:
.. code-block:: python
messages = [
("system", "你是一名专业的翻译家,可以将用户的中文翻译为英文。"),
("human", "我喜欢编程。"),
]
tongyi_chat.invoke(messages)
.. code-block:: python
AIMessage(
content='I enjoy programming.',
response_metadata={
'model_name': 'qwen-max',
'finish_reason': 'stop',
'request_id': '0bd14853-4abc-9593-8642-8dbb915bd4df',
'token_usage': {
'input_tokens': 30,
'output_tokens': 4,
'total_tokens': 34
}
},
id='run-533b3688-d12b-40c6-a2f7-52f291f8fa0a-0'
)
Stream:
.. code-block:: python
for chunk in tongyi_chat.stream(messages):
print(chunk)
.. code-block:: python
content='I' id='run-8fbcce63-42fc-4208-9399-da46ac40c967'
content=' enjoy' id='run-8fbcce63-42fc-4208-9399-da46ac40c967'
content=' programming' id='run-8fbcce63-42fc-4208-9399-da46ac40c967'
content='.' response_metadata={'finish_reason': 'stop', 'request_id': '67aec2b5-72bf-96a4-ae29-5bfebd2e7305', 'token_usage': {'input_tokens': 30, 'output_tokens': 4, 'total_tokens': 34}} id='run-8fbcce63-42fc-4208-9399-da46ac40c967'
Async:
.. code-block:: python
await tongyi_chat.ainvoke(messages)
# stream:
# async for chunk in tongyi_chat.astream(messages):
# print(chunk)
# batch:
# await tongyi_chat.abatch([messages])
.. code-block:: python
AIMessage(
content='I enjoy programming.',
response_metadata={
'model_name': 'qwen-max',
'finish_reason': 'stop',
'request_id': 'a55a2d6c-a876-9789-9dd9-7b52bf8adde0',
'token_usage': {
'input_tokens': 30,
'output_tokens': 4,
'total_tokens': 34
}
},
id='run-3bffa3ec-e8d9-4043-b57d-348e047d64de-0'
)
Tool calling:
.. code-block:: python
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
location: str = Field(
..., description="The city and state, e.g. San Francisco, CA"
)
class GetPopulation(BaseModel):
'''Get the current population in a given location'''
location: str = Field(
..., description="The city and state, e.g. San Francisco, CA"
)
chat_with_tools = tongyi_chat.bind_tools([GetWeather, GetPopulation])
ai_msg = chat_with_tools.invoke(
"Which city is hotter today and which is bigger: LA or NY?"
)
ai_msg.tool_calls
.. code-block:: python
[
{
'name': 'GetWeather',
'args': {'location': 'Los Angeles, CA'},
'id': ''
}
]
Structured output:
.. code-block:: python
from typing import Optional
from pydantic import BaseModel, Field
class Joke(BaseModel):
'''Joke to tell user.'''
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")
structured_chat = tongyi_chat.with_structured_output(Joke)
structured_chat.invoke("Tell me a joke about cats")
.. code-block:: python
Joke(
setup='Why did the cat join the band?',
punchline='Because it wanted to be a solo purr-sonality!',
rating=None
)
Response metadata
.. code-block:: python
ai_msg = tongyi_chat.invoke(messages)
ai_msg.response_metadata
.. code-block:: python
{
'model_name': 'qwen-max',
'finish_reason': 'stop',
'request_id': '32a13e4c-370e-99cb-8f9b-4c999d98c57d',
'token_usage': {
'input_tokens': 30,
'output_tokens': 4,
'total_tokens': 34
}
}
""" # noqa: E501
@property
def lc_secrets(self) -> Dict[str, str]:
return {"dashscope_api_key": "DASHSCOPE_API_KEY"}
client: Any = None #: :meta private:
model_name: str = Field(default="qwen-turbo", alias="model")
"""Model name to use.
Callable multimodal models:
- qwen-vl-v1
- qwen-vl-chat-v1
- qwen-audio-turbo
- qwen-vl-plus
- qwen-vl-max
"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
top_p: float = 0.8
"""Total probability mass of tokens to consider at each step."""
dashscope_api_key: Optional[SecretStr] = Field(None, alias="api_key")
"""Dashscope api key provide by Alibaba Cloud."""
streaming: bool = False
"""Whether to stream the results or not."""
max_retries: int = 10
"""Maximum number of retries to make when generating."""
model_config = ConfigDict(
populate_by_name=True,
)
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "tongyi"
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["dashscope_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "dashscope_api_key", "DASHSCOPE_API_KEY")
)
try:
import dashscope
except ImportError:
raise ImportError(
"Could not import dashscope python package. "
"Please install it with `pip install dashscope --upgrade`."
)
dashscope_multimodal_models = [
"qwen-vl-v1",
"qwen-vl-chat-v1",
"qwen-audio-turbo",
"qwen-vl-plus",
"qwen-vl-max",
]
if (
values["model_name"] in dashscope_multimodal_models
or "vl" in values["model_name"]
):
try:
values["client"] = dashscope.MultiModalConversation
except AttributeError:
raise ValueError(
"`dashscope` has no `MultiModalConversation` attribute, this is "
"likely due to an old version of the dashscope package. Try "
"upgrading it with `pip install --upgrade dashscope`."
)
else:
try:
values["client"] = dashscope.Generation
except AttributeError:
raise ValueError(
"`dashscope` has no `Generation` attribute, this is likely "
"due to an old version of the dashscope package. Try upgrading it "
"with `pip install --upgrade dashscope`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Tongyi Qwen API."""
return {
"model": self.model_name,
"top_p": self.top_p,
"api_key": cast(SecretStr, self.dashscope_api_key).get_secret_value(),
"result_format": "message",
**self.model_kwargs,
}
def completion_with_retry(self, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(self)
@retry_decorator
def _completion_with_retry(**_kwargs: Any) -> Any:
resp = self.client.call(**_kwargs)
return check_response(resp)
return _completion_with_retry(**kwargs)
def stream_completion_with_retry(self, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(self)
@retry_decorator
def _stream_completion_with_retry(**_kwargs: Any) -> Any:
responses = self.client.call(**_kwargs)
prev_resp = None
for resp in responses:
# If we are streaming without `incremental_output = True`,
# we need to calculate the delta response manually
if _kwargs.get("stream") and not _kwargs.get(
"incremental_output", False
):
if prev_resp is None:
delta_resp = resp
else:
delta_resp = self.subtract_client_response(resp, prev_resp)
prev_resp = resp
yield check_response(delta_resp)
else:
yield check_response(resp)
return _stream_completion_with_retry(**kwargs)
def subtract_client_response(self, resp: Any, prev_resp: Any) -> Any:
"""Subtract prev response from curr response.
Useful when streaming without `incremental_output = True`
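For example (an illustrative sketch, assuming ``chat`` is an already
initialized ``ChatTongyi`` instance):
.. code-block:: python
    prev = {"output": {"choices": [{"message": {"content": "Hello"}}]}}
    curr = {"output": {"choices": [{"message": {"content": "Hello world"}}]}}
    delta = chat.subtract_client_response(curr, prev)
    # delta["output"]["choices"][0]["message"]["content"] == " world"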
"""
resp_copy = json.loads(json.dumps(resp))
choice = resp_copy["output"]["choices"][0]
message = choice["message"]
prev_resp_copy = json.loads(json.dumps(prev_resp))
prev_choice = prev_resp_copy["output"]["choices"][0]
prev_message = prev_choice["message"]
message["content"] = message["content"].replace(prev_message["content"], "")
if message.get("tool_calls"):
for index, tool_call in enumerate(message["tool_calls"]):
function = tool_call["function"]
if prev_message.get("tool_calls"):
prev_function = prev_message["tool_calls"][index]["function"]
function["name"] = function["name"].replace(
prev_function["name"], ""
)
function["arguments"] = function["arguments"].replace(
prev_function["arguments"], ""
)
return resp_copy
async def astream_completion_with_retry(self, **kwargs: Any) -> Any:
"""Because the dashscope SDK doesn't provide an async API,
we wrap `stream_completion_with_retry` with an async generator."""
class _AioTongyiGenerator:
def __init__(self, generator: Any):
self.generator = generator
def __aiter__(self) -> AsyncIterator[Any]:
return self
async def __anext__(self) -> Any:
value = await asyncio.get_running_loop().run_in_executor(
None, self._safe_next
)
if value is not None:
return value
else:
raise StopAsyncIteration
def _safe_next(self) -> Any:
try:
return next(self.generator)
except StopIteration:
return None
async for chunk in _AioTongyiGenerator(
generator=self.stream_completion_with_retry(**kwargs)
):
yield chunk
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
generations = []
if self.streaming:
generation_chunk: Optional[ChatGenerationChunk] = None
for chunk in self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
):
if generation_chunk is None:
generation_chunk = chunk
else:
generation_chunk += chunk
assert generation_chunk is not None
generations.append(self._chunk_to_generation(generation_chunk))
else:
params: Dict[str, Any] = self._invocation_params(
messages=messages, stop=stop, **kwargs
)
resp = self.completion_with_retry(**params)
generations.append(
ChatGeneration(**self._chat_generation_from_qwen_resp(resp))
)
return ChatResult(
generations=generations,
llm_output={
"model_name": self.model_name,
},
)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
generations = []
if self.streaming:
generation: Optional[ChatGenerationChunk] = None
async for chunk in self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
generations.append(self._chunk_to_generation(generation))
else:
params: Dict[str, Any] = self._invocation_params(
messages=messages, stop=stop, **kwargs
)
resp = await asyncio.get_running_loop().run_in_executor(
None,
functools.partial(self.completion_with_retry, **params),
)
generations.append(
ChatGeneration(**self._chat_generation_from_qwen_resp(resp))
)
return ChatResult(
generations=generations,
llm_output={
"model_name": self.model_name,
},
)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
params: Dict[str, Any] = self._invocation_params(
messages=messages, stop=stop, stream=True, **kwargs
)
for stream_resp, is_last_chunk in generate_with_last_element_mark(
self.stream_completion_with_retry(**params)
):
choice = stream_resp["output"]["choices"][0]
message = choice["message"]
if (
choice["finish_reason"] == "null"
and message["content"] == ""
and "tool_calls" not in message
):
continue
chunk = ChatGenerationChunk(
**self._chat_generation_from_qwen_resp(
stream_resp, is_chunk=True, is_last_chunk=is_last_chunk
)
)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
params: Dict[str, Any] = self._invocation_params(
messages=messages, stop=stop, stream=True, **kwargs
)
async for stream_resp, is_last_chunk in agenerate_with_last_element_mark(
self.astream_completion_with_retry(**params)
):
chunk = ChatGenerationChunk(
**self._chat_generation_from_qwen_resp(
stream_resp, is_chunk=True, is_last_chunk=is_last_chunk
)
)
if run_manager:
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
def _invocation_params(
self, messages: List[BaseMessage], stop: Any, **kwargs: Any
) -> Dict[str, Any]:
params = {**self._default_params, **kwargs}
if stop is not None:
params["stop"] = stop
# According to the Tongyi official docs,
# `incremental_output` with `tools` is not supported yet
if params.get("stream") and not params.get("tools"):
params["incremental_output"] = True
message_dicts = [convert_message_to_dict(m) for m in messages]
# And the `system` message should be the first message if present
system_message_indices = [
i for i, m in enumerate(message_dicts) if m["role"] == "system"
]
if len(system_message_indices) == 1 and system_message_indices[0] != 0:
raise ValueError("System message can only be the first message.")
elif len(system_message_indices) > 1:
raise ValueError("There can be only one system message at most.")
params["messages"] = message_dicts
return params
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
if llm_outputs[0] is None:
return {}
return llm_outputs[0]
@staticmethod
def _chat_generation_from_qwen_resp(
resp: Any, is_chunk: bool = False, is_last_chunk: bool = True
) -> Dict[str, Any]:
# According to the response from dashscope,
# each chunk's `generation_info` overwrites the previous one.
# Besides, The `merge_dicts` method,
# which is used to concatenate `generation_info` in `GenerationChunk`,
# does not support merging of int type values.
# Therefore, we adopt the `generation_info` of the last chunk
# and discard the `generation_info` of the intermediate chunks.
choice = resp["output"]["choices"][0]
message = convert_dict_to_message(choice["message"], is_chunk=is_chunk)
if is_last_chunk:
return dict(
message=message,
generation_info=dict(
finish_reason=choice["finish_reason"],
request_id=resp["request_id"],
token_usage=dict(resp["usage"]),
),
)
else:
return dict(message=message)
@staticmethod
def _chunk_to_generation(chunk: ChatGenerationChunk) -> ChatGeneration:
return ChatGeneration(
message=convert_message_chunk_to_message(chunk.message),
generation_info=chunk.generation_info,
)
def bind_tools(
self,
tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
"""Bind tool-like objects to this chat model.
Args:
tools: A list of tool definitions to bind to this chat model.
Can be a dictionary, pydantic model, callable, or BaseTool. Pydantic
models, callables, and BaseTools will be automatically converted to
their schema dictionary representation.
**kwargs: Any additional parameters to pass to the
:class:`~langchain.runnable.Runnable` constructor.
"""
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
return super().bind(tools=formatted_tools, **kwargs)
def with_structured_output(
self,
schema: Union[Dict, Type[BaseModel]],
*,
include_raw: bool = False,
**kwargs: Any,
) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
"""Model wrapper that returns outputs formatted to match the given schema.
Args:
schema: The output schema as a dict or a Pydantic class. If a Pydantic class
then the model output will be an object of that class. If a dict then
the model output will be a dict. With a Pydantic class the returned
attributes will be validated, whereas with a dict they will not be. If
`method` is "function_calling" and `schema` is a dict, then the dict
must match the OpenAI function-calling spec.
include_raw: If False then only the parsed structured output is returned. If
an error occurs during model output parsing it will be raised. If True
then both the raw model response (a BaseMessage) and the parsed model
response will be returned. If an error occurs during output parsing it
will be caught and returned as well. The final output is always a dict
with keys "raw", "parsed", and "parsing_error".
Returns:
A Runnable that takes any ChatModel input and returns as output:
If include_raw is True then a dict with keys:
raw: BaseMessage
parsed: Optional[_DictOrPydantic]
parsing_error: Optional[BaseException]
If include_raw is False then just _DictOrPydantic is returned,
where _DictOrPydantic depends on the schema:
If schema is a Pydantic class then _DictOrPydantic is the Pydantic
class.
If schema is a dict then _DictOrPydantic is a dict.
"""
if kwargs:
raise ValueError(f"Received unsupported arguments {kwargs}")
is_pydantic_schema = isinstance(schema, type) and is_basemodel_subclass(schema)
llm = self.bind_tools([schema])
if is_pydantic_schema:
output_parser: OutputParserLike = PydanticToolsParser(
tools=[schema], # type: ignore[list-item]
first_tool_only=True, # type: ignore[list-item]
)
else:
key_name = convert_to_openai_tool(schema)["function"]["name"]
output_parser = JsonOutputKeyToolsParser(
key_name=key_name, first_tool_only=True
)
if include_raw:
parser_assign = RunnablePassthrough.assign(
parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
)
parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
parser_with_fallback = parser_assign.with_fallbacks(
[parser_none], exception_key="parsing_error"
)
return RunnableMap(raw=llm) | parser_with_fallback
else:
return llm | output_parser
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/openai.py | """OpenAI chat wrapper."""
from __future__ import annotations
import logging
import os
import sys
import warnings
from typing import (
TYPE_CHECKING,
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.language_models.llms import create_base_retry_decorator
from langchain_core.messages import (
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessageChunk,
FunctionMessageChunk,
HumanMessageChunk,
SystemMessageChunk,
ToolMessageChunk,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable
from langchain_core.tools import BaseTool
from langchain_core.utils import (
get_from_dict_or_env,
get_pydantic_field_names,
pre_init,
)
from pydantic import BaseModel, ConfigDict, Field, model_validator
from langchain_community.adapters.openai import (
convert_dict_to_message,
convert_message_to_dict,
)
from langchain_community.utils.openai import is_openai_v1
if TYPE_CHECKING:
import tiktoken
logger = logging.getLogger(__name__)
def _import_tiktoken() -> Any:
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"This is needed in order to calculate get_token_ids. "
"Please install it with `pip install tiktoken`."
)
return tiktoken
def _create_retry_decorator(
llm: ChatOpenAI,
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
import openai
errors = [
openai.error.Timeout, # type: ignore[attr-defined]
openai.error.APIError, # type: ignore[attr-defined]
openai.error.APIConnectionError, # type: ignore[attr-defined]
openai.error.RateLimitError, # type: ignore[attr-defined]
openai.error.ServiceUnavailableError, # type: ignore[attr-defined]
]
return create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
)
async def acompletion_with_retry(
llm: ChatOpenAI,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the async completion call."""
if is_openai_v1():
return await llm.async_client.create(**kwargs)
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
return await _completion_with_retry(**kwargs)
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
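"""Map a streamed OpenAI ``delta`` dict onto the matching LangChain message
chunk class, carrying any ``function_call``/``tool_calls`` payloads in
``additional_kwargs`` and falling back to ``default_class`` when no role is
present."""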
role = _dict.get("role")
content = _dict.get("content") or ""
additional_kwargs: Dict = {}
if _dict.get("function_call"):
function_call = dict(_dict["function_call"])
if "name" in function_call and function_call["name"] is None:
function_call["name"] = ""
additional_kwargs["function_call"] = function_call
if _dict.get("tool_calls"):
additional_kwargs["tool_calls"] = _dict["tool_calls"]
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content, additional_kwargs=additional_kwargs)
elif role == "system" or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role == "function" or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=content, name=_dict["name"])
elif role == "tool" or default_class == ToolMessageChunk:
return ToolMessageChunk(content=content, tool_call_id=_dict["tool_call_id"])
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role) # type: ignore[arg-type]
else:
return default_class(content=content) # type: ignore[call-arg]
def _update_token_usage(
overall_token_usage: Union[int, dict], new_usage: Union[int, dict]
) -> Union[int, dict]:
# Token usage is either ints or dictionaries
# `reasoning_tokens` is nested inside `completion_tokens_details`
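# Illustrative sketch of the merge behaviour (ints are summed, dicts are merged
# recursively), e.g.:
#   _update_token_usage(
#       {"completion_tokens": 3, "completion_tokens_details": {"reasoning_tokens": 1}},
#       {"completion_tokens": 2, "completion_tokens_details": {"reasoning_tokens": 4}},
#   ) == {"completion_tokens": 5, "completion_tokens_details": {"reasoning_tokens": 5}}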
if isinstance(new_usage, int):
if not isinstance(overall_token_usage, int):
raise ValueError(
f"Got different types for token usage: "
f"{type(new_usage)} and {type(overall_token_usage)}"
)
return new_usage + overall_token_usage
elif isinstance(new_usage, dict):
if not isinstance(overall_token_usage, dict):
raise ValueError(
f"Got different types for token usage: "
f"{type(new_usage)} and {type(overall_token_usage)}"
)
return {
k: _update_token_usage(overall_token_usage.get(k, 0), v)
for k, v in new_usage.items()
}
else:
warnings.warn(f"Unexpected type for token usage: {type(new_usage)}")
return new_usage
@deprecated(
since="0.0.10", removal="1.0", alternative_import="langchain_openai.ChatOpenAI"
)
class ChatOpenAI(BaseChatModel):
"""`OpenAI` Chat large language models API.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatOpenAI
openai = ChatOpenAI(model="gpt-3.5-turbo")
"""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"openai_api_key": "OPENAI_API_KEY"}
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "chat_models", "openai"]
@property
def lc_attributes(self) -> Dict[str, Any]:
attributes: Dict[str, Any] = {}
if self.openai_organization:
attributes["openai_organization"] = self.openai_organization
if self.openai_api_base:
attributes["openai_api_base"] = self.openai_api_base
if self.openai_proxy:
attributes["openai_proxy"] = self.openai_proxy
return attributes
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return True
client: Any = Field(default=None, exclude=True) #: :meta private:
async_client: Any = Field(default=None, exclude=True) #: :meta private:
model_name: str = Field(default="gpt-3.5-turbo", alias="model")
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
# When updating this to use a SecretStr
# Check for classes that derive from this class (as some of them
# may assume openai_api_key is a str)
openai_api_key: Optional[str] = Field(default=None, alias="api_key")
"""Automatically inferred from env var `OPENAI_API_KEY` if not provided."""
openai_api_base: Optional[str] = Field(default=None, alias="base_url")
"""Base URL path for API requests, leave blank if not using a proxy or service
emulator."""
openai_organization: Optional[str] = Field(default=None, alias="organization")
"""Automatically inferred from env var `OPENAI_ORG_ID` if not provided."""
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = None
request_timeout: Union[float, Tuple[float, float], Any, None] = Field(
default=None, alias="timeout"
)
"""Timeout for requests to OpenAI completion API. Can be float, httpx.Timeout or
None."""
max_retries: int = Field(default=2)
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
n: int = 1
"""Number of chat completions to generate for each prompt."""
max_tokens: Optional[int] = None
"""Maximum number of tokens to generate."""
tiktoken_model_name: Optional[str] = None
"""The model name to pass to tiktoken when using this class.
Tiktoken is used to count the number of tokens in documents to constrain
them to be under a certain limit. By default, when set to None, this will
be the same as the model name. However, there are some cases
where you may want to use this class with a model name not
supported by tiktoken. This can include when using Azure deployments or
when using one of the many model providers that expose an OpenAI-like
API but with different models. In those cases, in order to avoid erroring
when tiktoken is called, you can specify a model name to use here."""
default_headers: Union[Mapping[str, str], None] = None
default_query: Union[Mapping[str, object], None] = None
# Configure a custom httpx client. See the
# [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
http_client: Union[Any, None] = None
"""Optional httpx.Client."""
model_config = ConfigDict(
populate_by_name=True,
)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
values["openai_api_key"] = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
# Check OPENAI_ORGANIZATION for backwards compatibility.
values["openai_organization"] = (
values["openai_organization"]
or os.getenv("OPENAI_ORG_ID")
or os.getenv("OPENAI_ORGANIZATION")
)
values["openai_api_base"] = values["openai_api_base"] or os.getenv(
"OPENAI_API_BASE"
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
try:
import openai
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if is_openai_v1():
client_params = {
"api_key": values["openai_api_key"],
"organization": values["openai_organization"],
"base_url": values["openai_api_base"],
"timeout": values["request_timeout"],
"max_retries": values["max_retries"],
"default_headers": values["default_headers"],
"default_query": values["default_query"],
"http_client": values["http_client"],
}
if not values.get("client"):
values["client"] = openai.OpenAI(**client_params).chat.completions
if not values.get("async_client"):
values["async_client"] = openai.AsyncOpenAI(
**client_params
).chat.completions
elif not values.get("client"):
values["client"] = openai.ChatCompletion # type: ignore[attr-defined]
else:
pass
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
params = {
"model": self.model_name,
"stream": self.streaming,
"n": self.n,
"temperature": self.temperature,
**self.model_kwargs,
}
if self.max_tokens is not None:
params["max_tokens"] = self.max_tokens
if self.request_timeout is not None and not is_openai_v1():
params["request_timeout"] = self.request_timeout
return params
def completion_with_retry(
self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
) -> Any:
"""Use tenacity to retry the completion call."""
if is_openai_v1():
return self.client.create(**kwargs)
retry_decorator = _create_retry_decorator(self, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return self.client.create(**kwargs)
return _completion_with_retry(**kwargs)
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
overall_token_usage: dict = {}
system_fingerprint = None
for output in llm_outputs:
if output is None:
# Happens in streaming
continue
token_usage = output["token_usage"]
if token_usage is not None:
for k, v in token_usage.items():
if k in overall_token_usage:
overall_token_usage[k] = _update_token_usage(
overall_token_usage[k], v
)
else:
overall_token_usage[k] = v
if system_fingerprint is None:
system_fingerprint = output.get("system_fingerprint")
combined = {"token_usage": overall_token_usage, "model_name": self.model_name}
if system_fingerprint:
combined["system_fingerprint"] = system_fingerprint
return combined
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class = AIMessageChunk
for chunk in self.completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
):
if not isinstance(chunk, dict):
chunk = chunk.dict()
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
if choice["delta"] is None:
continue
chunk = _convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
)
finish_reason = choice.get("finish_reason")
generation_info = (
dict(finish_reason=finish_reason) if finish_reason is not None else None
)
default_chunk_class = chunk.__class__
cg_chunk = ChatGenerationChunk(
message=chunk, generation_info=generation_info
)
if run_manager:
run_manager.on_llm_new_token(cg_chunk.text, chunk=cg_chunk)
yield cg_chunk
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {
**params,
**({"stream": stream} if stream is not None else {}),
**kwargs,
}
response = self.completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
)
return self._create_chat_result(response)
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = self._client_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
generations = []
if not isinstance(response, dict):
response = response.dict()
for res in response["choices"]:
message = convert_dict_to_message(res["message"])
generation_info = dict(finish_reason=res.get("finish_reason"))
if "logprobs" in res:
generation_info["logprobs"] = res["logprobs"]
gen = ChatGeneration(
message=message,
generation_info=generation_info,
)
generations.append(gen)
token_usage = response.get("usage", {})
llm_output = {
"token_usage": token_usage,
"model_name": self.model_name,
"system_fingerprint": response.get("system_fingerprint", ""),
}
return ChatResult(generations=generations, llm_output=llm_output)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class = AIMessageChunk
async for chunk in await acompletion_with_retry(
self, messages=message_dicts, run_manager=run_manager, **params
):
if not isinstance(chunk, dict):
chunk = chunk.dict()
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
if choice["delta"] is None:
continue
chunk = _convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
)
finish_reason = choice.get("finish_reason")
generation_info = (
dict(finish_reason=finish_reason) if finish_reason is not None else None
)
default_chunk_class = chunk.__class__
cg_chunk = ChatGenerationChunk(
message=chunk, generation_info=generation_info
)
if run_manager:
await run_manager.on_llm_new_token(token=cg_chunk.text, chunk=cg_chunk)
yield cg_chunk
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {
**params,
**({"stream": stream} if stream is not None else {}),
**kwargs,
}
response = await acompletion_with_retry(
self, messages=message_dicts, run_manager=run_manager, **params
)
return self._create_chat_result(response)
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _client_params(self) -> Dict[str, Any]:
"""Get the parameters used for the openai client."""
openai_creds: Dict[str, Any] = {
"model": self.model_name,
}
if not is_openai_v1():
openai_creds.update(
{
"api_key": self.openai_api_key,
"api_base": self.openai_api_base,
"organization": self.openai_organization,
}
)
if self.openai_proxy:
import openai
openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[attr-defined]
return {**self._default_params, **openai_creds}
def _get_invocation_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
return {
"model": self.model_name,
**super()._get_invocation_params(stop=stop),
**self._default_params,
**kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "openai-chat"
def _get_encoding_model(self) -> Tuple[str, tiktoken.Encoding]:
tiktoken_ = _import_tiktoken()
if self.tiktoken_model_name is not None:
model = self.tiktoken_model_name
else:
model = self.model_name
if model == "gpt-3.5-turbo":
# gpt-3.5-turbo may change over time.
# Returning num tokens assuming gpt-3.5-turbo-0301.
model = "gpt-3.5-turbo-0301"
elif model == "gpt-4":
# gpt-4 may change over time.
# Returning num tokens assuming gpt-4-0314.
model = "gpt-4-0314"
# Returns the number of tokens used by a list of messages.
try:
encoding = tiktoken_.encoding_for_model(model)
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
model = "cl100k_base"
encoding = tiktoken_.get_encoding(model)
return model, encoding
def get_token_ids(self, text: str) -> List[int]:
"""Get the tokens present in the text with tiktoken package."""
# tiktoken NOT supported for Python 3.7 or below
if sys.version_info[1] <= 7:
return super().get_token_ids(text)
_, encoding_model = self._get_encoding_model()
return encoding_model.encode(text)
def get_num_tokens_from_messages(
self,
messages: List[BaseMessage],
tools: Optional[
Sequence[Union[Dict[str, Any], Type, Callable, BaseTool]]
] = None,
) -> int:
"""Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/
main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
if tools is not None:
warnings.warn(
"Counting tokens in tool schemas is not yet supported. Ignoring tools."
)
if sys.version_info[1] <= 7:
return super().get_num_tokens_from_messages(messages)
model, encoding = self._get_encoding_model()
if model.startswith("gpt-3.5-turbo-0301"):
# every message follows <im_start>{role/name}\n{content}<im_end>\n
tokens_per_message = 4
# if there's a name, the role is omitted
tokens_per_name = -1
elif model.startswith("gpt-3.5-turbo") or model.startswith("gpt-4"):
tokens_per_message = 3
tokens_per_name = 1
else:
raise NotImplementedError(
f"get_num_tokens_from_messages() is not presently implemented "
f"for model {model}."
"See https://github.com/openai/openai-python/blob/main/chatml.md for "
"information on how messages are converted to tokens."
)
num_tokens = 0
messages_dict = [convert_message_to_dict(m) for m in messages]
for message in messages_dict:
num_tokens += tokens_per_message
for key, value in message.items():
# Cast str(value) in case the message value is not a string
# This occurs with function messages
num_tokens += len(encoding.encode(str(value)))
if key == "name":
num_tokens += tokens_per_name
# every reply is primed with <im_start>assistant
num_tokens += 3
return num_tokens
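    # Hedged worked example (editor's note, not in the upstream source): with a
    # gpt-4-style model (tokens_per_message=3, tokens_per_name=1), two messages
    # whose "role" and "content" values encode to 1 and 10 tokens respectively
    # count as 2 * (3 + 1 + 10) + 3 = 31 tokens, the final 3 being the
    # <im_start>assistant priming of the reply.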
def bind_functions(
self,
functions: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable]],
function_call: Optional[str] = None,
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
"""Bind functions (and other objects) to this chat model.
Args:
functions: A list of function definitions to bind to this chat model.
Can be a dictionary, pydantic model, or callable. Pydantic
models and callables will be automatically converted to
their schema dictionary representation.
function_call: Which function to require the model to call.
Must be the name of the single provided function or
"auto" to automatically determine which function to call
(if any).
kwargs: Any additional parameters to pass to the
:class:`~langchain.runnable.Runnable` constructor.
"""
from langchain.chains.openai_functions.base import convert_to_openai_function
formatted_functions = [convert_to_openai_function(fn) for fn in functions]
if function_call is not None:
if len(formatted_functions) != 1:
raise ValueError(
"When specifying `function_call`, you must provide exactly one "
"function."
)
if formatted_functions[0]["name"] != function_call:
raise ValueError(
f"Function call {function_call} was specified, but the only "
f"provided function was {formatted_functions[0]['name']}."
)
function_call_ = {"name": function_call}
kwargs = {**kwargs, "function_call": function_call_}
return super().bind(
functions=formatted_functions,
**kwargs,
)
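# Hedged usage sketch (editor's addition, not part of the upstream module).
# Assuming a caller-defined pydantic model `GetWeather`, binding it as an OpenAI
# function might look like:
#
#     llm = ChatOpenAI(model="gpt-3.5-turbo-0613")
#     llm_with_fn = llm.bind_functions([GetWeather], function_call="GetWeather")
#     msg = llm_with_fn.invoke("What's the weather in Paris?")
#     args = msg.additional_kwargs["function_call"]["arguments"]  # JSON string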
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/ollama.py | import json
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Union, cast
from langchain_core._api import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import BaseChatModel, LangSmithParams
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_community.llms.ollama import OllamaEndpointNotFoundError, _OllamaCommon
@deprecated("0.0.3", alternative="_chat_stream_response_to_chat_generation_chunk")
def _stream_response_to_chat_generation_chunk(
stream_response: str,
) -> ChatGenerationChunk:
"""Convert a stream response to a generation chunk."""
parsed_response = json.loads(stream_response)
generation_info = parsed_response if parsed_response.get("done") is True else None
return ChatGenerationChunk(
message=AIMessageChunk(content=parsed_response.get("response", "")),
generation_info=generation_info,
)
def _chat_stream_response_to_chat_generation_chunk(
stream_response: str,
) -> ChatGenerationChunk:
"""Convert a stream response to a generation chunk."""
parsed_response = json.loads(stream_response)
generation_info = parsed_response if parsed_response.get("done") is True else None
return ChatGenerationChunk(
message=AIMessageChunk(
content=parsed_response.get("message", {}).get("content", "")
),
generation_info=generation_info,
)
@deprecated(
since="0.3.1",
removal="1.0.0",
alternative_import="langchain_ollama.ChatOllama",
)
class ChatOllama(BaseChatModel, _OllamaCommon):
"""Ollama locally runs large language models.
To use, follow the instructions at https://ollama.ai/.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatOllama
ollama = ChatOllama(model="llama2")
"""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "ollama-chat"
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return False
def _get_ls_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> LangSmithParams:
"""Get standard params for tracing."""
params = self._get_invocation_params(stop=stop, **kwargs)
ls_params = LangSmithParams(
ls_provider="ollama",
ls_model_name=self.model,
ls_model_type="chat",
ls_temperature=params.get("temperature", self.temperature),
)
if ls_max_tokens := params.get("num_predict", self.num_predict):
ls_params["ls_max_tokens"] = ls_max_tokens
if ls_stop := stop or params.get("stop", None) or self.stop:
ls_params["ls_stop"] = ls_stop
return ls_params
@deprecated("0.0.3", alternative="_convert_messages_to_ollama_messages")
def _format_message_as_text(self, message: BaseMessage) -> str:
if isinstance(message, ChatMessage):
message_text = f"\n\n{message.role.capitalize()}: {message.content}"
elif isinstance(message, HumanMessage):
if isinstance(message.content, List):
first_content = cast(List[Dict], message.content)[0]
content_type = first_content.get("type")
if content_type == "text":
message_text = f"[INST] {first_content['text']} [/INST]"
elif content_type == "image_url":
message_text = first_content["image_url"]["url"]
else:
message_text = f"[INST] {message.content} [/INST]"
elif isinstance(message, AIMessage):
message_text = f"{message.content}"
elif isinstance(message, SystemMessage):
message_text = f"<<SYS>> {message.content} <</SYS>>"
else:
raise ValueError(f"Got unknown type {message}")
return message_text
def _format_messages_as_text(self, messages: List[BaseMessage]) -> str:
return "\n".join(
[self._format_message_as_text(message) for message in messages]
)
def _convert_messages_to_ollama_messages(
self, messages: List[BaseMessage]
) -> List[Dict[str, Union[str, List[str]]]]:
ollama_messages: List = []
for message in messages:
role = ""
if isinstance(message, HumanMessage):
role = "user"
elif isinstance(message, AIMessage):
role = "assistant"
elif isinstance(message, SystemMessage):
role = "system"
else:
raise ValueError("Received unsupported message type for Ollama.")
content = ""
images = []
if isinstance(message.content, str):
content = message.content
else:
for content_part in cast(List[Dict], message.content):
if content_part.get("type") == "text":
content += f"\n{content_part['text']}"
elif content_part.get("type") == "image_url":
image_url = None
temp_image_url = content_part.get("image_url")
if isinstance(temp_image_url, str):
image_url = content_part["image_url"]
elif (
isinstance(temp_image_url, dict) and "url" in temp_image_url
):
image_url = temp_image_url["url"]
else:
raise ValueError(
"Only string image_url or dict with string 'url' "
"inside content parts are supported."
)
image_url_components = image_url.split(",")
# Support data:image/jpeg;base64,<image> format
# and base64 strings
if len(image_url_components) > 1:
images.append(image_url_components[1])
else:
images.append(image_url_components[0])
else:
raise ValueError(
"Unsupported message content type. "
"Must either have type 'text' or type 'image_url' "
"with a string 'image_url' field."
)
ollama_messages.append(
{
"role": role,
"content": content,
"images": images,
}
)
return ollama_messages
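    # Hedged illustration (editor's note, not in the upstream source): a
    # HumanMessage whose content is [{"type": "text", "text": "Describe this"},
    # {"type": "image_url", "image_url": "data:image/jpeg;base64,<data>"}] maps
    # to {"role": "user", "content": "\nDescribe this", "images": ["<data>"]},
    # which is the payload shape expected by Ollama's /api/chat endpoint.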
def _create_chat_stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> Iterator[str]:
payload = {
"model": self.model,
"messages": self._convert_messages_to_ollama_messages(messages),
}
yield from self._create_stream(
payload=payload, stop=stop, api_url=f"{self.base_url}/api/chat", **kwargs
)
async def _acreate_chat_stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> AsyncIterator[str]:
payload = {
"model": self.model,
"messages": self._convert_messages_to_ollama_messages(messages),
}
async for stream_resp in self._acreate_stream(
payload=payload, stop=stop, api_url=f"{self.base_url}/api/chat", **kwargs
):
yield stream_resp
def _chat_stream_with_aggregation(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
verbose: bool = False,
**kwargs: Any,
) -> ChatGenerationChunk:
final_chunk: Optional[ChatGenerationChunk] = None
for stream_resp in self._create_chat_stream(messages, stop, **kwargs):
if stream_resp:
chunk = _chat_stream_response_to_chat_generation_chunk(stream_resp)
if final_chunk is None:
final_chunk = chunk
else:
final_chunk += chunk
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=verbose,
)
if final_chunk is None:
raise ValueError("No data received from Ollama stream.")
return final_chunk
async def _achat_stream_with_aggregation(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
verbose: bool = False,
**kwargs: Any,
) -> ChatGenerationChunk:
final_chunk: Optional[ChatGenerationChunk] = None
async for stream_resp in self._acreate_chat_stream(messages, stop, **kwargs):
if stream_resp:
chunk = _chat_stream_response_to_chat_generation_chunk(stream_resp)
if final_chunk is None:
final_chunk = chunk
else:
final_chunk += chunk
if run_manager:
await run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=verbose,
)
if final_chunk is None:
raise ValueError("No data received from Ollama stream.")
return final_chunk
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Call out to Ollama's generate endpoint.
Args:
messages: The list of base messages to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
Chat generations from the model
Example:
.. code-block:: python
response = ollama([
HumanMessage(content="Tell me about the history of AI")
])
"""
final_chunk = self._chat_stream_with_aggregation(
messages,
stop=stop,
run_manager=run_manager,
verbose=self.verbose,
**kwargs,
)
chat_generation = ChatGeneration(
message=AIMessage(content=final_chunk.text),
generation_info=final_chunk.generation_info,
)
return ChatResult(generations=[chat_generation])
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Call out to Ollama's generate endpoint.
Args:
messages: The list of base messages to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
Chat generations from the model
Example:
.. code-block:: python
response = ollama([
HumanMessage(content="Tell me about the history of AI")
])
"""
final_chunk = await self._achat_stream_with_aggregation(
messages,
stop=stop,
run_manager=run_manager,
verbose=self.verbose,
**kwargs,
)
chat_generation = ChatGeneration(
message=AIMessage(content=final_chunk.text),
generation_info=final_chunk.generation_info,
)
return ChatResult(generations=[chat_generation])
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
try:
for stream_resp in self._create_chat_stream(messages, stop, **kwargs):
if stream_resp:
chunk = _chat_stream_response_to_chat_generation_chunk(stream_resp)
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=self.verbose,
)
yield chunk
except OllamaEndpointNotFoundError:
yield from self._legacy_stream(messages, stop, **kwargs)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
async for stream_resp in self._acreate_chat_stream(messages, stop, **kwargs):
if stream_resp:
chunk = _chat_stream_response_to_chat_generation_chunk(stream_resp)
if run_manager:
await run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=self.verbose,
)
yield chunk
@deprecated("0.0.3", alternative="_stream")
def _legacy_stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
prompt = self._format_messages_as_text(messages)
for stream_resp in self._create_generate_stream(prompt, stop, **kwargs):
if stream_resp:
chunk = _stream_response_to_chat_generation_chunk(stream_resp)
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=self.verbose,
)
yield chunk
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/human.py | """ChatModel wrapper which returns user input as the response."""
from io import StringIO
from typing import Any, Callable, Dict, List, Mapping, Optional
import yaml
from langchain_core.callbacks import (
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
BaseMessage,
HumanMessage,
_message_from_dict,
messages_to_dict,
)
from langchain_core.outputs import ChatGeneration, ChatResult
from pydantic import Field
from langchain_community.llms.utils import enforce_stop_tokens
def _display_messages(messages: List[BaseMessage]) -> None:
dict_messages = messages_to_dict(messages)
for message in dict_messages:
yaml_string = yaml.dump(
message,
default_flow_style=False,
sort_keys=False,
allow_unicode=True,
width=10000,
line_break=None,
)
print("\n", "======= start of message =======", "\n\n") # noqa: T201
print(yaml_string) # noqa: T201
print("======= end of message =======", "\n\n") # noqa: T201
def _collect_yaml_input(
messages: List[BaseMessage], stop: Optional[List[str]] = None
) -> BaseMessage:
"""Collects and returns user input as a single string."""
lines = []
while True:
line = input()
if not line.strip():
break
if stop and any(seq in line for seq in stop):
break
lines.append(line)
yaml_string = "\n".join(lines)
# Try to parse the input string as YAML
try:
message = _message_from_dict(yaml.safe_load(StringIO(yaml_string)))
if message is None:
return HumanMessage(content="")
if stop:
if isinstance(message.content, str):
message.content = enforce_stop_tokens(message.content, stop)
else:
raise ValueError("Cannot use when output is not a string.")
return message
except yaml.YAMLError:
raise ValueError("Invalid YAML string entered.")
except ValueError:
raise ValueError("Invalid message entered.")
class HumanInputChatModel(BaseChatModel):
"""ChatModel which returns user input as the response."""
input_func: Callable = Field(default_factory=lambda: _collect_yaml_input)
message_func: Callable = Field(default_factory=lambda: _display_messages)
separator: str = "\n"
input_kwargs: Mapping[str, Any] = {}
message_kwargs: Mapping[str, Any] = {}
@property
def _identifying_params(self) -> Dict[str, Any]:
return {
"input_func": self.input_func.__name__,
"message_func": self.message_func.__name__,
}
@property
def _llm_type(self) -> str:
"""Returns the type of LLM."""
return "human-input-chat-model"
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""
Displays the messages to the user and returns their input as a response.
Args:
messages (List[BaseMessage]): The messages to be displayed to the user.
stop (Optional[List[str]]): A list of stop strings.
run_manager (Optional[CallbackManagerForLLMRun]): Currently not used.
Returns:
ChatResult: The user's input as a response.
"""
self.message_func(messages, **self.message_kwargs)
user_input = self.input_func(messages, stop=stop, **self.input_kwargs)
return ChatResult(generations=[ChatGeneration(message=user_input)])
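# Hedged usage sketch (editor's addition, not part of the upstream module):
#
#     chat = HumanInputChatModel()
#     result = chat.invoke([HumanMessage(content="ping")])
#
# The prompt messages are printed as YAML; the user then types a YAML message
# dict (e.g. ``type: ai`` with ``data: {content: pong}``), ends it with a blank
# line, and that message is returned as the model's response.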
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/azureml_endpoint.py | import json
import warnings
from typing import (
Any,
AsyncIterator,
Dict,
Iterator,
List,
Mapping,
Optional,
Type,
cast,
)
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
FunctionMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
ToolMessageChunk,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_community.llms.azureml_endpoint import (
AzureMLBaseEndpoint,
AzureMLEndpointApiType,
ContentFormatterBase,
)
class LlamaContentFormatter(ContentFormatterBase):
"""Content formatter for `LLaMA`."""
def __init__(self) -> None:
raise TypeError(
"`LlamaContentFormatter` is deprecated for chat models. Use "
"`CustomOpenAIContentFormatter` instead."
)
class CustomOpenAIChatContentFormatter(ContentFormatterBase):
"""Chat Content formatter for models with OpenAI like API scheme."""
SUPPORTED_ROLES: List[str] = ["user", "assistant", "system"]
@staticmethod
def _convert_message_to_dict(message: BaseMessage) -> Dict:
"""Converts a message to a dict according to a role"""
content = cast(str, message.content)
if isinstance(message, HumanMessage):
return {
"role": "user",
"content": ContentFormatterBase.escape_special_characters(content),
}
elif isinstance(message, AIMessage):
return {
"role": "assistant",
"content": ContentFormatterBase.escape_special_characters(content),
}
elif isinstance(message, SystemMessage):
return {
"role": "system",
"content": ContentFormatterBase.escape_special_characters(content),
}
elif (
isinstance(message, ChatMessage)
and message.role in CustomOpenAIChatContentFormatter.SUPPORTED_ROLES
):
return {
"role": message.role,
"content": ContentFormatterBase.escape_special_characters(content),
}
else:
supported = ",".join(
[role for role in CustomOpenAIChatContentFormatter.SUPPORTED_ROLES]
)
raise ValueError(
f"""Received unsupported role.
Supported roles for the LLaMa Foundation Model: {supported}"""
)
@property
def supported_api_types(self) -> List[AzureMLEndpointApiType]:
return [AzureMLEndpointApiType.dedicated, AzureMLEndpointApiType.serverless]
def format_messages_request_payload(
self,
messages: List[BaseMessage],
model_kwargs: Dict,
api_type: AzureMLEndpointApiType,
) -> bytes:
"""Formats the request according to the chosen api"""
chat_messages = [
CustomOpenAIChatContentFormatter._convert_message_to_dict(message)
for message in messages
]
if api_type in [
AzureMLEndpointApiType.dedicated,
AzureMLEndpointApiType.realtime,
]:
request_payload = json.dumps(
{
"input_data": {
"input_string": chat_messages,
"parameters": model_kwargs,
}
}
)
elif api_type == AzureMLEndpointApiType.serverless:
request_payload = json.dumps({"messages": chat_messages, **model_kwargs})
else:
raise ValueError(
f"`api_type` {api_type} is not supported by this formatter"
)
return str.encode(request_payload)
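    # Hedged illustration (editor's note, not in the upstream source): for a
    # serverless (pay-as-you-go) endpoint the payload is OpenAI-shaped, e.g.
    #     {"messages": [{"role": "user", "content": "Hi"}], "temperature": 0.2}
    # while dedicated/realtime endpoints wrap the same messages as
    #     {"input_data": {"input_string": [...], "parameters": {...}}}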
def format_response_payload(
self,
output: bytes,
api_type: AzureMLEndpointApiType = AzureMLEndpointApiType.dedicated,
) -> ChatGeneration:
"""Formats response"""
if api_type in [
AzureMLEndpointApiType.dedicated,
AzureMLEndpointApiType.realtime,
]:
try:
choice = json.loads(output)["output"]
except (KeyError, IndexError, TypeError) as e:
raise ValueError(self.format_error_msg.format(api_type=api_type)) from e
return ChatGeneration(
message=AIMessage(
content=choice.strip(),
),
generation_info=None,
)
if api_type == AzureMLEndpointApiType.serverless:
try:
choice = json.loads(output)["choices"][0]
if not isinstance(choice, dict):
raise TypeError(
"Endpoint response is not well formed for a chat "
"model. Expected `dict` but `{type(choice)}` was received."
)
except (KeyError, IndexError, TypeError) as e:
raise ValueError(self.format_error_msg.format(api_type=api_type)) from e
return ChatGeneration(
message=AIMessage(content=choice["message"]["content"].strip())
if choice["message"]["role"] == "assistant"
else BaseMessage(
content=choice["message"]["content"].strip(),
type=choice["message"]["role"],
),
generation_info=dict(
finish_reason=choice.get("finish_reason"),
logprobs=choice.get("logprobs"),
),
)
raise ValueError(f"`api_type` {api_type} is not supported by this formatter")
class LlamaChatContentFormatter(CustomOpenAIChatContentFormatter):
"""Deprecated: Kept for backwards compatibility
Chat Content formatter for Llama."""
def __init__(self) -> None:
super().__init__()
warnings.warn(
"""`LlamaChatContentFormatter` will be deprecated in the future.
Please use `CustomOpenAIChatContentFormatter` instead.
"""
)
class MistralChatContentFormatter(LlamaChatContentFormatter):
"""Content formatter for `Mistral`."""
def format_messages_request_payload(
self,
messages: List[BaseMessage],
model_kwargs: Dict,
api_type: AzureMLEndpointApiType,
) -> bytes:
"""Formats the request according to the chosen api"""
chat_messages = [self._convert_message_to_dict(message) for message in messages]
if chat_messages and chat_messages[0]["role"] == "system":
            # Mistral OSS models do not explicitly support system prompts,
            # so we stash the system prompt in the first user message.
chat_messages[1]["content"] = (
chat_messages[0]["content"] + "\n\n" + chat_messages[1]["content"]
)
del chat_messages[0]
if api_type == AzureMLEndpointApiType.realtime:
request_payload = json.dumps(
{
"input_data": {
"input_string": chat_messages,
"parameters": model_kwargs,
}
}
)
elif api_type == AzureMLEndpointApiType.serverless:
request_payload = json.dumps({"messages": chat_messages, **model_kwargs})
else:
raise ValueError(
f"`api_type` {api_type} is not supported by this formatter"
)
return str.encode(request_payload)
class AzureMLChatOnlineEndpoint(BaseChatModel, AzureMLBaseEndpoint):
"""Azure ML Online Endpoint chat models.
Example:
.. code-block:: python
azure_llm = AzureMLOnlineEndpoint(
endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/v1/chat/completions",
endpoint_api_type=AzureMLApiType.serverless,
endpoint_api_key="my-api-key",
content_formatter=chat_content_formatter,
)
"""
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "azureml_chat_endpoint"
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Call out to an AzureML Managed Online endpoint.
Args:
messages: The messages in the conversation with the chat model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = azureml_model.invoke("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
_model_kwargs.update(kwargs)
if stop:
_model_kwargs["stop"] = stop
request_payload = self.content_formatter.format_messages_request_payload(
messages, _model_kwargs, self.endpoint_api_type
)
response_payload = self.http_client.call(
body=request_payload, run_manager=run_manager
)
generations = self.content_formatter.format_response_payload(
response_payload, self.endpoint_api_type
)
return ChatResult(generations=[generations])
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
self.endpoint_url = self.endpoint_url.replace("/chat/completions", "")
timeout = None if "timeout" not in kwargs else kwargs["timeout"]
import openai
params = {}
client_params = {
"api_key": self.endpoint_api_key.get_secret_value(),
"base_url": self.endpoint_url,
"timeout": timeout,
"default_headers": None,
"default_query": None,
"http_client": None,
}
client = openai.OpenAI(**client_params) # type: ignore[arg-type, arg-type, arg-type, arg-type, arg-type, arg-type]
message_dicts = [
CustomOpenAIChatContentFormatter._convert_message_to_dict(m)
for m in messages
]
params = {"stream": True, "stop": stop, "model": None, **kwargs}
default_chunk_class = AIMessageChunk
for chunk in client.chat.completions.create(messages=message_dicts, **params): # type: ignore[arg-type]
if not isinstance(chunk, dict):
chunk = chunk.dict() # type: ignore[attr-defined]
if len(chunk["choices"]) == 0: # type: ignore[call-overload]
continue
choice = chunk["choices"][0] # type: ignore[call-overload]
chunk = _convert_delta_to_message_chunk( # type: ignore[assignment]
choice["delta"], # type: ignore[arg-type, index]
default_chunk_class, # type: ignore[arg-type, index]
)
generation_info = {}
if finish_reason := choice.get("finish_reason"): # type: ignore[union-attr]
generation_info["finish_reason"] = finish_reason
logprobs = choice.get("logprobs") # type: ignore[union-attr]
if logprobs:
generation_info["logprobs"] = logprobs
default_chunk_class = chunk.__class__ # type: ignore[assignment]
chunk = ChatGenerationChunk( # type: ignore[assignment]
message=chunk, # type: ignore[arg-type]
generation_info=generation_info or None, # type: ignore[arg-type]
)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk, logprobs=logprobs) # type: ignore[attr-defined, arg-type]
yield chunk # type: ignore[misc]
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
self.endpoint_url = self.endpoint_url.replace("/chat/completions", "")
timeout = None if "timeout" not in kwargs else kwargs["timeout"]
import openai
params = {}
client_params = {
"api_key": self.endpoint_api_key.get_secret_value(),
"base_url": self.endpoint_url,
"timeout": timeout,
"default_headers": None,
"default_query": None,
"http_client": None,
}
async_client = openai.AsyncOpenAI(**client_params) # type: ignore[arg-type, arg-type, arg-type, arg-type, arg-type, arg-type]
message_dicts = [
CustomOpenAIChatContentFormatter._convert_message_to_dict(m)
for m in messages
]
params = {"stream": True, "stop": stop, "model": None, **kwargs}
default_chunk_class = AIMessageChunk
async for chunk in await async_client.chat.completions.create( # type: ignore[attr-defined]
messages=message_dicts, # type: ignore[arg-type]
**params, # type: ignore[arg-type]
):
if not isinstance(chunk, dict):
chunk = chunk.dict()
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
chunk = _convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
)
generation_info = {}
if finish_reason := choice.get("finish_reason"):
generation_info["finish_reason"] = finish_reason
logprobs = choice.get("logprobs")
if logprobs:
generation_info["logprobs"] = logprobs
default_chunk_class = chunk.__class__
chunk = ChatGenerationChunk(
message=chunk, generation_info=generation_info or None
)
if run_manager:
await run_manager.on_llm_new_token(
token=chunk.text, chunk=chunk, logprobs=logprobs
)
yield chunk
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
role = cast(str, _dict.get("role"))
content = cast(str, _dict.get("content") or "")
additional_kwargs: Dict = {}
if _dict.get("function_call"):
function_call = dict(_dict["function_call"])
if "name" in function_call and function_call["name"] is None:
function_call["name"] = ""
additional_kwargs["function_call"] = function_call
if _dict.get("tool_calls"):
additional_kwargs["tool_calls"] = _dict["tool_calls"]
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content, additional_kwargs=additional_kwargs)
elif role == "system" or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role == "function" or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=content, name=_dict["name"])
elif role == "tool" or default_class == ToolMessageChunk:
return ToolMessageChunk(content=content, tool_call_id=_dict["tool_call_id"])
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content) # type: ignore[call-arg]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/meta.py | from typing import List
from langchain_core.messages import (
AIMessage,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
def _convert_one_message_to_text_llama(message: BaseMessage) -> str:
if isinstance(message, ChatMessage):
message_text = f"\n\n{message.role.capitalize()}: {message.content}"
elif isinstance(message, HumanMessage):
message_text = f"[INST] {message.content} [/INST]"
elif isinstance(message, AIMessage):
message_text = f"{message.content}"
elif isinstance(message, SystemMessage):
message_text = f"<<SYS>> {message.content} <</SYS>>"
else:
raise ValueError(f"Got unknown type {message}")
return message_text
def convert_messages_to_prompt_llama(messages: List[BaseMessage]) -> str:
"""Convert a list of messages to a prompt for llama."""
return "\n".join(
[_convert_one_message_to_text_llama(message) for message in messages]
)
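# Hedged usage sketch (editor's addition, not part of the upstream module).
if __name__ == "__main__":
    # Convert a short conversation into the Llama-2 style prompt format.
    prompt = convert_messages_to_prompt_llama(
        [
            SystemMessage(content="You are terse."),
            HumanMessage(content="Name one ocean."),
            AIMessage(content="Pacific."),
        ]
    )
    # Expected shape:
    # <<SYS>> You are terse. <</SYS>>
    # [INST] Name one ocean. [/INST]
    # Pacific.
    print(prompt)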
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/yandex.py | """Wrapper around YandexGPT chat models."""
from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional, cast
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
BaseMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatResult
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain_community.llms.utils import enforce_stop_tokens
from langchain_community.llms.yandex import _BaseYandexGPT
logger = logging.getLogger(__name__)
def _parse_message(role: str, text: str) -> Dict:
return {"role": role, "text": text}
def _parse_chat_history(history: List[BaseMessage]) -> List[Dict[str, str]]:
"""Parse a sequence of messages into history.
Returns:
A list of parsed messages.
"""
chat_history = []
for message in history:
content = cast(str, message.content)
if isinstance(message, HumanMessage):
chat_history.append(_parse_message("user", content))
if isinstance(message, AIMessage):
chat_history.append(_parse_message("assistant", content))
if isinstance(message, SystemMessage):
chat_history.append(_parse_message("system", content))
return chat_history
class ChatYandexGPT(_BaseYandexGPT, BaseChatModel):
"""YandexGPT large language models.
There are two authentication options for the service account
with the ``ai.languageModels.user`` role:
- You can specify the token in a constructor parameter `iam_token`
or in an environment variable `YC_IAM_TOKEN`.
- You can specify the key in a constructor parameter `api_key`
or in an environment variable `YC_API_KEY`.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatYandexGPT
chat_model = ChatYandexGPT(iam_token="t1.9eu...")
"""
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Generate next turn in the conversation.
Args:
messages: The history of the conversation as a list of messages.
stop: The list of stop words (optional).
run_manager: The CallbackManager for LLM run, it's not used at the moment.
Returns:
The ChatResult that contains outputs generated by the model.
Raises:
ValueError: if the last message in the list is not from human.
"""
text = completion_with_retry(self, messages=messages)
text = text if stop is None else enforce_stop_tokens(text, stop)
message = AIMessage(content=text)
return ChatResult(generations=[ChatGeneration(message=message)])
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Async method to generate next turn in the conversation.
Args:
messages: The history of the conversation as a list of messages.
stop: The list of stop words (optional).
run_manager: The CallbackManager for LLM run, it's not used at the moment.
Returns:
The ChatResult that contains outputs generated by the model.
Raises:
ValueError: if the last message in the list is not from human.
"""
text = await acompletion_with_retry(self, messages=messages)
text = text if stop is None else enforce_stop_tokens(text, stop)
message = AIMessage(content=text)
return ChatResult(generations=[ChatGeneration(message=message)])
def _make_request(
self: ChatYandexGPT,
messages: List[BaseMessage],
) -> str:
try:
import grpc
from google.protobuf.wrappers_pb2 import DoubleValue, Int64Value
try:
from yandex.cloud.ai.foundation_models.v1.text_common_pb2 import (
CompletionOptions,
Message,
)
from yandex.cloud.ai.foundation_models.v1.text_generation.text_generation_service_pb2 import ( # noqa: E501
CompletionRequest,
)
from yandex.cloud.ai.foundation_models.v1.text_generation.text_generation_service_pb2_grpc import ( # noqa: E501
TextGenerationServiceStub,
)
except ModuleNotFoundError:
from yandex.cloud.ai.foundation_models.v1.foundation_models_pb2 import (
CompletionOptions,
Message,
)
from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2 import ( # noqa: E501
CompletionRequest,
)
from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2_grpc import ( # noqa: E501
TextGenerationServiceStub,
)
except ImportError as e:
raise ImportError(
"Please install YandexCloud SDK with `pip install yandexcloud` \
or upgrade it to recent version."
) from e
if not messages:
raise ValueError("You should provide at least one message to start the chat!")
message_history = _parse_chat_history(messages)
channel_credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(self.url, channel_credentials)
request = CompletionRequest(
model_uri=self.model_uri,
completion_options=CompletionOptions(
temperature=DoubleValue(value=self.temperature),
max_tokens=Int64Value(value=self.max_tokens),
),
messages=[Message(**message) for message in message_history],
)
stub = TextGenerationServiceStub(channel)
res = stub.Completion(request, metadata=self.grpc_metadata)
return list(res)[0].alternatives[0].message.text
async def _amake_request(self: ChatYandexGPT, messages: List[BaseMessage]) -> str:
try:
import asyncio
import grpc
from google.protobuf.wrappers_pb2 import DoubleValue, Int64Value
try:
from yandex.cloud.ai.foundation_models.v1.text_common_pb2 import (
CompletionOptions,
Message,
)
from yandex.cloud.ai.foundation_models.v1.text_generation.text_generation_service_pb2 import ( # noqa: E501
CompletionRequest,
CompletionResponse,
)
from yandex.cloud.ai.foundation_models.v1.text_generation.text_generation_service_pb2_grpc import ( # noqa: E501
TextGenerationAsyncServiceStub,
)
except ModuleNotFoundError:
from yandex.cloud.ai.foundation_models.v1.foundation_models_pb2 import (
CompletionOptions,
Message,
)
from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2 import ( # noqa: E501
CompletionRequest,
CompletionResponse,
)
from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2_grpc import ( # noqa: E501
TextGenerationAsyncServiceStub,
)
from yandex.cloud.operation.operation_service_pb2 import GetOperationRequest
from yandex.cloud.operation.operation_service_pb2_grpc import (
OperationServiceStub,
)
except ImportError as e:
raise ImportError(
"Please install YandexCloud SDK with `pip install yandexcloud` \
or upgrade it to recent version."
) from e
if not messages:
raise ValueError("You should provide at least one message to start the chat!")
message_history = _parse_chat_history(messages)
operation_api_url = "operation.api.cloud.yandex.net:443"
channel_credentials = grpc.ssl_channel_credentials()
async with grpc.aio.secure_channel(self.url, channel_credentials) as channel:
request = CompletionRequest(
model_uri=self.model_uri,
completion_options=CompletionOptions(
temperature=DoubleValue(value=self.temperature),
max_tokens=Int64Value(value=self.max_tokens),
),
messages=[Message(**message) for message in message_history],
)
stub = TextGenerationAsyncServiceStub(channel)
operation = await stub.Completion(request, metadata=self.grpc_metadata)
async with grpc.aio.secure_channel(
operation_api_url, channel_credentials
) as operation_channel:
operation_stub = OperationServiceStub(operation_channel)
while not operation.done:
await asyncio.sleep(1)
operation_request = GetOperationRequest(operation_id=operation.id)
operation = await operation_stub.Get(
operation_request,
metadata=self.grpc_metadata,
)
completion_response = CompletionResponse()
operation.response.Unpack(completion_response)
return completion_response.alternatives[0].message.text
def _create_retry_decorator(llm: ChatYandexGPT) -> Callable[[Any], Any]:
from grpc import RpcError
min_seconds = llm.sleep_interval
max_seconds = 60
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=retry_if_exception_type(RpcError),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def completion_with_retry(llm: ChatYandexGPT, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _completion_with_retry(**_kwargs: Any) -> Any:
return _make_request(llm, **_kwargs)
return _completion_with_retry(**kwargs)
async def acompletion_with_retry(llm: ChatYandexGPT, **kwargs: Any) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
async def _completion_with_retry(**_kwargs: Any) -> Any:
return await _amake_request(llm, **_kwargs)
return await _completion_with_retry(**kwargs)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/gigachat.py | from __future__ import annotations
import logging
from typing import (
TYPE_CHECKING,
Any,
AsyncIterator,
Dict,
Iterator,
List,
Mapping,
Optional,
Type,
)
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
FunctionMessage,
FunctionMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_community.llms.gigachat import _BaseGigaChat
if TYPE_CHECKING:
import gigachat.models as gm
logger = logging.getLogger(__name__)
def _convert_dict_to_message(message: gm.Messages) -> BaseMessage:
from gigachat.models import FunctionCall, MessagesRole
additional_kwargs: Dict = {}
if function_call := message.function_call:
if isinstance(function_call, FunctionCall):
additional_kwargs["function_call"] = dict(function_call)
elif isinstance(function_call, dict):
additional_kwargs["function_call"] = function_call
if message.role == MessagesRole.SYSTEM:
return SystemMessage(content=message.content)
elif message.role == MessagesRole.USER:
return HumanMessage(content=message.content)
elif message.role == MessagesRole.ASSISTANT:
return AIMessage(content=message.content, additional_kwargs=additional_kwargs)
else:
raise TypeError(f"Got unknown role {message.role} {message}")
def _convert_message_to_dict(message: gm.BaseMessage) -> gm.Messages:
from gigachat.models import Messages, MessagesRole
if isinstance(message, SystemMessage):
return Messages(role=MessagesRole.SYSTEM, content=message.content)
elif isinstance(message, HumanMessage):
return Messages(role=MessagesRole.USER, content=message.content)
elif isinstance(message, AIMessage):
return Messages(
role=MessagesRole.ASSISTANT,
content=message.content,
function_call=message.additional_kwargs.get("function_call", None),
)
elif isinstance(message, ChatMessage):
return Messages(role=MessagesRole(message.role), content=message.content)
elif isinstance(message, FunctionMessage):
return Messages(role=MessagesRole.FUNCTION, content=message.content)
else:
raise TypeError(f"Got unknown type {message}")
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
role = _dict.get("role")
content = _dict.get("content") or ""
additional_kwargs: Dict = {}
if _dict.get("function_call"):
function_call = dict(_dict["function_call"])
if "name" in function_call and function_call["name"] is None:
function_call["name"] = ""
additional_kwargs["function_call"] = function_call
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content, additional_kwargs=additional_kwargs)
elif role == "system" or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role == "function" or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=content, name=_dict["name"])
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role) # type: ignore[arg-type]
else:
return default_class(content=content) # type: ignore[call-arg]
@deprecated(
since="0.3.5",
removal="1.0",
alternative_import="langchain_gigachat.GigaChat",
)
class GigaChat(_BaseGigaChat, BaseChatModel):
"""`GigaChat` large language models API.
To use, you should pass login and password to access GigaChat API or use token.
Example:
.. code-block:: python
from langchain_community.chat_models import GigaChat
giga = GigaChat(credentials=..., scope=..., verify_ssl_certs=False)
"""
def _build_payload(self, messages: List[BaseMessage], **kwargs: Any) -> gm.Chat:
from gigachat.models import Chat
payload = Chat(
messages=[_convert_message_to_dict(m) for m in messages],
)
payload.functions = kwargs.get("functions", None)
payload.model = self.model
if self.profanity_check is not None:
payload.profanity_check = self.profanity_check
if self.temperature is not None:
payload.temperature = self.temperature
if self.top_p is not None:
payload.top_p = self.top_p
if self.max_tokens is not None:
payload.max_tokens = self.max_tokens
if self.repetition_penalty is not None:
payload.repetition_penalty = self.repetition_penalty
if self.update_interval is not None:
payload.update_interval = self.update_interval
if self.verbose:
logger.warning("Giga request: %s", payload.dict())
return payload
def _create_chat_result(self, response: Any) -> ChatResult:
generations = []
for res in response.choices:
message = _convert_dict_to_message(res.message)
finish_reason = res.finish_reason
gen = ChatGeneration(
message=message,
generation_info={"finish_reason": finish_reason},
)
generations.append(gen)
if finish_reason != "stop":
logger.warning(
"Giga generation stopped with reason: %s",
finish_reason,
)
if self.verbose:
logger.warning("Giga response: %s", message.content)
llm_output = {"token_usage": response.usage, "model_name": response.model}
return ChatResult(generations=generations, llm_output=llm_output)
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
payload = self._build_payload(messages, **kwargs)
response = self._client.chat(payload)
return self._create_chat_result(response)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
payload = self._build_payload(messages, **kwargs)
response = await self._client.achat(payload)
return self._create_chat_result(response)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
payload = self._build_payload(messages, **kwargs)
for chunk in self._client.stream(payload):
if not isinstance(chunk, dict):
chunk = chunk.dict()
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
            content = choice.get("delta", {}).get("content", "")
chunk = _convert_delta_to_message_chunk(choice["delta"], AIMessageChunk)
finish_reason = choice.get("finish_reason")
generation_info = (
dict(finish_reason=finish_reason) if finish_reason is not None else None
)
if run_manager:
run_manager.on_llm_new_token(content)
yield ChatGenerationChunk(message=chunk, generation_info=generation_info)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
payload = self._build_payload(messages, **kwargs)
async for chunk in self._client.astream(payload):
if not isinstance(chunk, dict):
chunk = chunk.dict()
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
            content = choice.get("delta", {}).get("content", "")
chunk = _convert_delta_to_message_chunk(choice["delta"], AIMessageChunk)
finish_reason = choice.get("finish_reason")
generation_info = (
dict(finish_reason=finish_reason) if finish_reason is not None else None
)
if run_manager:
await run_manager.on_llm_new_token(content)
yield ChatGenerationChunk(message=chunk, generation_info=generation_info)
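# Hedged usage sketch (editor's addition, not part of the upstream module):
#
#     giga = GigaChat(credentials="...", scope="...", verify_ssl_certs=False)
#     for chunk in giga.stream([HumanMessage(content="Tell me a joke")]):
#         print(chunk.content, end="", flush=True)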
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/premai.py | """Wrapper around Prem's Chat API."""
from __future__ import annotations
import logging
import warnings
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from langchain_core.callbacks import (
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.language_models.llms import create_base_retry_decorator
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
ToolMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env, pre_init
from langchain_core.utils.function_calling import convert_to_openai_tool
from pydantic import (
BaseModel,
ConfigDict,
Field,
SecretStr,
)
if TYPE_CHECKING:
from premai.api.chat_completions.v1_chat_completions_create import (
ChatCompletionResponseStream,
)
from premai.models.chat_completion_response import ChatCompletionResponse
logger = logging.getLogger(__name__)
TOOL_PROMPT_HEADER = """
Given the set of tools you used and the response, provide the final answer\n
"""
INTERMEDIATE_TOOL_RESULT_TEMPLATE = """
{json}
"""
SINGLE_TOOL_PROMPT_TEMPLATE = """
tool id: {tool_id}
tool_response: {tool_response}
"""
class ChatPremAPIError(Exception):
"""Error with the `PremAI` API."""
def _truncate_at_stop_tokens(
text: str,
stop: Optional[List[str]],
) -> str:
"""Truncates text at the earliest stop token found."""
if stop is None:
return text
for stop_token in stop:
stop_token_idx = text.find(stop_token)
if stop_token_idx != -1:
text = text[:stop_token_idx]
return text
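# Hedged illustration (editor's note, not in the upstream source):
# _truncate_at_stop_tokens("Hello world", stop=["wor"]) returns "Hello ".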
def _response_to_result(
response: ChatCompletionResponse,
stop: Optional[List[str]],
) -> ChatResult:
"""Converts a Prem API response into a LangChain result"""
if not response.choices:
raise ChatPremAPIError("ChatResponse must have at least one candidate")
generations: List[ChatGeneration] = []
for choice in response.choices:
role = choice.message.role
if role is None:
raise ChatPremAPIError(f"ChatResponse {choice} must have a role.")
# If content is None then it will be replaced by ""
content = _truncate_at_stop_tokens(text=choice.message.content or "", stop=stop)
if content is None:
raise ChatPremAPIError(f"ChatResponse must have a content: {content}")
if role == "assistant":
tool_calls = choice.message["tool_calls"]
if tool_calls is None:
tools = []
else:
tools = [
{
"id": tool_call["id"],
"name": tool_call["function"]["name"],
"args": tool_call["function"]["arguments"],
}
for tool_call in tool_calls
]
generations.append(
ChatGeneration(
text=content, message=AIMessage(content=content, tool_calls=tools)
)
)
elif role == "user":
generations.append(
ChatGeneration(text=content, message=HumanMessage(content=content))
)
else:
generations.append(
ChatGeneration(
text=content, message=ChatMessage(role=role, content=content)
)
)
if response.document_chunks is not None:
return ChatResult(
generations=generations,
llm_output={
"document_chunks": [
chunk.to_dict() for chunk in response.document_chunks
]
},
)
else:
return ChatResult(generations=generations, llm_output={"document_chunks": None})
def _convert_delta_response_to_message_chunk(
response: ChatCompletionResponseStream, default_class: Type[BaseMessageChunk]
) -> Tuple[
Union[BaseMessageChunk, HumanMessageChunk, AIMessageChunk, SystemMessageChunk],
Optional[str],
]:
"""Converts delta response to message chunk"""
_delta = response.choices[0].delta # type: ignore
role = _delta.get("role", "") # type: ignore
content = _delta.get("content", "") # type: ignore
additional_kwargs: Dict = {}
finish_reasons: Optional[str] = response.choices[0].finish_reason
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content), finish_reasons
elif role == "assistant" or default_class == AIMessageChunk:
return (
AIMessageChunk(content=content, additional_kwargs=additional_kwargs),
finish_reasons,
)
elif role == "system" or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content), finish_reasons
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role), finish_reasons
else:
return default_class(content=content), finish_reasons # type: ignore[call-arg]
def _messages_to_prompt_dict(
input_messages: List[BaseMessage],
template_id: Optional[str] = None,
) -> Tuple[Optional[str], List[Dict[str, Any]]]:
"""Converts a list of LangChain Messages into a simple dict
which is the message structure in Prem"""
system_prompt: Optional[str] = None
examples_and_messages: List[Dict[str, Any]] = []
for input_msg in input_messages:
if isinstance(input_msg, SystemMessage):
system_prompt = str(input_msg.content)
elif isinstance(input_msg, HumanMessage):
if template_id is None:
examples_and_messages.append(
{"role": "user", "content": str(input_msg.content)}
)
else:
params: Dict[str, str] = {}
assert (input_msg.id is not None) and (input_msg.id != ""), ValueError(
"When using prompt template there should be id associated ",
"with each HumanMessage",
)
params[str(input_msg.id)] = str(input_msg.content)
examples_and_messages.append(
{"role": "user", "template_id": template_id, "params": params}
)
elif isinstance(input_msg, AIMessage):
if input_msg.tool_calls is None or len(input_msg.tool_calls) == 0:
examples_and_messages.append(
{"role": "assistant", "content": str(input_msg.content)}
)
else:
ai_msg_to_json = {
"id": input_msg.id,
"content": input_msg.content,
"response_metadata": input_msg.response_metadata,
"tool_calls": input_msg.tool_calls,
}
examples_and_messages.append(
{
"role": "assistant",
"content": INTERMEDIATE_TOOL_RESULT_TEMPLATE.format(
json=ai_msg_to_json,
),
}
)
elif isinstance(input_msg, ToolMessage):
pass
else:
raise ChatPremAPIError("No such role explicitly exists")
    # Do a separate pass to collect tool call results.
tool_prompt = ""
for input_msg in input_messages:
if isinstance(input_msg, ToolMessage):
tool_id = input_msg.tool_call_id
tool_result = input_msg.content
tool_prompt += SINGLE_TOOL_PROMPT_TEMPLATE.format(
tool_id=tool_id, tool_response=tool_result
)
if tool_prompt != "":
prompt = TOOL_PROMPT_HEADER
prompt += tool_prompt
examples_and_messages.append({"role": "user", "content": prompt})
return system_prompt, examples_and_messages
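# Hedged illustration (editor's note, not in the upstream source): for
# [SystemMessage(content="Be brief"), HumanMessage(content="Hi")] and no
# template_id this returns roughly
#     ("Be brief", [{"role": "user", "content": "Hi"}])
# i.e. the system prompt is split out and the rest become Prem message dicts.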
class ChatPremAI(BaseChatModel, BaseModel):
"""PremAI Chat models.
To use, you will need to have an API key. You can find your existing API Key
or generate a new one here: https://app.premai.io/api_keys/
"""
# TODO: Need to add the default parameters through prem-sdk here
project_id: int
"""The project ID in which the experiments or deployments are carried out.
You can find all your projects here: https://app.premai.io/projects/"""
premai_api_key: Optional[SecretStr] = Field(default=None, alias="api_key")
"""Prem AI API Key. Get it here: https://app.premai.io/api_keys/"""
model: Optional[str] = Field(default=None, alias="model_name")
"""Name of the model. This is an optional parameter.
The default model is the one deployed from Prem's LaunchPad: https://app.premai.io/projects/8/launchpad
    If a model name other than the default is provided, it overrides the model
    deployed from the LaunchPad."""
session_id: Optional[str] = None
"""The ID of the session to use. It helps to track the chat history."""
temperature: Optional[float] = Field(default=None)
"""Model temperature. Value should be >= 0 and <= 1.0"""
top_p: Optional[float] = None
"""top_p adjusts the number of choices for each predicted tokens based on
cumulative probabilities. Value should be ranging between 0.0 and 1.0.
"""
max_tokens: Optional[int] = Field(default=None)
"""The maximum number of tokens to generate"""
max_retries: int = Field(default=1)
"""Max number of retries to call the API"""
system_prompt: Optional[str] = ""
"""Acts like a default instruction that helps the LLM act or generate
in a specific way.This is an Optional Parameter. By default the
system prompt would be using Prem's Launchpad models system prompt.
Changing the system prompt would override the default system prompt.
"""
repositories: Optional[dict] = None
"""Add valid repository ids. This will be overriding existing connected
repositories (if any) and will use RAG with the connected repos.
"""
streaming: Optional[bool] = False
"""Whether to stream the responses or not."""
client: Any = None
model_config = ConfigDict(
populate_by_name=True,
arbitrary_types_allowed=True,
extra="forbid",
)
@pre_init
def validate_environments(cls, values: Dict) -> Dict:
"""Validate that the package is installed and that the API token is valid"""
try:
from premai import Prem
except ImportError as error:
raise ImportError(
"Could not import Prem Python package."
"Please install it with: `pip install premai`"
) from error
try:
premai_api_key: Union[str, SecretStr] = get_from_dict_or_env(
values, "premai_api_key", "PREMAI_API_KEY"
)
values["client"] = Prem(
api_key=premai_api_key
if isinstance(premai_api_key, str)
else premai_api_key._secret_value
)
except Exception as error:
raise ValueError("Your API Key is incorrect. Please try again.") from error
return values
@property
def _llm_type(self) -> str:
return "premai"
@property
def _default_params(self) -> Dict[str, Any]:
return {
"model": self.model,
"system_prompt": self.system_prompt,
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"repositories": self.repositories,
}
def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
kwargs_to_ignore = [
"top_p",
"frequency_penalty",
"presence_penalty",
"logit_bias",
"stop",
"seed",
]
keys_to_remove = []
for key in kwargs:
if key in kwargs_to_ignore:
warnings.warn(f"WARNING: Parameter {key} is not supported in kwargs.")
keys_to_remove.append(key)
for key in keys_to_remove:
kwargs.pop(key)
all_kwargs = {**self._default_params, **kwargs}
for key in list(self._default_params.keys()):
if all_kwargs.get(key) is None or all_kwargs.get(key) == "":
all_kwargs.pop(key, None)
return all_kwargs
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if "template_id" in kwargs:
system_prompt, messages_to_pass = _messages_to_prompt_dict(
messages, template_id=kwargs["template_id"]
)
else:
system_prompt, messages_to_pass = _messages_to_prompt_dict(messages) # type: ignore
if system_prompt is not None and system_prompt != "":
kwargs["system_prompt"] = system_prompt
all_kwargs = self._get_all_kwargs(**kwargs)
response = chat_with_retry(
self,
project_id=self.project_id,
messages=messages_to_pass,
stream=False,
run_manager=run_manager,
**all_kwargs,
)
return _response_to_result(response=response, stop=stop)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
if "template_id" in kwargs:
system_prompt, messages_to_pass = _messages_to_prompt_dict(
messages, template_id=kwargs["template_id"]
) # type: ignore
else:
system_prompt, messages_to_pass = _messages_to_prompt_dict(messages) # type: ignore
if stop is not None:
logger.warning("stop is not supported in langchain streaming")
if "system_prompt" not in kwargs:
if system_prompt is not None and system_prompt != "":
kwargs["system_prompt"] = system_prompt
all_kwargs = self._get_all_kwargs(**kwargs)
default_chunk_class = AIMessageChunk
for streamed_response in chat_with_retry(
self,
project_id=self.project_id,
messages=messages_to_pass,
stream=True,
run_manager=run_manager,
**all_kwargs,
):
try:
chunk, finish_reason = _convert_delta_response_to_message_chunk(
response=streamed_response, default_class=default_chunk_class
)
generation_info = (
dict(finish_reason=finish_reason)
if finish_reason is not None
else None
)
cg_chunk = ChatGenerationChunk(
message=chunk, generation_info=generation_info
)
if run_manager:
run_manager.on_llm_new_token(cg_chunk.text, chunk=cg_chunk)
yield cg_chunk
except Exception as _:
continue
def bind_tools(
self,
tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
return super().bind(tools=formatted_tools, **kwargs)
def create_prem_retry_decorator(
llm: ChatPremAI,
*,
max_retries: int = 1,
run_manager: Optional[Union[CallbackManagerForLLMRun]] = None,
) -> Callable[[Any], Any]:
"""Create a retry decorator for PremAI API errors."""
import premai.models
errors = [
premai.models.api_response_validation_error.APIResponseValidationError,
premai.models.conflict_error.ConflictError,
premai.models.model_not_found_error.ModelNotFoundError,
premai.models.permission_denied_error.PermissionDeniedError,
premai.models.provider_api_connection_error.ProviderAPIConnectionError,
premai.models.provider_api_status_error.ProviderAPIStatusError,
premai.models.provider_api_timeout_error.ProviderAPITimeoutError,
premai.models.provider_internal_server_error.ProviderInternalServerError,
premai.models.provider_not_found_error.ProviderNotFoundError,
premai.models.rate_limit_error.RateLimitError,
premai.models.unprocessable_entity_error.UnprocessableEntityError,
premai.models.validation_error.ValidationError,
]
decorator = create_base_retry_decorator(
error_types=errors, max_retries=max_retries, run_manager=run_manager
)
return decorator
def chat_with_retry(
llm: ChatPremAI,
project_id: int,
messages: List[dict],
stream: bool = False,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Using tenacity for retry in completion call"""
retry_decorator = create_prem_retry_decorator(
llm, max_retries=llm.max_retries, run_manager=run_manager
)
@retry_decorator
def _completion_with_retry(
project_id: int,
messages: List[dict],
stream: Optional[bool] = False,
**kwargs: Any,
) -> Any:
response = llm.client.chat.completions.create(
project_id=project_id,
messages=messages,
stream=stream,
**kwargs,
)
return response
return _completion_with_retry(
project_id=project_id,
messages=messages,
stream=stream,
**kwargs,
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/yuan2.py | """ChatYuan2 wrapper."""
from __future__ import annotations
import logging
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
)
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
FunctionMessage,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.utils import (
get_from_dict_or_env,
get_pydantic_field_names,
pre_init,
)
from pydantic import BaseModel, ConfigDict, Field, model_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
logger = logging.getLogger(__name__)
class ChatYuan2(BaseChatModel):
"""`Yuan2.0` Chat models API.
To use, you should have the ``openai-python`` package installed, if package
not installed, using ```pip install openai``` to install it. The
environment variable ``YUAN2_API_KEY`` set to your API key, if not set,
everyone can access apis.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatYuan2
chat = ChatYuan2()
"""
client: Any = None #: :meta private:
async_client: Any = Field(default=None, exclude=True) #: :meta private:
model_name: str = Field(default="yuan2", alias="model")
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
yuan2_api_key: Optional[str] = Field(default="EMPTY", alias="api_key")
"""Automatically inferred from env var `YUAN2_API_KEY` if not provided."""
yuan2_api_base: Optional[str] = Field(
default="http://127.0.0.1:8000/v1", alias="base_url"
)
"""Base URL path for API requests, an OpenAI compatible API server."""
request_timeout: Optional[Union[float, Tuple[float, float]]] = Field(
default=None, alias="timeout"
)
"""Timeout for requests to yuan2 completion API. Default is 600 seconds."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
max_tokens: Optional[int] = None
"""Maximum number of tokens to generate."""
temperature: float = 1.0
"""What sampling temperature to use."""
top_p: Optional[float] = 0.9
"""The top-p value to use for sampling."""
stop: Optional[List[str]] = Field(default=["<eod>"], alias="stop_sequences")
"""A list of strings to stop generation when encountered."""
repeat_last_n: Optional[int] = 64
"Last n tokens to penalize"
repeat_penalty: Optional[float] = 1.18
"""The penalty to apply to repeated tokens."""
model_config = ConfigDict(
populate_by_name=True,
)
@property
def lc_secrets(self) -> Dict[str, str]:
return {"yuan2_api_key": "YUAN2_API_KEY"}
@property
def lc_attributes(self) -> Dict[str, Any]:
attributes: Dict[str, Any] = {}
if self.yuan2_api_base:
attributes["yuan2_api_base"] = self.yuan2_api_base
if self.yuan2_api_key:
attributes["yuan2_api_key"] = self.yuan2_api_key
return attributes
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["yuan2_api_key"] = get_from_dict_or_env(
values, "yuan2_api_key", "YUAN2_API_KEY"
)
try:
import openai
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
client_params = {
"api_key": values["yuan2_api_key"],
"base_url": values["yuan2_api_base"],
"timeout": values["request_timeout"],
"max_retries": values["max_retries"],
}
# generate client and async_client
if not values.get("client"):
values["client"] = openai.OpenAI(**client_params).chat.completions
if not values.get("async_client"):
values["async_client"] = openai.AsyncOpenAI(
**client_params
).chat.completions
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling yuan2 API."""
params = {
"model": self.model_name,
"stream": self.streaming,
"temperature": self.temperature,
"top_p": self.top_p,
**self.model_kwargs,
}
if self.max_tokens is not None:
params["max_tokens"] = self.max_tokens
if self.request_timeout is not None:
params["request_timeout"] = self.request_timeout
return params
def completion_with_retry(self, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(self)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return self.client.create(**kwargs)
return _completion_with_retry(**kwargs)
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
overall_token_usage: dict = {}
logger.debug(
f"type(llm_outputs): {type(llm_outputs)}; llm_outputs: {llm_outputs}"
)
for output in llm_outputs:
if output is None:
# Happens in streaming
continue
token_usage = output["token_usage"]
for k, v in token_usage.items():
if k in overall_token_usage:
overall_token_usage[k] += v
else:
overall_token_usage[k] = v
return {"token_usage": overall_token_usage, "model_name": self.model_name}
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class = AIMessageChunk
for chunk in self.completion_with_retry(messages=message_dicts, **params):
if not isinstance(chunk, dict):
chunk = chunk.model_dump()
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
chunk = _convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
)
finish_reason = choice.get("finish_reason")
generation_info = (
dict(finish_reason=finish_reason) if finish_reason is not None else None
)
default_chunk_class = chunk.__class__
cg_chunk = ChatGenerationChunk(
message=chunk,
generation_info=generation_info,
)
if run_manager:
run_manager.on_llm_new_token(chunk.content, chunk=cg_chunk)
yield cg_chunk
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.completion_with_retry(messages=message_dicts, **params)
return self._create_chat_result(response)
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = dict(self._invocation_params)
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
generations = []
logger.debug(f"type(response): {type(response)}; response: {response}")
if not isinstance(response, dict):
response = response.dict()
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
generation_info = dict(finish_reason=res["finish_reason"])
if "logprobs" in res:
generation_info["logprobs"] = res["logprobs"]
gen = ChatGeneration(
message=message,
generation_info=generation_info,
)
generations.append(gen)
llm_output = {
"token_usage": response.get("usage", {}),
"model_name": self.model_name,
}
return ChatResult(generations=generations, llm_output=llm_output)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class = AIMessageChunk
async for chunk in await acompletion_with_retry(
self, messages=message_dicts, **params
):
if not isinstance(chunk, dict):
chunk = chunk.model_dump()
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
chunk = _convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
)
finish_reason = choice.get("finish_reason")
generation_info = (
dict(finish_reason=finish_reason) if finish_reason is not None else None
)
default_chunk_class = chunk.__class__
cg_chunk = ChatGenerationChunk(
message=chunk,
generation_info=generation_info,
)
if run_manager:
await run_manager.on_llm_new_token(chunk.content, chunk=cg_chunk)
yield cg_chunk
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._astream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = await acompletion_with_retry(self, messages=message_dicts, **params)
return self._create_chat_result(response)
@property
def _invocation_params(self) -> Mapping[str, Any]:
"""Get the parameters used to invoke the model."""
yuan2_creds: Dict[str, Any] = {
"model": self.model_name,
}
return {**yuan2_creds, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "chat-yuan2"
def _create_retry_decorator(llm: ChatYuan2) -> Callable[[Any], Any]:
import openai
min_seconds = 1
max_seconds = 60
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.APITimeoutError)
| retry_if_exception_type(openai.APIError)
| retry_if_exception_type(openai.APIConnectionError)
| retry_if_exception_type(openai.RateLimitError)
| retry_if_exception_type(openai.InternalServerError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
async def acompletion_with_retry(llm: ChatYuan2, **kwargs: Any) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.async_client.create(**kwargs)
return await _completion_with_retry(**kwargs)
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
role = _dict.get("role")
content = _dict.get("content") or ""
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content)
elif role == "system" or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role) # type: ignore[arg-type]
else:
return default_class(content=content) # type: ignore[call-arg]
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict.get("role")
if role == "user":
return HumanMessage(content=_dict.get("content", ""))
elif role == "assistant":
return AIMessage(content=_dict.get("content", ""))
elif role == "system":
return SystemMessage(content=_dict.get("content", ""))
else:
return ChatMessage(content=_dict.get("content", ""), role=role) # type: ignore[arg-type]
def _convert_message_to_dict(message: BaseMessage) -> dict:
"""Convert a LangChain message to a dictionary.
Args:
message: The LangChain message.
Returns:
The dictionary.
"""
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"name": message.name,
"content": message.content,
}
else:
raise ValueError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/volcengine_maas.py | from __future__ import annotations
from typing import Any, Dict, Iterator, List, Mapping, Optional, cast
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_community.llms.volcengine_maas import VolcEngineMaasBase
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, FunctionMessage):
message_dict = {"role": "function", "content": message.content}
else:
raise ValueError(f"Got unknown type {message}")
return message_dict
def convert_dict_to_message(_dict: Mapping[str, Any]) -> AIMessage:
"""Convert a dict to a message."""
content = _dict.get("choice", {}).get("message", {}).get("content", "")
return AIMessage(content=content)
class VolcEngineMaasChat(BaseChatModel, VolcEngineMaasBase):
"""Volc Engine Maas hosts a plethora of models.
You can utilize these models through this class.
To use, you should have the ``volcengine`` python package installed.
and set access key and secret key by environment variable or direct pass those
to this class.
access key, secret key are required parameters which you could get help
https://www.volcengine.com/docs/6291/65568
In order to use them, it is necessary to install the 'volcengine' Python package.
The access key and secret key must be set either via environment variables or
passed directly to this class.
access key and secret key are mandatory parameters for which assistance can be
sought at https://www.volcengine.com/docs/6291/65568.
The two methods are as follows:
* Environment Variable
Set the environment variables 'VOLC_ACCESSKEY' and 'VOLC_SECRETKEY' with your
access key and secret key.
* Pass Directly to Class
Example:
.. code-block:: python
from langchain_community.llms import VolcEngineMaasLLM
model = VolcEngineMaasChat(model="skylark-lite-public",
volc_engine_maas_ak="your_ak",
volc_engine_maas_sk="your_sk")
"""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "volc-engine-maas-chat"
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return False
@property
def _identifying_params(self) -> Dict[str, Any]:
return {
**{"endpoint": self.endpoint, "model": self.model},
**super()._identifying_params,
}
def _convert_prompt_msg_params(
self,
messages: List[BaseMessage],
**kwargs: Any,
) -> Dict[str, Any]:
model_req = {
"model": {
"name": self.model,
}
}
if self.model_version is not None:
model_req["model"]["version"] = self.model_version
return {
**model_req,
"messages": [_convert_message_to_dict(message) for message in messages],
"parameters": {**self._default_params, **kwargs},
}
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
if stop is not None:
kwargs["stop"] = stop
params = self._convert_prompt_msg_params(messages, **kwargs)
for res in self.client.stream_chat(params):
if res:
msg = convert_dict_to_message(res)
chunk = ChatGenerationChunk(message=AIMessageChunk(content=msg.content))
if run_manager:
run_manager.on_llm_new_token(cast(str, msg.content), chunk=chunk)
yield chunk
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
completion = ""
if self.streaming:
for chunk in self._stream(messages, stop, run_manager, **kwargs):
completion += chunk.text
else:
if stop is not None:
kwargs["stop"] = stop
params = self._convert_prompt_msg_params(messages, **kwargs)
res = self.client.chat(params)
msg = convert_dict_to_message(res)
completion = cast(str, msg.content)
message = AIMessage(content=completion)
return ChatResult(generations=[ChatGeneration(message=message)])
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/octoai.py | """OctoAI Endpoints chat wrapper. Relies heavily on ChatOpenAI."""
from typing import (
Any,
Callable,
Dict,
Literal,
Optional,
Sequence,
Type,
Union,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.messages import BaseMessage
from langchain_core.runnables import Runnable
from langchain_core.tools import BaseTool
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from langchain_core.utils.function_calling import convert_to_openai_tool
from pydantic import Field, SecretStr
from langchain_community.chat_models.openai import ChatOpenAI
from langchain_community.utils.openai import is_openai_v1
DEFAULT_API_BASE = "https://text.octoai.run/v1/"
DEFAULT_MODEL = "llama-2-13b-chat"
class ChatOctoAI(ChatOpenAI):
"""OctoAI Chat large language models.
See https://octo.ai/ for information about OctoAI.
To use, you should have the ``openai`` python package installed and the
environment variable ``OCTOAI_API_TOKEN`` set with your API token.
Alternatively, you can use the octoai_api_token keyword argument.
Any parameters that are valid to be passed to the `openai.create` call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatOctoAI
chat = ChatOctoAI(model_name="mixtral-8x7b-instruct")
"""
octoai_api_base: str = Field(default=DEFAULT_API_BASE)
octoai_api_token: SecretStr = Field(default=SecretStr(""), alias="api_key")
model_name: str = Field(default=DEFAULT_MODEL, alias="model")
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "octoai-chat"
@property
def lc_secrets(self) -> Dict[str, str]:
return {"octoai_api_token": "OCTOAI_API_TOKEN"}
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["octoai_api_base"] = get_from_dict_or_env(
values,
"octoai_api_base",
"OCTOAI_API_BASE",
default=DEFAULT_API_BASE,
)
values["octoai_api_token"] = convert_to_secret_str(
get_from_dict_or_env(values, "octoai_api_token", "OCTOAI_API_TOKEN")
)
values["model_name"] = get_from_dict_or_env(
values,
"model_name",
"MODEL_NAME",
default=DEFAULT_MODEL,
)
try:
import openai
if is_openai_v1():
client_params = {
"api_key": values["octoai_api_token"].get_secret_value(),
"base_url": values["octoai_api_base"],
}
if not values.get("client"):
values["client"] = openai.OpenAI(**client_params).chat.completions
if not values.get("async_client"):
values["async_client"] = openai.AsyncOpenAI(
**client_params
).chat.completions
else:
values["openai_api_base"] = values["octoai_api_base"]
values["openai_api_key"] = values["octoai_api_token"].get_secret_value()
values["client"] = openai.ChatCompletion # type: ignore[attr-defined]
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
def bind_tools(
self,
tools: Sequence[Union[Dict[str, Any], Type, Callable, BaseTool]],
*,
tool_choice: Optional[
Union[dict, str, Literal["auto", "none", "required", "any"], bool]
] = None,
strict: Optional[bool] = None,
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
"""Imitating bind_tool method from langchain_openai.ChatOpenAI"""
formatted_tools = [
convert_to_openai_tool(tool, strict=strict) for tool in tools
]
if tool_choice:
if isinstance(tool_choice, str):
# tool_choice is a tool/function name
if tool_choice not in ("auto", "none", "any", "required"):
tool_choice = {
"type": "function",
"function": {"name": tool_choice},
}
# 'any' is not natively supported by OpenAI API.
# We support 'any' since other models use this instead of 'required'.
if tool_choice == "any":
tool_choice = "required"
elif isinstance(tool_choice, bool):
tool_choice = "required"
elif isinstance(tool_choice, dict):
tool_names = [
formatted_tool["function"]["name"]
for formatted_tool in formatted_tools
]
if not any(
tool_name == tool_choice["function"]["name"]
for tool_name in tool_names
):
raise ValueError(
f"Tool choice {tool_choice} was specified, but the only "
f"provided tools were {tool_names}."
)
else:
raise ValueError(
f"Unrecognized tool_choice type. Expected str, bool or dict. "
f"Received: {tool_choice}"
)
kwargs["tool_choice"] = tool_choice
return super().bind(tools=formatted_tools, **kwargs)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/oci_data_science.py | # Copyright (c) 2024, Oracle and/or its affiliates.
"""Chat model for OCI data science model deployment endpoint."""
import importlib
import json
import logging
from operator import itemgetter
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Literal,
Optional,
Sequence,
Type,
Union,
)
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.messages import AIMessageChunk, BaseMessage, BaseMessageChunk
from langchain_core.output_parsers import (
JsonOutputParser,
PydanticOutputParser,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
from pydantic import BaseModel, Field, model_validator
from langchain_community.llms.oci_data_science_model_deployment_endpoint import (
DEFAULT_MODEL_NAME,
BaseOCIModelDeployment,
)
logger = logging.getLogger(__name__)
def _is_pydantic_class(obj: Any) -> bool:
return isinstance(obj, type) and issubclass(obj, BaseModel)
class ChatOCIModelDeployment(BaseChatModel, BaseOCIModelDeployment):
"""OCI Data Science Model Deployment chat model integration.
Setup:
Install ``oracle-ads`` and ``langchain-openai``.
.. code-block:: bash
pip install -U oracle-ads langchain-openai
Use `ads.set_auth()` to configure authentication.
For example, to use OCI resource_principal for authentication:
.. code-block:: python
import ads
ads.set_auth("resource_principal")
For more details on authentication, see:
https://accelerated-data-science.readthedocs.io/en/latest/user_guide/cli/authentication.html
Make sure to have the required policies to access the OCI Data
Science Model Deployment endpoint. See:
https://docs.oracle.com/en-us/iaas/data-science/using/model-dep-policies-auth.htm
Key init args - completion params:
endpoint: str
The OCI model deployment endpoint.
temperature: float
Sampling temperature.
max_tokens: Optional[int]
Max number of tokens to generate.
Key init args — client params:
auth: dict
ADS auth dictionary for OCI authentication.
Instantiate:
.. code-block:: python
from langchain_community.chat_models import ChatOCIModelDeployment
chat = ChatOCIModelDeployment(
endpoint="https://modeldeployment.<region>.oci.customer-oci.com/<ocid>/predict",
model="odsc-llm",
streaming=True,
max_retries=3,
model_kwargs={
"max_token": 512,
"temperature": 0.2,
# other model parameters ...
},
)
Invocation:
.. code-block:: python
messages = [
("system", "Translate the user sentence to French."),
("human", "Hello World!"),
]
chat.invoke(messages)
.. code-block:: python
AIMessage(
content='Bonjour le monde!',
response_metadata={
'token_usage': {
'prompt_tokens': 40,
'total_tokens': 50,
'completion_tokens': 10
},
'model_name': 'odsc-llm',
'system_fingerprint': '',
'finish_reason': 'stop'
},
id='run-cbed62da-e1b3-4abd-9df3-ec89d69ca012-0'
)
Streaming:
.. code-block:: python
for chunk in chat.stream(messages):
print(chunk)
.. code-block:: python
content='' id='run-02c6-c43f-42de'
content='\n' id='run-02c6-c43f-42de'
content='B' id='run-02c6-c43f-42de'
content='on' id='run-02c6-c43f-42de'
content='j' id='run-02c6-c43f-42de'
content='our' id='run-02c6-c43f-42de'
content=' le' id='run-02c6-c43f-42de'
content=' monde' id='run-02c6-c43f-42de'
content='!' id='run-02c6-c43f-42de'
content='' response_metadata={'finish_reason': 'stop'} id='run-02c6-c43f-42de'
Async:
.. code-block:: python
await chat.ainvoke(messages)
# stream:
# async for chunk in (await chat.astream(messages))
.. code-block:: python
AIMessage(
content='Bonjour le monde!',
response_metadata={'finish_reason': 'stop'},
id='run-8657a105-96b7-4bb6-b98e-b69ca420e5d1-0'
)
Structured output:
.. code-block:: python
from typing import Optional
from pydantic import BaseModel, Field
class Joke(BaseModel):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
structured_llm = chat.with_structured_output(Joke, method="json_mode")
structured_llm.invoke(
"Tell me a joke about cats, "
"respond in JSON with `setup` and `punchline` keys"
)
.. code-block:: python
Joke(
setup='Why did the cat get stuck in the tree?',
punchline='Because it was chasing its tail!'
)
See ``ChatOCIModelDeployment.with_structured_output()`` for more.
Customized Usage:
You can inherit from base class and overwrite the `_process_response`,
`_process_stream_response`, `_construct_json_body` for customized usage.
.. code-block:: python
class MyChatModel(ChatOCIModelDeployment):
def _process_stream_response(self, response_json: dict) -> ChatGenerationChunk:
print("My customized streaming result handler.")
return GenerationChunk(...)
def _process_response(self, response_json:dict) -> ChatResult:
print("My customized output handler.")
return ChatResult(...)
def _construct_json_body(self, messages: list, params: dict) -> dict:
print("My customized payload handler.")
return {
"messages": messages,
**params,
}
chat = MyChatModel(
endpoint=f"https://modeldeployment.<region>.oci.customer-oci.com/{ocid}/predict",
model="odsc-llm",
}
chat.invoke("tell me a joke")
Response metadata
.. code-block:: python
ai_msg = chat.invoke(messages)
ai_msg.response_metadata
.. code-block:: python
{
'token_usage': {
'prompt_tokens': 40,
'total_tokens': 50,
'completion_tokens': 10
},
'model_name': 'odsc-llm',
'system_fingerprint': '',
'finish_reason': 'stop'
}
""" # noqa: E501
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Keyword arguments to pass to the model."""
model: str = DEFAULT_MODEL_NAME
"""The name of the model."""
stop: Optional[List[str]] = None
"""Stop words to use when generating. Model output is cut off
at the first occurrence of any of these substrings."""
@model_validator(mode="before")
@classmethod
def validate_openai(cls, values: Any) -> Any:
"""Checks if langchain_openai is installed."""
if not importlib.util.find_spec("langchain_openai"):
raise ImportError(
"Could not import langchain_openai package. "
"Please install it with `pip install langchain_openai`."
)
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "oci_model_depolyment_chat_endpoint"
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint": self.endpoint, "model_kwargs": _model_kwargs},
**self._default_params,
}
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters."""
return {
"model": self.model,
"stop": self.stop,
"stream": self.streaming,
}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Call out to an OCI Model Deployment Online endpoint.
Args:
messages: The messages in the conversation with the chat model.
stop: Optional list of stop words to use when generating.
Returns:
LangChain ChatResult
Raises:
RuntimeError:
Raise when invoking endpoint fails.
Example:
.. code-block:: python
messages = [
(
"system",
"You are a helpful assistant that translates English to French. Translate the user sentence.",
),
("human", "Hello World!"),
]
response = chat.invoke(messages)
""" # noqa: E501
if self.streaming:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
requests_kwargs = kwargs.pop("requests_kwargs", {})
params = self._invocation_params(stop, **kwargs)
body = self._construct_json_body(messages, params)
res = self.completion_with_retry(
data=body, run_manager=run_manager, **requests_kwargs
)
return self._process_response(res.json())
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
"""Stream OCI Data Science Model Deployment endpoint on given messages.
Args:
messages (List[BaseMessage]):
The messagaes to pass into the model.
stop (List[str], Optional):
List of stop words to use when generating.
kwargs:
requests_kwargs:
Additional ``**kwargs`` to pass to requests.post
Returns:
An iterator of ChatGenerationChunk.
Raises:
RuntimeError:
Raise when invoking endpoint fails.
Example:
.. code-block:: python
messages = [
(
"system",
"You are a helpful assistant that translates English to French. Translate the user sentence.",
),
("human", "Hello World!"),
]
chunk_iter = chat.stream(messages)
""" # noqa: E501
requests_kwargs = kwargs.pop("requests_kwargs", {})
self.streaming = True
params = self._invocation_params(stop, **kwargs)
body = self._construct_json_body(messages, params) # request json body
response = self.completion_with_retry(
data=body, run_manager=run_manager, stream=True, **requests_kwargs
)
default_chunk_class = AIMessageChunk
for line in self._parse_stream(response.iter_lines()):
chunk = self._handle_sse_line(line, default_chunk_class)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Asynchronously call out to OCI Data Science Model Deployment
endpoint on given messages.
Args:
messages (List[BaseMessage]):
The messagaes to pass into the model.
stop (List[str], Optional):
List of stop words to use when generating.
kwargs:
requests_kwargs:
Additional ``**kwargs`` to pass to requests.post
Returns:
LangChain ChatResult.
Raises:
ValueError:
Raise when invoking endpoint fails.
Example:
.. code-block:: python
messages = [
(
"system",
"You are a helpful assistant that translates English to French. Translate the user sentence.",
),
("human", "I love programming."),
]
resp = await chat.ainvoke(messages)
""" # noqa: E501
if self.streaming:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
requests_kwargs = kwargs.pop("requests_kwargs", {})
params = self._invocation_params(stop, **kwargs)
body = self._construct_json_body(messages, params)
response = await self.acompletion_with_retry(
data=body,
run_manager=run_manager,
**requests_kwargs,
)
return self._process_response(response)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
"""Asynchronously streaming OCI Data Science Model Deployment
endpoint on given messages.
Args:
messages (List[BaseMessage]):
The messagaes to pass into the model.
stop (List[str], Optional):
List of stop words to use when generating.
kwargs:
requests_kwargs:
Additional ``**kwargs`` to pass to requests.post
Returns:
An Asynciterator of ChatGenerationChunk.
Raises:
ValueError:
Raise when invoking endpoint fails.
Example:
.. code-block:: python
messages = [
(
"system",
"You are a helpful assistant that translates English to French. Translate the user sentence.",
),
("human", "I love programming."),
]
chunk_iter = await chat.astream(messages)
""" # noqa: E501
requests_kwargs = kwargs.pop("requests_kwargs", {})
self.streaming = True
params = self._invocation_params(stop, **kwargs)
body = self._construct_json_body(messages, params) # request json body
default_chunk_class = AIMessageChunk
async for line in await self.acompletion_with_retry(
data=body, run_manager=run_manager, stream=True, **requests_kwargs
):
chunk = self._handle_sse_line(line, default_chunk_class)
if run_manager:
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
def with_structured_output(
self,
schema: Optional[Union[Dict, Type[BaseModel]]] = None,
*,
method: Literal["json_mode"] = "json_mode",
include_raw: bool = False,
**kwargs: Any,
) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
"""Model wrapper that returns outputs formatted to match the given schema.
Args:
schema: The output schema as a dict or a Pydantic class. If a Pydantic class
then the model output will be an object of that class. If a dict then
the model output will be a dict. With a Pydantic class the returned
attributes will be validated, whereas with a dict they will not be. If
`method` is "function_calling" and `schema` is a dict, then the dict
must match the OpenAI function-calling spec.
method: The method for steering model generation, currently only support
for "json_mode". If "json_mode" then JSON mode will be used. Note that
if using "json_mode" then you must include instructions for formatting
the output into the desired schema into the model call.
include_raw: If False then only the parsed structured output is returned. If
an error occurs during model output parsing it will be raised. If True
then both the raw model response (a BaseMessage) and the parsed model
response will be returned. If an error occurs during output parsing it
will be caught and returned as well. The final output is always a dict
with keys "raw", "parsed", and "parsing_error".
Returns:
A Runnable that takes any ChatModel input and returns as output:
If include_raw is True then a dict with keys:
raw: BaseMessage
parsed: Optional[_DictOrPydantic]
parsing_error: Optional[BaseException]
If include_raw is False then just _DictOrPydantic is returned,
where _DictOrPydantic depends on the schema:
If schema is a Pydantic class then _DictOrPydantic is the Pydantic
class.
If schema is a dict then _DictOrPydantic is a dict.
""" # noqa: E501
if kwargs:
raise ValueError(f"Received unsupported arguments {kwargs}")
is_pydantic_schema = _is_pydantic_class(schema)
if method == "json_mode":
llm = self.bind(response_format={"type": "json_object"})
output_parser = (
PydanticOutputParser(pydantic_object=schema) # type: ignore[type-var, arg-type]
if is_pydantic_schema
else JsonOutputParser()
)
else:
raise ValueError(
f"Unrecognized method argument. Expected `json_mode`."
f"Received: `{method}`."
)
if include_raw:
parser_assign = RunnablePassthrough.assign(
parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
)
parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
parser_with_fallback = parser_assign.with_fallbacks(
[parser_none], exception_key="parsing_error"
)
return RunnableMap(raw=llm) | parser_with_fallback
else:
return llm | output_parser
def _invocation_params(self, stop: Optional[List[str]], **kwargs: Any) -> dict:
"""Combines the invocation parameters with default parameters."""
params = self._default_params
_model_kwargs = self.model_kwargs or {}
params["stop"] = stop or params.get("stop", [])
return {**params, **_model_kwargs, **kwargs}
def _handle_sse_line(
self, line: str, default_chunk_cls: Type[BaseMessageChunk] = AIMessageChunk
) -> ChatGenerationChunk:
"""Handle a single Server-Sent Events (SSE) line and process it into
a chat generation chunk.
Args:
line (str): A single line from the SSE stream in string format.
default_chunk_cls (AIMessageChunk): The default class for message
chunks to be used during the processing of the stream response.
Returns:
ChatGenerationChunk: The processed chat generation chunk. If an error
occurs, an empty `ChatGenerationChunk` is returned.
"""
try:
obj = json.loads(line)
return self._process_stream_response(obj, default_chunk_cls)
except Exception as e:
logger.debug(f"Error occurs when processing line={line}: {str(e)}")
return ChatGenerationChunk(message=AIMessageChunk(content=""))
def _construct_json_body(self, messages: list, params: dict) -> dict:
"""Constructs the request body as a dictionary (JSON).
Args:
messages (list): A list of message objects to be included in the
request body.
params (dict): A dictionary of additional parameters to be included
in the request body.
Returns:
dict: A dictionary representing the JSON request body, including
converted messages and additional parameters.
"""
from langchain_openai.chat_models.base import _convert_message_to_dict
return {
"messages": [_convert_message_to_dict(m) for m in messages],
**params,
}
def _process_stream_response(
self,
response_json: dict,
default_chunk_cls: Type[BaseMessageChunk] = AIMessageChunk,
) -> ChatGenerationChunk:
"""Formats streaming response in OpenAI spec.
Args:
response_json (dict): The JSON response from the streaming endpoint.
default_chunk_cls (type, optional): The default class to use for
creating message chunks. Defaults to `AIMessageChunk`.
Returns:
ChatGenerationChunk: An object containing the processed message
chunk and any relevant generation information such as finish
reason and usage.
Raises:
ValueError: If the response JSON is not well-formed or does not
contain the expected structure.
"""
from langchain_openai.chat_models.base import _convert_delta_to_message_chunk
try:
choice = response_json["choices"][0]
if not isinstance(choice, dict):
raise TypeError("Endpoint response is not well formed.")
except (KeyError, IndexError, TypeError) as e:
raise ValueError(
"Error while formatting response payload for chat model of type"
) from e
chunk = _convert_delta_to_message_chunk(choice["delta"], default_chunk_cls)
default_chunk_cls = chunk.__class__
finish_reason = choice.get("finish_reason")
usage = choice.get("usage")
gen_info = {}
if finish_reason is not None:
gen_info.update({"finish_reason": finish_reason})
if usage is not None:
gen_info.update({"usage": usage})
return ChatGenerationChunk(
message=chunk, generation_info=gen_info if gen_info else None
)
def _process_response(self, response_json: dict) -> ChatResult:
"""Formats response in OpenAI spec.
Args:
response_json (dict): The JSON response from the chat model endpoint.
Returns:
ChatResult: An object containing the list of `ChatGeneration` objects
and additional LLM output information.
Raises:
ValueError: If the response JSON is not well-formed or does not
contain the expected structure.
"""
from langchain_openai.chat_models.base import _convert_dict_to_message
generations = []
try:
choices = response_json["choices"]
if not isinstance(choices, list):
raise TypeError("Endpoint response is not well formed.")
except (KeyError, TypeError) as e:
raise ValueError(
"Error while formatting response payload for chat model of type"
) from e
for choice in choices:
message = _convert_dict_to_message(choice["message"])
generation_info = dict(finish_reason=choice.get("finish_reason"))
if "logprobs" in choice:
generation_info["logprobs"] = choice["logprobs"]
gen = ChatGeneration(
message=message,
generation_info=generation_info,
)
generations.append(gen)
token_usage = response_json.get("usage", {})
llm_output = {
"token_usage": token_usage,
"model_name": self.model,
"system_fingerprint": response_json.get("system_fingerprint", ""),
}
return ChatResult(generations=generations, llm_output=llm_output)
def bind_tools(
self,
tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
return super().bind(tools=formatted_tools, **kwargs)
class ChatOCIModelDeploymentVLLM(ChatOCIModelDeployment):
"""OCI large language chat models deployed with vLLM.
To use, you must provide the model HTTP endpoint from your deployed
model, e.g. https://modeldeployment.us-ashburn-1.oci.customer-oci.com/<ocid>/predict.
To authenticate, `oracle-ads` has been used to automatically load
credentials: https://accelerated-data-science.readthedocs.io/en/latest/user_guide/cli/authentication.html
Make sure to have the required policies to access the OCI Data
Science Model Deployment endpoint. See:
https://docs.oracle.com/en-us/iaas/data-science/using/model-dep-policies-auth.htm#model_dep_policies_auth__predict-endpoint
Example:
.. code-block:: python
from langchain_community.chat_models import ChatOCIModelDeploymentVLLM
chat = ChatOCIModelDeploymentVLLM(
endpoint="https://modeldeployment.us-ashburn-1.oci.customer-oci.com/<ocid>/predict",
frequency_penalty=0.1,
max_tokens=512,
temperature=0.2,
top_p=1.0,
# other model parameters...
)
""" # noqa: E501
frequency_penalty: float = 0.0
"""Penalizes repeated tokens according to frequency. Between 0 and 1."""
logit_bias: Optional[Dict[str, float]] = None
"""Adjust the probability of specific tokens being generated."""
max_tokens: Optional[int] = 256
"""The maximum number of tokens to generate in the completion."""
n: int = 1
"""Number of output sequences to return for the given prompt."""
presence_penalty: float = 0.0
"""Penalizes repeated tokens. Between 0 and 1."""
temperature: float = 0.2
"""What sampling temperature to use."""
top_p: float = 1.0
"""Total probability mass of tokens to consider at each step."""
best_of: Optional[int] = None
"""Generates best_of completions server-side and returns the "best"
(the one with the highest log probability per token).
"""
use_beam_search: Optional[bool] = False
"""Whether to use beam search instead of sampling."""
top_k: Optional[int] = -1
"""Number of most likely tokens to consider at each step."""
min_p: Optional[float] = 0.0
"""Float that represents the minimum probability for a token to be considered.
Must be in [0,1]. 0 to disable this."""
repetition_penalty: Optional[float] = 1.0
"""Float that penalizes new tokens based on their frequency in the
generated text. Values > 1 encourage the model to use new tokens."""
length_penalty: Optional[float] = 1.0
"""Float that penalizes sequences based on their length. Used only
when `use_beam_search` is True."""
early_stopping: Optional[bool] = False
"""Controls the stopping condition for beam search. It accepts the
following values: `True`, where the generation stops as soon as there
are `best_of` complete candidates; `False`, where a heuristic is applied
to the generation stops when it is very unlikely to find better candidates;
`never`, where the beam search procedure only stops where there cannot be
better candidates (canonical beam search algorithm)."""
ignore_eos: Optional[bool] = False
"""Whether to ignore the EOS token and continue generating tokens after
the EOS token is generated."""
min_tokens: Optional[int] = 0
"""Minimum number of tokens to generate per output sequence before
EOS or stop_token_ids can be generated"""
stop_token_ids: Optional[List[int]] = None
"""List of tokens that stop the generation when they are generated.
The returned output will contain the stop tokens unless the stop tokens
are special tokens."""
skip_special_tokens: Optional[bool] = True
"""Whether to skip special tokens in the output. Defaults to True."""
spaces_between_special_tokens: Optional[bool] = True
"""Whether to add spaces between special tokens in the output.
Defaults to True."""
tool_choice: Optional[str] = None
"""Whether to use tool calling.
Defaults to None, tool calling is disabled.
Tool calling requires model support and the vLLM to be configured
with `--tool-call-parser`.
Set this to `auto` for the model to make tool calls automatically.
Set this to `required` to force the model to always call one or more tools.
"""
chat_template: Optional[str] = None
"""Use customized chat template.
Defaults to None. The chat template from the tokenizer will be used.
"""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "oci_model_depolyment_chat_endpoint_vllm"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters."""
params = {
"model": self.model,
"stop": self.stop,
"stream": self.streaming,
}
for attr_name in self._get_model_params():
try:
value = getattr(self, attr_name)
if value is not None:
params.update({attr_name: value})
except Exception:
pass
return params
def _get_model_params(self) -> List[str]:
"""Gets the name of model parameters."""
return [
"best_of",
"early_stopping",
"frequency_penalty",
"ignore_eos",
"length_penalty",
"logit_bias",
"logprobs",
"max_tokens",
"min_p",
"min_tokens",
"n",
"presence_penalty",
"repetition_penalty",
"skip_special_tokens",
"spaces_between_special_tokens",
"stop_token_ids",
"temperature",
"top_k",
"top_p",
"use_beam_search",
"tool_choice",
"chat_template",
]
class ChatOCIModelDeploymentTGI(ChatOCIModelDeployment):
"""OCI large language chat models deployed with Text Generation Inference.
To use, you must provide the model HTTP endpoint from your deployed
model, e.g. https://modeldeployment.us-ashburn-1.oci.customer-oci.com/<ocid>/predict.
To authenticate, `oracle-ads` has been used to automatically load
credentials: https://accelerated-data-science.readthedocs.io/en/latest/user_guide/cli/authentication.html
Make sure to have the required policies to access the OCI Data
Science Model Deployment endpoint. See:
https://docs.oracle.com/en-us/iaas/data-science/using/model-dep-policies-auth.htm#model_dep_policies_auth__predict-endpoint
Example:
.. code-block:: python
from langchain_community.chat_models import ChatOCIModelDeploymentTGI
chat = ChatOCIModelDeploymentTGI(
endpoint="https://modeldeployment.us-ashburn-1.oci.customer-oci.com/<ocid>/predict",
max_token=512,
temperature=0.2,
frequency_penalty=0.1,
seed=42,
# other model parameters...
)
""" # noqa: E501
frequency_penalty: Optional[float] = None
"""Penalizes repeated tokens according to frequency. Between 0 and 1."""
logit_bias: Optional[Dict[str, float]] = None
"""Adjust the probability of specific tokens being generated."""
logprobs: Optional[bool] = None
"""Whether to return log probabilities of the output tokens or not."""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion."""
n: int = 1
"""Number of output sequences to return for the given prompt."""
presence_penalty: Optional[float] = None
"""Penalizes repeated tokens. Between 0 and 1."""
seed: Optional[int] = None
"""To sample deterministically,"""
temperature: float = 0.2
"""What sampling temperature to use."""
top_p: Optional[float] = None
"""Total probability mass of tokens to consider at each step."""
top_logprobs: Optional[int] = None
"""An integer between 0 and 5 specifying the number of most
likely tokens to return at each token position, each with an
associated log probability. logprobs must be set to true if
this parameter is used."""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "oci_model_depolyment_chat_endpoint_tgi"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters."""
params = {
"model": self.model,
"stop": self.stop,
"stream": self.streaming,
}
for attr_name in self._get_model_params():
try:
value = getattr(self, attr_name)
if value is not None:
params.update({attr_name: value})
except Exception:
pass
return params
def _get_model_params(self) -> List[str]:
"""Gets the name of model parameters."""
return [
"frequency_penalty",
"logit_bias",
"logprobs",
"max_tokens",
"n",
"presence_penalty",
"seed",
"temperature",
"top_k",
"top_p",
"top_logprobs",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/reka.py | import json
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Type,
Union,
)
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
HumanMessage,
SystemMessage,
ToolMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from langchain_core.utils.function_calling import convert_to_openai_tool
from pydantic import BaseModel, ConfigDict, Field, model_validator
DEFAULT_REKA_MODEL = "reka-flash"
ContentType = Union[str, List[Union[str, Dict[str, Any]]]]
def process_content_item(item: Dict[str, Any]) -> Dict[str, Any]:
"""Process a single content item."""
if item["type"] == "image_url":
image_url = item["image_url"]
if isinstance(image_url, dict) and "url" in image_url:
# If it's in LangChain format, extract the URL value
item["image_url"] = image_url["url"]
return item
def process_content(content: ContentType) -> List[Dict[str, Any]]:
"""Process content to handle both text and media inputs,
returning a list of content items."""
if isinstance(content, str):
return [{"type": "text", "text": content}]
elif isinstance(content, list):
result = []
for item in content:
if isinstance(item, str):
result.append({"type": "text", "text": item})
elif isinstance(item, dict):
result.append(process_content_item(item))
else:
raise ValueError(f"Invalid content item format: {item}")
return result
else:
raise ValueError("Invalid content format")
def convert_to_reka_messages(messages: List[BaseMessage]) -> List[Dict[str, Any]]:
"""Convert LangChain messages to Reka message format."""
reka_messages: List[Dict[str, Any]] = []
system_message: Optional[str] = None
for message in messages:
if isinstance(message, SystemMessage):
if system_message is None:
if isinstance(message.content, str):
system_message = message.content
else:
raise TypeError("SystemMessage content must be a string.")
else:
raise ValueError("Multiple system messages are not supported.")
elif isinstance(message, HumanMessage):
processed_content = process_content(message.content)
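# Reka has no separate system role here: any pending system message is
# folded into this (first) user turn, either prepended to its leading
# text item or inserted as a new text item.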
if system_message:
if (
processed_content
and isinstance(processed_content[0], dict)
and processed_content[0].get("type") == "text"
and "text" in processed_content[0]
):
processed_content[0]["text"] = (
f"{system_message}\n{processed_content[0]['text']}"
)
else:
processed_content.insert(
0, {"type": "text", "text": system_message}
)
system_message = None
reka_messages.append({"role": "user", "content": processed_content})
elif isinstance(message, AIMessage):
reka_message: Dict[str, Any] = {"role": "assistant"}
if message.content:
processed_content = process_content(message.content)
reka_message["content"] = processed_content
if "tool_calls" in message.additional_kwargs:
tool_calls = message.additional_kwargs["tool_calls"]
formatted_tool_calls = []
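# OpenAI-style tool calls carry a JSON string under function.arguments;
# Reka expects the arguments as a parsed dict under "parameters".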
for tool_call in tool_calls:
formatted_tool_call = {
"id": tool_call["id"],
"name": tool_call["function"]["name"],
"parameters": json.loads(tool_call["function"]["arguments"]),
}
formatted_tool_calls.append(formatted_tool_call)
reka_message["tool_calls"] = formatted_tool_calls
reka_messages.append(reka_message)
elif isinstance(message, ToolMessage):
content_list: List[Dict[str, Any]] = []
content_list.append(
{
"tool_call_id": message.tool_call_id,
"output": json.dumps({"status": message.content}),
}
)
reka_messages.append(
{
"role": "tool_output",
"content": content_list,
}
)
else:
raise ValueError(f"Unsupported message type: {type(message)}")
return reka_messages
class ChatReka(BaseChatModel):
"""Reka chat large language models."""
client: Any = None #: :meta private:
async_client: Any = None #: :meta private:
model: str = Field(default=DEFAULT_REKA_MODEL)
max_tokens: int = Field(default=256)
temperature: Optional[float] = None
streaming: bool = False
default_request_timeout: Optional[float] = None
max_retries: int = 2
reka_api_key: Optional[str] = None
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
model_config = ConfigDict(extra="forbid")
token_counter: Optional[
Callable[[Union[str, BaseMessage, List[BaseMessage]]], int]
] = None
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate that API key and Python package exist in the environment."""
reka_api_key = values.get("reka_api_key")
reka_api_key = get_from_dict_or_env(
{"reka_api_key": reka_api_key}, "reka_api_key", "REKA_API_KEY"
)
values["reka_api_key"] = reka_api_key
try:
# Import reka libraries here
from reka.client import AsyncReka, Reka
values["client"] = Reka(
api_key=reka_api_key,
)
values["async_client"] = AsyncReka(
api_key=reka_api_key,
)
except ImportError:
raise ImportError(
"Could not import Reka Python package. "
"Please install it with `pip install reka-api`."
)
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling Reka API."""
params = {
"model": self.model,
"max_tokens": self.max_tokens,
}
if self.temperature is not None:
params["temperature"] = self.temperature
return {**params, **self.model_kwargs}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "reka-chat"
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
reka_messages = convert_to_reka_messages(messages)
params = {**self._default_params, **kwargs}
if stop:
params["stop"] = stop
stream = self.client.chat.create_stream(messages=reka_messages, **params)
for chunk in stream:
content = chunk.responses[0].chunk.content
chat_chunk = ChatGenerationChunk(message=AIMessageChunk(content=content))
if run_manager:
run_manager.on_llm_new_token(content, chunk=chat_chunk)
yield chat_chunk
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
reka_messages = convert_to_reka_messages(messages)
params = {**self._default_params, **kwargs}
if stop:
params["stop"] = stop
stream = self.async_client.chat.create_stream(messages=reka_messages, **params)
async for chunk in stream:
content = chunk.responses[0].chunk.content
chat_chunk = ChatGenerationChunk(message=AIMessageChunk(content=content))
if run_manager:
await run_manager.on_llm_new_token(content, chunk=chat_chunk)
yield chat_chunk
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
return generate_from_stream(
self._stream(messages, stop=stop, run_manager=run_manager, **kwargs)
)
reka_messages = convert_to_reka_messages(messages)
params = {**self._default_params, **kwargs}
if stop:
params["stop"] = stop
response = self.client.chat.create(messages=reka_messages, **params)
if response.responses[0].message.tool_calls:
tool_calls = response.responses[0].message.tool_calls
message = AIMessage(
content="", # Empty string instead of None
additional_kwargs={
"tool_calls": [
{
"id": tc.id,
"type": "function",
"function": {
"name": tc.name,
"arguments": json.dumps(tc.parameters),
},
}
for tc in tool_calls
]
},
)
else:
content = response.responses[0].message.content
# Ensure content is never None
message = AIMessage(content=content if content is not None else "")
return ChatResult(generations=[ChatGeneration(message=message)])
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
return await agenerate_from_stream(
self._astream(messages, stop=stop, run_manager=run_manager, **kwargs)
)
reka_messages = convert_to_reka_messages(messages)
params = {**self._default_params, **kwargs}
if stop:
params["stop"] = stop
response = await self.async_client.chat.create(messages=reka_messages, **params)
if response.responses[0].message.tool_calls:
tool_calls = response.responses[0].message.tool_calls
message = AIMessage(
content="", # Empty string instead of None
additional_kwargs={
"tool_calls": [
{
"id": tc.id,
"type": "function",
"function": {
"name": tc.name,
"arguments": json.dumps(tc.parameters),
},
}
for tc in tool_calls
]
},
)
else:
content = response.responses[0].message.content
# Ensure content is never None
message = AIMessage(content=content if content is not None else "")
return ChatResult(generations=[ChatGeneration(message=message)])
def get_num_tokens(self, input: Union[str, BaseMessage, List[BaseMessage]]) -> int:
"""Calculate number of tokens.
Args:
input: Either a string, a single BaseMessage, or a list of BaseMessages.
Returns:
int: Number of tokens in the input.
Raises:
ImportError: If tiktoken is not installed.
ValueError: If message content is not a string.
"""
if self.token_counter is not None:
return self.token_counter(input)
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"Please install it with `pip install tiktoken`."
)
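# cl100k_base is only an approximation here: Reka does not expose a
# tiktoken-compatible tokenizer, so these counts are rough estimates.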
encoding = tiktoken.get_encoding("cl100k_base")
if isinstance(input, str):
return len(encoding.encode(input))
elif isinstance(input, BaseMessage):
content = input.content
if not isinstance(content, str):
raise ValueError(
f"Message content must be a string, got {type(content)}"
)
return len(encoding.encode(content))
elif isinstance(input, list):
total = 0
for msg in input:
content = msg.content
if not isinstance(content, str):
raise ValueError(
f"Message content must be a string, got {type(content)}"
)
total += len(encoding.encode(content))
return total
else:
raise TypeError(f"Unsupported input type: {type(input)}")
def bind_tools(
self,
tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
*,
tool_choice: str = "auto",
strict: Optional[bool] = None,
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
"""Bind tool-like objects to this chat model.
The `tool_choice` parameter controls how the model uses the tools you pass.
There are three available options:
- `"auto"`: Lets the model decide whether or not to invoke a tool. This is the
recommended way to do function calling with Reka models.
- `"none"`: Disables tool calling. In this case, even if you pass tools to
the model, the model will not invoke any tools.
- `"tool"`: Forces the model to invoke one or more of the tools it has
been passed.
Args:
tools: A list of tool definitions to bind to this chat model.
Supports any tool definition handled by
:meth:`langchain_core.utils.function_calling.convert_to_openai_tool`.
tool_choice: Controls how the model uses the tools you pass.
Options are "auto", "none", or "tool". Defaults to "auto".
strict:
If True, model output is guaranteed to exactly match the JSON Schema
provided in the tool definition.
If False, neither the input schema nor the model output will be validated.
If None, ``strict`` argument will not
be passed to the model.
kwargs: Any additional parameters are passed directly to the model.
Returns:
Runnable: An executable chain or component.
"""
formatted_tools = [
convert_to_openai_tool(tool, strict=strict) for tool in tools
]
# Ensure tool_choice is one of the allowed options
if tool_choice not in ("auto", "none", "tool"):
raise ValueError(
f"Invalid tool_choice '{tool_choice}' provided. "
"Tool choice must be one of: 'auto', 'none', or 'tool'."
)
# Map tool_choice to the parameter expected by the Reka API
kwargs["tool_choice"] = tool_choice
# Pass the tools and updated kwargs to the model
formatted_tools = [tool["function"] for tool in formatted_tools]
return super().bind(tools=formatted_tools, **kwargs)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/vertexai.py | """Wrapper around Google VertexAI chat-based models."""
from __future__ import annotations
import base64
import logging
import re
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Union, cast
from urllib.parse import urlparse
import requests
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.utils import pre_init
from langchain_community.llms.vertexai import (
_VertexAICommon,
is_codey_model,
is_gemini_model,
)
from langchain_community.utilities.vertexai import (
load_image_from_gcs,
raise_vertex_import_error,
)
if TYPE_CHECKING:
from vertexai.language_models import (
ChatMessage,
ChatSession,
CodeChatSession,
InputOutputTextPair,
)
from vertexai.preview.generative_models import Content
logger = logging.getLogger(__name__)
@dataclass
class _ChatHistory:
"""Represents a context and a history of messages."""
history: List["ChatMessage"] = field(default_factory=list)
context: Optional[str] = None
def _parse_chat_history(history: List[BaseMessage]) -> _ChatHistory:
"""Parse a sequence of messages into history.
Args:
history: The list of messages to re-create the history of the chat.
Returns:
A parsed chat history.
Raises:
ValueError: If the sequence of messages has a SystemMessage anywhere
other than the first position.
"""
from vertexai.language_models import ChatMessage
vertex_messages, context = [], None
for i, message in enumerate(history):
content = cast(str, message.content)
if i == 0 and isinstance(message, SystemMessage):
context = content
elif isinstance(message, AIMessage):
vertex_message = ChatMessage(content=message.content, author="bot")
vertex_messages.append(vertex_message)
elif isinstance(message, HumanMessage):
vertex_message = ChatMessage(content=message.content, author="user")
vertex_messages.append(vertex_message)
else:
raise ValueError(
f"Unexpected message with type {type(message)} at the position {i}."
)
chat_history = _ChatHistory(context=context, history=vertex_messages)
return chat_history
def _is_url(s: str) -> bool:
try:
result = urlparse(s)
return all([result.scheme, result.netloc])
except Exception as e:
logger.debug(f"Unable to parse URL: {e}")
return False
def _parse_chat_history_gemini(
history: List[BaseMessage], project: Optional[str]
) -> List["Content"]:
from vertexai.preview.generative_models import Content, Image, Part
def _convert_to_prompt(part: Union[str, Dict]) -> Part:
if isinstance(part, str):
return Part.from_text(part)
if not isinstance(part, Dict):
raise ValueError(
f"Message's content is expected to be a dict, got {type(part)}!"
)
if part["type"] == "text":
return Part.from_text(part["text"])
elif part["type"] == "image_url":
path = part["image_url"]["url"]
if path.startswith("gs://"):
image = load_image_from_gcs(path=path, project=project)
elif path.startswith("data:image/"):
# extract base64 component from image uri
encoded: Any = re.search(r"data:image/\w{2,4};base64,(.*)", path)
if encoded:
encoded = encoded.group(1)
else:
raise ValueError(
"Invalid image uri. It should be in the format "
"data:image/<image_type>;base64,<base64_encoded_image>."
)
image = Image.from_bytes(base64.b64decode(encoded))
elif _is_url(path):
response = requests.get(path)
response.raise_for_status()
image = Image.from_bytes(response.content)
else:
image = Image.load_from_file(path)
else:
raise ValueError("Only text and image_url types are supported!")
return Part.from_image(image)
vertex_messages = []
for i, message in enumerate(history):
if i == 0 and isinstance(message, SystemMessage):
raise ValueError("SystemMessages are not yet supported!")
elif isinstance(message, AIMessage):
role = "model"
elif isinstance(message, HumanMessage):
role = "user"
else:
raise ValueError(
f"Unexpected message with type {type(message)} at the position {i}."
)
raw_content = message.content
if isinstance(raw_content, str):
raw_content = [raw_content]
parts = [_convert_to_prompt(part) for part in raw_content]
vertex_message = Content(role=role, parts=parts)
vertex_messages.append(vertex_message)
return vertex_messages
def _parse_examples(examples: List[BaseMessage]) -> List["InputOutputTextPair"]:
from vertexai.language_models import InputOutputTextPair
if len(examples) % 2 != 0:
raise ValueError(
f"Expect examples to have an even amount of messages, got {len(examples)}."
)
example_pairs = []
input_text = None
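# Examples alternate human (even index) and AI (odd index) messages;
# each adjacent pair becomes one InputOutputTextPair.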
for i, example in enumerate(examples):
if i % 2 == 0:
if not isinstance(example, HumanMessage):
raise ValueError(
f"Expected the first message in a part to be from human, got "
f"{type(example)} for the {i}th message."
)
input_text = example.content
if i % 2 == 1:
if not isinstance(example, AIMessage):
raise ValueError(
f"Expected the second message in a part to be from AI, got "
f"{type(example)} for the {i}th message."
)
pair = InputOutputTextPair(
input_text=input_text, output_text=example.content
)
example_pairs.append(pair)
return example_pairs
def _get_question(messages: List[BaseMessage]) -> HumanMessage:
"""Get the human message at the end of a list of input messages to a chat model."""
if not messages:
raise ValueError("You should provide at least one message to start the chat!")
question = messages[-1]
if not isinstance(question, HumanMessage):
raise ValueError(
f"Last message in the list should be from human, got {question.type}."
)
return question
@deprecated(
since="0.0.12",
removal="1.0",
alternative_import="langchain_google_vertexai.ChatVertexAI",
)
class ChatVertexAI(_VertexAICommon, BaseChatModel): # type: ignore[override]
"""`Vertex AI` Chat large language models API."""
model_name: str = "chat-bison"
"Underlying model name."
examples: Optional[List[BaseMessage]] = None
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "chat_models", "vertexai"]
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
is_gemini = is_gemini_model(values["model_name"])
cls._try_init_vertexai(values)
try:
from vertexai.language_models import ChatModel, CodeChatModel
if is_gemini:
from vertexai.preview.generative_models import (
GenerativeModel,
)
except ImportError:
raise_vertex_import_error()
if is_gemini:
values["client"] = GenerativeModel(model_name=values["model_name"])
else:
if is_codey_model(values["model_name"]):
model_cls = CodeChatModel
else:
model_cls = ChatModel
values["client"] = model_cls.from_pretrained(values["model_name"])
return values
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
"""Generate next turn in the conversation.
Args:
messages: The history of the conversation as a list of messages. Code chat
does not support context.
stop: The list of stop words (optional).
run_manager: The CallbackManager for LLM run, it's not used at the moment.
stream: Whether to use the streaming endpoint.
Returns:
The ChatResult that contains outputs generated by the model.
Raises:
ValueError: if the last message in the list is not from human.
"""
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
question = _get_question(messages)
params = self._prepare_params(stop=stop, stream=False, **kwargs)
msg_params = {}
if "candidate_count" in params:
msg_params["candidate_count"] = params.pop("candidate_count")
if self._is_gemini_model:
history_gemini = _parse_chat_history_gemini(messages, project=self.project)
message = history_gemini.pop()
chat = self.client.start_chat(history=history_gemini)
response = chat.send_message(message, generation_config=params)
else:
history = _parse_chat_history(messages[:-1])
examples = kwargs.get("examples") or self.examples
if examples:
params["examples"] = _parse_examples(examples)
chat = self._start_chat(history, **params)
response = chat.send_message(question.content, **msg_params)
generations = [
ChatGeneration(message=AIMessage(content=r.text))
for r in response.candidates
]
return ChatResult(generations=generations)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Asynchronously generate next turn in the conversation.
Args:
messages: The history of the conversation as a list of messages. Code chat
does not support context.
stop: The list of stop words (optional).
run_manager: The CallbackManager for LLM run, it's not used at the moment.
Returns:
The ChatResult that contains outputs generated by the model.
Raises:
ValueError: if the last message in the list is not from human.
"""
if "stream" in kwargs:
kwargs.pop("stream")
logger.warning("ChatVertexAI does not currently support async streaming.")
params = self._prepare_params(stop=stop, **kwargs)
msg_params = {}
if "candidate_count" in params:
msg_params["candidate_count"] = params.pop("candidate_count")
if self._is_gemini_model:
history_gemini = _parse_chat_history_gemini(messages, project=self.project)
message = history_gemini.pop()
chat = self.client.start_chat(history=history_gemini)
response = await chat.send_message_async(message, generation_config=params)
else:
question = _get_question(messages)
history = _parse_chat_history(messages[:-1])
examples = kwargs.get("examples", None)
if examples:
params["examples"] = _parse_examples(examples)
chat = self._start_chat(history, **params)
response = await chat.send_message_async(question.content, **msg_params)
generations = [
ChatGeneration(message=AIMessage(content=r.text))
for r in response.candidates
]
return ChatResult(generations=generations)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
params = self._prepare_params(stop=stop, stream=True, **kwargs)
if self._is_gemini_model:
history_gemini = _parse_chat_history_gemini(messages, project=self.project)
message = history_gemini.pop()
chat = self.client.start_chat(history=history_gemini)
responses = chat.send_message(
message, stream=True, generation_config=params
)
else:
question = _get_question(messages)
history = _parse_chat_history(messages[:-1])
examples = kwargs.get("examples", None)
if examples:
params["examples"] = _parse_examples(examples)
chat = self._start_chat(history, **params)
responses = chat.send_message_streaming(question.content, **params)
for response in responses:
chunk = ChatGenerationChunk(message=AIMessageChunk(content=response.text))
if run_manager:
run_manager.on_llm_new_token(response.text, chunk=chunk)
yield chunk
def _start_chat(
self, history: _ChatHistory, **kwargs: Any
) -> Union[ChatSession, CodeChatSession]:
if not self.is_codey_model:
return self.client.start_chat(
context=history.context, message_history=history.history, **kwargs
)
else:
return self.client.start_chat(message_history=history.history, **kwargs)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/perplexity.py | """Wrapper around Perplexity APIs."""
from __future__ import annotations
import logging
from typing import (
Any,
Dict,
Iterator,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
)
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import (
BaseChatModel,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
FunctionMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
ToolMessageChunk,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.utils import (
from_env,
get_pydantic_field_names,
)
from pydantic import ConfigDict, Field, model_validator
from typing_extensions import Self
logger = logging.getLogger(__name__)
class ChatPerplexity(BaseChatModel):
"""`Perplexity AI` Chat models API.
To use, you should have the ``openai`` python package installed, and the
environment variable ``PPLX_API_KEY`` set to your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatPerplexity
chat = ChatPerplexity(
model="llama-3.1-sonar-small-128k-online",
temperature=0.7,
)
"""
client: Any = None #: :meta private:
model: str = "llama-3.1-sonar-small-128k-online"
"""Model name."""
temperature: float = 0.7
"""What sampling temperature to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
pplx_api_key: Optional[str] = Field(
default_factory=from_env("PPLX_API_KEY", default=None), alias="api_key"
)
"""Base URL path for API requests,
leave blank if not using a proxy or service emulator."""
request_timeout: Optional[Union[float, Tuple[float, float]]] = Field(
None, alias="timeout"
)
"""Timeout for requests to PerplexityChat completion API. Default is None."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
max_tokens: Optional[int] = None
"""Maximum number of tokens to generate."""
model_config = ConfigDict(
populate_by_name=True,
)
@property
def lc_secrets(self) -> Dict[str, str]:
return {"pplx_api_key": "PPLX_API_KEY"}
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not a default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate that api key and python package exists in environment."""
try:
import openai
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
try:
self.client = openai.OpenAI(
api_key=self.pplx_api_key, base_url="https://api.perplexity.ai"
)
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
return self
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling PerplexityChat API."""
return {
"request_timeout": self.request_timeout,
"max_tokens": self.max_tokens,
"stream": self.streaming,
"temperature": self.temperature,
**self.model_kwargs,
}
def _convert_message_to_dict(self, message: BaseMessage) -> Dict[str, Any]:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = dict(self._invocation_params)
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [self._convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _convert_delta_to_message_chunk(
self, _dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
role = _dict.get("role")
content = _dict.get("content") or ""
additional_kwargs: Dict = {}
if _dict.get("function_call"):
function_call = dict(_dict["function_call"])
if "name" in function_call and function_call["name"] is None:
function_call["name"] = ""
additional_kwargs["function_call"] = function_call
if _dict.get("tool_calls"):
additional_kwargs["tool_calls"] = _dict["tool_calls"]
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content, additional_kwargs=additional_kwargs)
elif role == "system" or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role == "function" or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=content, name=_dict["name"])
elif role == "tool" or default_class == ToolMessageChunk:
return ToolMessageChunk(content=content, tool_call_id=_dict["tool_call_id"])
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role) # type: ignore[arg-type]
else:
return default_class(content=content) # type: ignore[call-arg]
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
default_chunk_class = AIMessageChunk
if stop:
params["stop_sequences"] = stop
stream_resp = self.client.chat.completions.create(
model=params["model"], messages=message_dicts, stream=True
)
for chunk in stream_resp:
if not isinstance(chunk, dict):
chunk = chunk.dict()
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
chunk = self._convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
)
finish_reason = choice.get("finish_reason")
generation_info = (
dict(finish_reason=finish_reason) if finish_reason is not None else None
)
default_chunk_class = chunk.__class__
chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
if stream_iter:
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.client.chat.completions.create(
model=params["model"], messages=message_dicts
)
message = AIMessage(
content=response.choices[0].message.content,
additional_kwargs={"citations": response.citations},
)
return ChatResult(generations=[ChatGeneration(message=message)])
@property
def _invocation_params(self) -> Mapping[str, Any]:
"""Get the parameters used to invoke the model."""
pplx_creds: Dict[str, Any] = {
"api_key": self.pplx_api_key,
"api_base": "https://api.perplexity.ai",
"model": self.model,
}
return {**pplx_creds, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "perplexitychat"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/edenai.py | import json
import warnings
from operator import itemgetter
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Literal,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from aiohttp import ClientSession
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
HumanMessage,
InvalidToolCall,
SystemMessage,
ToolCall,
ToolMessage,
)
from langchain_core.messages.tool import invalid_tool_call as create_invalid_tool_call
from langchain_core.messages.tool import tool_call as create_tool_call
from langchain_core.messages.tool import tool_call_chunk as create_tool_call_chunk
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
PydanticToolsParser,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import (
BaseModel,
ConfigDict,
Field,
SecretStr,
)
from langchain_community.utilities.requests import Requests
def _result_to_chunked_message(generated_result: ChatResult) -> ChatGenerationChunk:
message = generated_result.generations[0].message
if isinstance(message, AIMessage) and message.tool_calls is not None:
tool_call_chunks = [
create_tool_call_chunk(
name=tool_call["name"],
args=json.dumps(tool_call["args"]),
id=tool_call["id"],
index=idx,
)
for idx, tool_call in enumerate(message.tool_calls)
]
message_chunk = AIMessageChunk(
content=message.content,
tool_call_chunks=tool_call_chunks,
)
return ChatGenerationChunk(message=message_chunk)
else:
return cast(ChatGenerationChunk, generated_result.generations[0])
def _message_role(type: str) -> str:
role_mapping = {
"ai": "assistant",
"human": "user",
"chat": "user",
"AIMessageChunk": "assistant",
}
if type in role_mapping:
return role_mapping[type]
else:
raise ValueError(f"Unknown type: {type}")
def _extract_edenai_tool_results_from_messages(
messages: List[BaseMessage],
) -> Tuple[List[Dict[str, Any]], List[BaseMessage]]:
"""
Get the trailing LangChain tool messages and transform them into EdenAI tool_results.
Returns the tool_results and the remaining messages with those tool messages removed.
"""
tool_results: List[Dict[str, Any]] = []
other_messages = messages[:]
for msg in reversed(messages):
if isinstance(msg, ToolMessage):
tool_results = [
{"id": msg.tool_call_id, "result": msg.content},
*tool_results,
]
other_messages.pop()
else:
break
return tool_results, other_messages
def _format_edenai_messages(messages: List[BaseMessage]) -> Dict[str, Any]:
system = None
formatted_messages = []
human_messages = list(filter(lambda msg: isinstance(msg, HumanMessage), messages))
last_human_message = human_messages[-1] if human_messages else ""
tool_results, other_messages = _extract_edenai_tool_results_from_messages(messages)
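# EdenAI takes the latest human message as "text", the rest of the
# conversation as "previous_history", and trailing tool messages as
# "tool_results".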
for i, message in enumerate(other_messages):
if isinstance(message, SystemMessage):
if i != 0:
raise ValueError("System message must be at beginning of message list.")
system = message.content
elif isinstance(message, ToolMessage):
formatted_messages.append({"role": "tool", "message": message.content})
elif message != last_human_message:
formatted_messages.append(
{
"role": _message_role(message.type),
"message": message.content,
"tool_calls": _format_tool_calls_to_edenai_tool_calls(message),
}
)
return {
"text": getattr(last_human_message, "content", ""),
"previous_history": formatted_messages,
"chatbot_global_action": system,
"tool_results": tool_results,
}
def _format_tool_calls_to_edenai_tool_calls(message: BaseMessage) -> List:
tool_calls = getattr(message, "tool_calls", [])
invalid_tool_calls = getattr(message, "invalid_tool_calls", [])
edenai_tool_calls = []
for invalid_tool_call in invalid_tool_calls:
edenai_tool_calls.append(
{
"arguments": invalid_tool_call.get("args"),
"id": invalid_tool_call.get("id"),
"name": invalid_tool_call.get("name"),
}
)
for tool_call in tool_calls:
tool_args = tool_call.get("args", {})
try:
arguments = json.dumps(tool_args)
except TypeError:
arguments = str(tool_args)
edenai_tool_calls.append(
{
"arguments": arguments,
"id": tool_call["id"],
"name": tool_call["name"],
}
)
return edenai_tool_calls
def _extract_tool_calls_from_edenai_response(
provider_response: Dict[str, Any],
) -> Tuple[List[ToolCall], List[InvalidToolCall]]:
tool_calls = []
invalid_tool_calls = []
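# The provider response's "message" field is a list of turns; index 1 is
# taken to be the assistant turn, and its tool_calls (if any) are parsed
# below. Calls with unparsable JSON arguments become invalid_tool_calls.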
message = provider_response.get("message", {})[1]
if raw_tool_calls := message.get("tool_calls"):
for raw_tool_call in raw_tool_calls:
try:
tool_calls.append(
create_tool_call(
name=raw_tool_call["name"],
args=json.loads(raw_tool_call["arguments"]),
id=raw_tool_call["id"],
)
)
except json.JSONDecodeError as exc:
invalid_tool_calls.append(
create_invalid_tool_call(
name=raw_tool_call.get("name"),
args=raw_tool_call.get("arguments"),
id=raw_tool_call.get("id"),
error=f"Received JSONDecodeError {exc}",
)
)
return tool_calls, invalid_tool_calls
class ChatEdenAI(BaseChatModel):
"""`EdenAI` chat large language models.
`EdenAI` is a versatile platform that allows you to access various language models
from different providers such as Google, OpenAI, Cohere, Mistral and more.
To get started, make sure you have the environment variable ``EDENAI_API_KEY``
set with your API key, or pass it as a named parameter to the constructor.
Additionally, `EdenAI` provides the flexibility to choose from a variety of models,
including the ones like "gpt-4".
Example:
.. code-block:: python
from langchain_community.chat_models import ChatEdenAI
from langchain_core.messages import HumanMessage
# Initialize `ChatEdenAI` with the desired configuration
chat = ChatEdenAI(
provider="openai",
model="gpt-4",
max_tokens=256,
temperature=0.75)
# Create a list of messages to interact with the model
messages = [HumanMessage(content="hello")]
# Invoke the model with the provided messages
chat.invoke(messages)
`EdenAI` goes beyond mere model invocation. It empowers you with advanced features:
- **Multiple Providers**: Access a diverse range of LLMs offered by various
providers, giving you the freedom to choose the best-suited model for your use case.
- **Fallback Mechanism**: Set a fallback mechanism to ensure seamless operation
even if the primary provider is unavailable; you can easily switch to an
alternative provider.
- **Usage Statistics**: Track usage statistics on a per-project
and per-API key basis.
This feature allows you to monitor and manage resource consumption effectively.
- **Monitoring and Observability**: `EdenAI` provides comprehensive monitoring
and observability tools on the platform.
Example of setting up a fallback mechanism:
.. code-block:: python
# Initialize `ChatEdenAI` with a fallback provider
chat_with_fallback = ChatEdenAI(
provider="openai",
model="gpt-4",
max_tokens=256,
temperature=0.75,
fallback_provider="google")
You can find more details here: https://docs.edenai.co/reference/text_chat_create
"""
provider: str = "openai"
"""chat provider to use (eg: openai,google etc.)"""
model: Optional[str] = None
"""
Model name for the above provider (e.g. 'gpt-4' for openai).
Available models are shown on https://docs.edenai.co/ under 'available providers'.
"""
max_tokens: int = 256
"""Denotes the number of tokens to predict per generation."""
temperature: Optional[float] = 0
"""A non-negative float that tunes the degree of randomness in generation."""
streaming: bool = False
"""Whether to stream the results."""
fallback_providers: Optional[str] = None
"""Providers in this will be used as fallback if the call to provider fails."""
edenai_api_url: str = "https://api.edenai.run/v2"
edenai_api_key: Optional[SecretStr] = Field(None, description="EdenAI API Token")
model_config = ConfigDict(
extra="forbid",
)
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
values["edenai_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "edenai_api_key", "EDENAI_API_KEY")
)
return values
@staticmethod
def get_user_agent() -> str:
from langchain_community import __version__
return f"langchain/{__version__}"
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "edenai-chat"
@property
def _api_key(self) -> str:
if self.edenai_api_key:
return self.edenai_api_key.get_secret_value()
return ""
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
"""Call out to EdenAI's chat endpoint."""
if "available_tools" in kwargs:
yield self._stream_with_tools_as_generate(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return
url = f"{self.edenai_api_url}/text/chat/stream"
headers = {
"Authorization": f"Bearer {self._api_key}",
"User-Agent": self.get_user_agent(),
}
formatted_data = _format_edenai_messages(messages=messages)
payload: Dict[str, Any] = {
"providers": self.provider,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"fallback_providers": self.fallback_providers,
**formatted_data,
**kwargs,
}
payload = {k: v for k, v in payload.items() if v is not None}
if self.model is not None:
payload["settings"] = {self.provider: self.model}
request = Requests(headers=headers)
response = request.post(url=url, data=payload, stream=True)
response.raise_for_status()
for chunk_response in response.iter_lines():
chunk = json.loads(chunk_response.decode())
token = chunk["text"]
cg_chunk = ChatGenerationChunk(message=AIMessageChunk(content=token))
if run_manager:
run_manager.on_llm_new_token(token, chunk=cg_chunk)
yield cg_chunk
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
if "available_tools" in kwargs:
yield await self._astream_with_tools_as_agenerate(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return
url = f"{self.edenai_api_url}/text/chat/stream"
headers = {
"Authorization": f"Bearer {self._api_key}",
"User-Agent": self.get_user_agent(),
}
formatted_data = _format_edenai_messages(messages=messages)
payload: Dict[str, Any] = {
"providers": self.provider,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"fallback_providers": self.fallback_providers,
**formatted_data,
**kwargs,
}
payload = {k: v for k, v in payload.items() if v is not None}
if self.model is not None:
payload["settings"] = {self.provider: self.model}
async with ClientSession() as session:
async with session.post(url, json=payload, headers=headers) as response:
response.raise_for_status()
async for chunk_response in response.content:
chunk = json.loads(chunk_response.decode())
token = chunk["text"]
cg_chunk = ChatGenerationChunk(
message=AIMessageChunk(content=token)
)
if run_manager:
await run_manager.on_llm_new_token(
token=chunk["text"], chunk=cg_chunk
)
yield cg_chunk
def bind_tools(
self,
tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
*,
tool_choice: Optional[
Union[dict, str, Literal["auto", "none", "required", "any"], bool]
] = None,
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
formatted_tools = [convert_to_openai_tool(tool)["function"] for tool in tools]
formatted_tool_choice = "required" if tool_choice == "any" else tool_choice
return super().bind(
available_tools=formatted_tools, tool_choice=formatted_tool_choice, **kwargs
)
def with_structured_output(
self,
schema: Union[Dict, Type[BaseModel]],
*,
include_raw: bool = False,
**kwargs: Any,
) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
if kwargs:
raise ValueError(f"Received unsupported arguments {kwargs}")
llm = self.bind_tools([schema], tool_choice="required")
if isinstance(schema, type) and is_basemodel_subclass(schema):
output_parser: OutputParserLike = PydanticToolsParser(
tools=[schema], first_tool_only=True
)
else:
key_name = convert_to_openai_tool(schema)["function"]["name"]
output_parser = JsonOutputKeyToolsParser(
key_name=key_name, first_tool_only=True
)
if include_raw:
parser_assign = RunnablePassthrough.assign(
parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
)
parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
parser_with_fallback = parser_assign.with_fallbacks(
[parser_none], exception_key="parsing_error"
)
return RunnableMap(raw=llm) | parser_with_fallback
else:
return llm | output_parser
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Call out to EdenAI's chat endpoint."""
if self.streaming:
if "available_tools" in kwargs:
warnings.warn(
"stream: Tool use is not yet supported in streaming mode."
)
else:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
url = f"{self.edenai_api_url}/text/chat"
headers = {
"Authorization": f"Bearer {self._api_key}",
"User-Agent": self.get_user_agent(),
}
formatted_data = _format_edenai_messages(messages=messages)
payload: Dict[str, Any] = {
"providers": self.provider,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"fallback_providers": self.fallback_providers,
**formatted_data,
**kwargs,
}
payload = {k: v for k, v in payload.items() if v is not None}
if self.model is not None:
payload["settings"] = {self.provider: self.model}
request = Requests(headers=headers)
response = request.post(url=url, data=payload)
response.raise_for_status()
data = response.json()
provider_response = data[self.provider]
if self.fallback_providers:
fallback_response = data.get(self.fallback_providers)
if fallback_response:
provider_response = fallback_response
if provider_response.get("status") == "fail":
err_msg = provider_response.get("error", {}).get("message")
raise Exception(err_msg)
tool_calls, invalid_tool_calls = _extract_tool_calls_from_edenai_response(
provider_response
)
return ChatResult(
generations=[
ChatGeneration(
message=AIMessage(
content=provider_response["generated_text"] or "",
tool_calls=tool_calls,
invalid_tool_calls=invalid_tool_calls,
)
)
],
llm_output=data,
)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
if "available_tools" in kwargs:
warnings.warn(
"stream: Tool use is not yet supported in streaming mode."
)
else:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
url = f"{self.edenai_api_url}/text/chat"
headers = {
"Authorization": f"Bearer {self._api_key}",
"User-Agent": self.get_user_agent(),
}
formatted_data = _format_edenai_messages(messages=messages)
payload: Dict[str, Any] = {
"providers": self.provider,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"fallback_providers": self.fallback_providers,
**formatted_data,
**kwargs,
}
payload = {k: v for k, v in payload.items() if v is not None}
if self.model is not None:
payload["settings"] = {self.provider: self.model}
async with ClientSession() as session:
async with session.post(url, json=payload, headers=headers) as response:
response.raise_for_status()
data = await response.json()
provider_response = data[self.provider]
if self.fallback_providers:
fallback_response = data.get(self.fallback_providers)
if fallback_response:
provider_response = fallback_response
if provider_response.get("status") == "fail":
err_msg = provider_response.get("error", {}).get("message")
raise Exception(err_msg)
return ChatResult(
generations=[
ChatGeneration(
message=AIMessage(
content=provider_response["generated_text"]
)
)
],
llm_output=data,
)
def _stream_with_tools_as_generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]],
run_manager: Optional[CallbackManagerForLLMRun],
**kwargs: Any,
) -> ChatGenerationChunk:
warnings.warn("stream: Tool use is not yet supported in streaming mode.")
result = self._generate(messages, stop=stop, run_manager=run_manager, **kwargs)
return _result_to_chunked_message(result)
async def _astream_with_tools_as_agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]],
run_manager: Optional[AsyncCallbackManagerForLLMRun],
**kwargs: Any,
) -> ChatGenerationChunk:
warnings.warn("stream: Tool use is not yet supported in streaming mode.")
result = await self._agenerate(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return _result_to_chunked_message(result)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/anyscale.py | """Anyscale Endpoints chat wrapper. Relies heavily on ChatOpenAI."""
from __future__ import annotations
import logging
import os
import sys
import warnings
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Optional,
Sequence,
Set,
Type,
Union,
)
import requests
from langchain_core.messages import BaseMessage
from langchain_core.tools import BaseTool
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import Field, SecretStr, model_validator
from langchain_community.adapters.openai import convert_message_to_dict
from langchain_community.chat_models.openai import (
ChatOpenAI,
_import_tiktoken,
)
from langchain_community.utils.openai import is_openai_v1
if TYPE_CHECKING:
import tiktoken
logger = logging.getLogger(__name__)
DEFAULT_API_BASE = "https://api.endpoints.anyscale.com/v1"
DEFAULT_MODEL = "meta-llama/Meta-Llama-3-8B-Instruct"
class ChatAnyscale(ChatOpenAI):
"""`Anyscale` Chat large language models.
See https://www.anyscale.com/ for information about Anyscale.
To use, you should have the ``openai`` python package installed, and the
environment variable ``ANYSCALE_API_KEY`` set with your API key.
Alternatively, you can use the anyscale_api_key keyword argument.
Any parameters that are valid to be passed to the `openai.create` call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatAnyscale
chat = ChatAnyscale(model_name="meta-llama/Llama-2-7b-chat-hf")
"""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "anyscale-chat"
@property
def lc_secrets(self) -> Dict[str, str]:
return {"anyscale_api_key": "ANYSCALE_API_KEY"}
@classmethod
def is_lc_serializable(cls) -> bool:
return False
anyscale_api_key: SecretStr = Field(default=SecretStr(""))
"""AnyScale Endpoints API keys."""
model_name: str = Field(default=DEFAULT_MODEL, alias="model")
"""Model name to use."""
anyscale_api_base: str = Field(default=DEFAULT_API_BASE)
"""Base URL path for API requests,
leave blank if not using a proxy or service emulator."""
anyscale_proxy: Optional[str] = None
"""To support explicit proxy for Anyscale."""
available_models: Optional[Set[str]] = None
"""Available models from Anyscale API."""
@staticmethod
def get_available_models(
anyscale_api_key: Optional[str] = None,
anyscale_api_base: str = DEFAULT_API_BASE,
) -> Set[str]:
"""Get available models from Anyscale API."""
try:
anyscale_api_key = anyscale_api_key or os.environ["ANYSCALE_API_KEY"]
except KeyError as e:
raise ValueError(
"Anyscale API key must be passed as keyword argument or "
"set in environment variable ANYSCALE_API_KEY.",
) from e
models_url = f"{anyscale_api_base}/models"
models_response = requests.get(
models_url,
headers={
"Authorization": f"Bearer {anyscale_api_key}",
},
)
if models_response.status_code != 200:
raise ValueError(
f"Error getting models from {models_url}: "
f"{models_response.status_code}",
)
return {model["id"] for model in models_response.json()["data"]}
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: dict) -> Any:
"""Validate that api key and python package exists in environment."""
values["anyscale_api_key"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"anyscale_api_key",
"ANYSCALE_API_KEY",
)
)
values["anyscale_api_base"] = get_from_dict_or_env(
values,
"anyscale_api_base",
"ANYSCALE_API_BASE",
default=DEFAULT_API_BASE,
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"anyscale_proxy",
"ANYSCALE_PROXY",
default="",
)
try:
import openai
except ImportError as e:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`.",
) from e
try:
if is_openai_v1():
client_params = {
"api_key": values["anyscale_api_key"].get_secret_value(),
"base_url": values["anyscale_api_base"],
# To do: future support
# "organization": values["openai_organization"],
# "timeout": values["request_timeout"],
# "max_retries": values["max_retries"],
# "default_headers": values["default_headers"],
# "default_query": values["default_query"],
# "http_client": values["http_client"],
}
if not values.get("client"):
values["client"] = openai.OpenAI(**client_params).chat.completions
if not values.get("async_client"):
values["async_client"] = openai.AsyncOpenAI(
**client_params
).chat.completions
else:
values["openai_api_base"] = values["anyscale_api_base"]
values["openai_api_key"] = values["anyscale_api_key"].get_secret_value()
values["client"] = openai.ChatCompletion # type: ignore[attr-defined]
except AttributeError as exc:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`.",
) from exc
if "model_name" not in values.keys():
values["model_name"] = DEFAULT_MODEL
model_name = values["model_name"]
available_models = cls.get_available_models(
values["anyscale_api_key"].get_secret_value(),
values["anyscale_api_base"],
)
if model_name not in available_models:
raise ValueError(
f"Model name {model_name} not found in available models: "
f"{available_models}.",
)
values["available_models"] = available_models
return values
def _get_encoding_model(self) -> tuple[str, tiktoken.Encoding]:
tiktoken_ = _import_tiktoken()
if self.tiktoken_model_name is not None:
model = self.tiktoken_model_name
else:
model = self.model_name
# Resolve the tiktoken encoding to use; fall back to cl100k_base if the model is unknown.
try:
encoding = tiktoken_.encoding_for_model("gpt-3.5-turbo-0301")
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
model = "cl100k_base"
encoding = tiktoken_.get_encoding(model)
return model, encoding
def get_num_tokens_from_messages(
self,
messages: list[BaseMessage],
tools: Optional[
Sequence[Union[Dict[str, Any], Type, Callable, BaseTool]]
] = None,
) -> int:
"""Calculate num tokens with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb
"""
if tools is not None:
warnings.warn(
"Counting tokens in tool schemas is not yet supported. Ignoring tools."
)
if sys.version_info[1] <= 7:
return super().get_num_tokens_from_messages(messages)
model, encoding = self._get_encoding_model()
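# Token accounting follows the OpenAI cookbook for ChatML-style prompts:
# each message adds a fixed overhead and a "name" field adds one extra token.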
tokens_per_message = 3
tokens_per_name = 1
num_tokens = 0
messages_dict = [convert_message_to_dict(m) for m in messages]
for message in messages_dict:
num_tokens += tokens_per_message
for key, value in message.items():
# Cast str(value) in case the message value is not a string
# This occurs with function messages
num_tokens += len(encoding.encode(str(value)))
if key == "name":
num_tokens += tokens_per_name
# every reply is primed with <im_start>assistant
num_tokens += 3
return num_tokens
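# Illustrative sketch (not part of the original module): the same per-message
# arithmetic as `get_num_tokens_from_messages` above, written directly against
# tiktoken. It assumes the `tiktoken` package is installed; the messages are
# hypothetical.
if __name__ == "__main__":
    import tiktoken

    encoding = tiktoken.get_encoding("cl100k_base")
    example_messages = [{"role": "user", "content": "Hello there"}]
    num_tokens = 0
    for message in example_messages:
        num_tokens += 3  # tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(str(value)))
            if key == "name":
                num_tokens += 1  # tokens_per_name
    num_tokens += 3  # every reply is primed with <im_start>assistant
    print(num_tokens)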
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/minimax.py | """Wrapper around Minimax chat models."""
import json
import logging
from contextlib import asynccontextmanager, contextmanager
from operator import itemgetter
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Optional,
Sequence,
Type,
Union,
)
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
HumanMessage,
SystemMessage,
ToolMessage,
)
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
PydanticToolsParser,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain_core.utils.pydantic import get_fields
from pydantic import (
BaseModel,
ConfigDict,
Field,
SecretStr,
model_validator,
)
logger = logging.getLogger(__name__)
@contextmanager
def connect_httpx_sse(client: Any, method: str, url: str, **kwargs: Any) -> Iterator:
"""Context manager for connecting to an SSE stream.
Args:
client: The httpx client.
method: The HTTP method.
url: The URL to connect to.
kwargs: Additional keyword arguments to pass to the client.
Yields:
An EventSource object.
"""
from httpx_sse import EventSource
with client.stream(method, url, **kwargs) as response:
yield EventSource(response)
@asynccontextmanager
async def aconnect_httpx_sse(
client: Any, method: str, url: str, **kwargs: Any
) -> AsyncIterator:
"""Async context manager for connecting to an SSE stream.
Args:
client: The httpx client.
method: The HTTP method.
url: The URL to connect to.
kwargs: Additional keyword arguments to pass to the client.
Yields:
An EventSource object.
"""
from httpx_sse import EventSource
async with client.stream(method, url, **kwargs) as response:
yield EventSource(response)
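# Illustrative sketch (not part of the original module): how the SSE helpers above
# are typically used. The URL and JSON payload are placeholders, and this assumes
# the `httpx` and `httpx_sse` packages are installed.
if __name__ == "__main__":
    import httpx

    with httpx.Client() as client:
        with connect_httpx_sse(
            client, "POST", "https://example.com/v1/chat", json={"stream": True}
        ) as event_source:
            for sse in event_source.iter_sse():
                print(sse.data)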
def _convert_message_to_dict(message: BaseMessage) -> Dict[str, Any]:
"""Convert a LangChain messages to Dict."""
message_dict: Dict[str, Any]
if isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {
"role": "assistant",
"content": message.content,
"tool_calls": message.additional_kwargs.get("tool_calls"),
}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, ToolMessage):
message_dict = {
"role": "tool",
"content": message.content,
"tool_call_id": message.tool_call_id,
"name": message.name or message.additional_kwargs.get("name"),
}
else:
raise TypeError(f"Got unknown type '{message.__class__.__name__}'.")
return message_dict
def _convert_dict_to_message(dct: Dict[str, Any]) -> BaseMessage:
"""Convert a dict to LangChain message."""
role = dct.get("role")
content = dct.get("content", "")
if role == "assistant":
additional_kwargs = {}
tool_calls = dct.get("tool_calls", None)
if tool_calls is not None:
additional_kwargs["tool_calls"] = tool_calls
return AIMessage(content=content, additional_kwargs=additional_kwargs)
return ChatMessage(role=role, content=content) # type: ignore[arg-type]
def _convert_delta_to_message_chunk(
dct: Dict[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
role = dct.get("role")
content = dct.get("content", "")
additional_kwargs = {}
tool_calls = dct.get("tool_call", None)
if tool_calls is not None:
additional_kwargs["tool_calls"] = tool_calls
if role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content, additional_kwargs=additional_kwargs)
if role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role) # type: ignore[arg-type]
return default_class(content=content) # type: ignore[call-arg]
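# Small illustrative examples (not part of the original module) of the conversion
# helpers above; the dictionaries mirror hypothetical API payloads.
_example_message_dict = _convert_message_to_dict(HumanMessage(content="Hello"))
# -> {"role": "user", "content": "Hello"}
_example_chunk = _convert_delta_to_message_chunk(
    {"role": "assistant", "content": "Hi"}, AIMessageChunk
)
# -> AIMessageChunk(content="Hi")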
class MiniMaxChat(BaseChatModel):
"""MiniMax chat model integration.
Setup:
To use, you should have the environment variable ``MINIMAX_API_KEY`` set with
your API key.
.. code-block:: bash
export MINIMAX_API_KEY="your-api-key"
Key init args — completion params:
model: Optional[str]
Name of MiniMax model to use.
max_tokens: Optional[int]
Max number of tokens to generate.
temperature: Optional[float]
Sampling temperature.
top_p: Optional[float]
Total probability mass of tokens to consider at each step.
streaming: Optional[bool]
Whether to stream the results or not.
Key init args — client params:
api_key: Optional[str]
MiniMax API key. If not passed in will be read from env var MINIMAX_API_KEY.
base_url: Optional[str]
Base URL for API requests.
See full list of supported init args and their descriptions in the params section.
Instantiate:
.. code-block:: python
from langchain_community.chat_models import MiniMaxChat
chat = MiniMaxChat(
api_key=api_key,
model='abab6.5-chat',
# temperature=...,
# other params...
)
Invoke:
.. code-block:: python
messages = [
("system", "你是一名专业的翻译家,可以将用户的中文翻译为英文。"),
("human", "我喜欢编程。"),
]
chat.invoke(messages)
.. code-block:: python
AIMessage(
content='I enjoy programming.',
response_metadata={
'token_usage': {'total_tokens': 48},
'model_name': 'abab6.5-chat',
'finish_reason': 'stop'
},
id='run-42d62ba6-5dc1-4e16-98dc-f72708a4162d-0'
)
Stream:
.. code-block:: python
for chunk in chat.stream(messages):
print(chunk)
.. code-block:: python
content='I' id='run-a5837c45-4aaa-4f64-9ab4-2679bbd55522'
content=' enjoy programming.' response_metadata={'finish_reason': 'stop'} id='run-a5837c45-4aaa-4f64-9ab4-2679bbd55522'
.. code-block:: python
stream = chat.stream(messages)
full = next(stream)
for chunk in stream:
full += chunk
full
.. code-block:: python
AIMessageChunk(
content='I enjoy programming.',
response_metadata={'finish_reason': 'stop'},
id='run-01aed0a0-61c4-4709-be22-c6d8b17155d6'
)
Async:
.. code-block:: python
await chat.ainvoke(messages)
# stream
# async for chunk in chat.astream(messages):
# print(chunk)
# batch
# await chat.abatch([messages])
.. code-block:: python
AIMessage(
content='I enjoy programming.',
response_metadata={
'token_usage': {'total_tokens': 48},
'model_name': 'abab6.5-chat',
'finish_reason': 'stop'
},
id='run-c263b6f1-1736-4ece-a895-055c26b3436f-0'
)
Tool calling:
.. code-block:: python
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
location: str = Field(
..., description="The city and state, e.g. San Francisco, CA"
)
class GetPopulation(BaseModel):
'''Get the current population in a given location'''
location: str = Field(
..., description="The city and state, e.g. San Francisco, CA"
)
chat_with_tools = chat.bind_tools([GetWeather, GetPopulation])
ai_msg = chat_with_tools.invoke(
"Which city is hotter today and which is bigger: LA or NY?"
)
ai_msg.tool_calls
.. code-block:: python
[
{
'name': 'GetWeather',
'args': {'location': 'LA'},
'id': 'call_function_2140449382',
'type': 'tool_call'
}
]
Structured output:
.. code-block:: python
from typing import Optional
from pydantic import BaseModel, Field
class Joke(BaseModel):
'''Joke to tell user.'''
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")
structured_chat = chat.with_structured_output(Joke)
structured_chat.invoke("Tell me a joke about cats")
.. code-block:: python
Joke(
setup='Why do cats have nine lives?',
punchline='Because they are so cute and cuddly!',
rating=None
)
Response metadata:
.. code-block:: python
ai_msg = chat.invoke(messages)
ai_msg.response_metadata
.. code-block:: python
{'token_usage': {'total_tokens': 48},
'model_name': 'abab6.5-chat',
'finish_reason': 'stop'}
""" # noqa: E501
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "minimax"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
"model": self.model,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
**self.model_kwargs,
}
_client: Any = None
model: str = "abab6.5-chat"
"""Model name to use."""
max_tokens: int = 256
"""Denotes the number of tokens to predict per generation."""
temperature: float = 0.7
"""A non-negative float that tunes the degree of randomness in generation."""
top_p: float = 0.95
"""Total probability mass of tokens to consider at each step."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
minimax_api_host: str = Field(
default="https://api.minimax.chat/v1/text/chatcompletion_v2", alias="base_url"
)
minimax_group_id: Optional[str] = Field(default=None, alias="group_id")
"""[DEPRECATED, keeping it for for backward compatibility] Group Id"""
minimax_api_key: SecretStr = Field(alias="api_key")
"""Minimax API Key"""
streaming: bool = False
"""Whether to stream the results or not."""
model_config = ConfigDict(
populate_by_name=True,
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key and python package exists in environment."""
values["minimax_api_key"] = convert_to_secret_str(
get_from_dict_or_env(
values,
["minimax_api_key", "api_key"],
"MINIMAX_API_KEY",
)
)
default_values = {
name: field.default
for name, field in get_fields(cls).items()
if field.default is not None
}
default_values.update(values)
# Get custom api url from environment.
values["minimax_api_host"] = get_from_dict_or_env(
values,
["minimax_api_host", "base_url"],
"MINIMAX_API_HOST",
default_values["minimax_api_host"],
)
return values
def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
generations = []
if not isinstance(response, dict):
response = response.dict()
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
generation_info = dict(finish_reason=res.get("finish_reason"))
generations.append(
ChatGeneration(message=message, generation_info=generation_info)
)
token_usage = response.get("usage", {})
llm_output = {
"token_usage": token_usage,
"model_name": self.model,
}
return ChatResult(generations=generations, llm_output=llm_output)
def _create_payload_parameters( # type: ignore[no-untyped-def]
self, messages: List[BaseMessage], is_stream: bool = False, **kwargs
) -> Dict[str, Any]:
"""Create API request body parameters."""
message_dicts = [_convert_message_to_dict(m) for m in messages]
payload = self._default_params
payload["messages"] = message_dicts
self._reformat_function_parameters(kwargs.get("tools", {}))
payload.update(**kwargs)
if is_stream:
payload["stream"] = True
return payload
@staticmethod
def _reformat_function_parameters(tools_arg: Dict[Any, Any]) -> None:
"""Reformat the function parameters to strings."""
for tool_arg in tools_arg:
if tool_arg["type"] == "function" and not isinstance(
tool_arg["function"]["parameters"], str
):
tool_arg["function"]["parameters"] = json.dumps(
tool_arg["function"]["parameters"]
)
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
"""Generate next turn in the conversation.
Args:
messages: The history of the conversation as a list of messages. Code chat
does not support context.
stop: The list of stop words (optional).
run_manager: The CallbackManager for LLM run, it's not used at the moment.
stream: Whether to stream the results or not.
Returns:
The ChatResult that contains outputs generated by the model.
Raises:
ValueError: if the last message in the list is not from human.
"""
if not messages:
raise ValueError(
"You should provide at least one message to start the chat!"
)
is_stream = stream if stream is not None else self.streaming
if is_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
payload = self._create_payload_parameters(messages, **kwargs)
api_key = ""
if self.minimax_api_key is not None:
api_key = self.minimax_api_key.get_secret_value()
headers = {
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json",
}
import httpx
with httpx.Client(headers=headers, timeout=60) as client:
response = client.post(self.minimax_api_host, json=payload)
response.raise_for_status()
return self._create_chat_result(response.json())
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
"""Stream the chat response in chunks."""
payload = self._create_payload_parameters(messages, is_stream=True, **kwargs)
api_key = ""
if self.minimax_api_key is not None:
api_key = self.minimax_api_key.get_secret_value()
headers = {
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json",
}
import httpx
with httpx.Client(headers=headers, timeout=60) as client:
with connect_httpx_sse(
client, "POST", self.minimax_api_host, json=payload
) as event_source:
for sse in event_source.iter_sse():
chunk = json.loads(sse.data)
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
chunk = _convert_delta_to_message_chunk(
choice["delta"], AIMessageChunk
)
finish_reason = choice.get("finish_reason", None)
generation_info = (
{"finish_reason": finish_reason}
if finish_reason is not None
else None
)
chunk = ChatGenerationChunk(
message=chunk, generation_info=generation_info
)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
if finish_reason is not None:
break
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
if not messages:
raise ValueError(
"You should provide at least one message to start the chat!"
)
is_stream = stream if stream is not None else self.streaming
if is_stream:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
payload = self._create_payload_parameters(messages, **kwargs)
api_key = ""
if self.minimax_api_key is not None:
api_key = self.minimax_api_key.get_secret_value()
headers = {
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json",
}
import httpx
async with httpx.AsyncClient(headers=headers, timeout=60) as client:
response = await client.post(self.minimax_api_host, json=payload)
response.raise_for_status()
return self._create_chat_result(response.json())
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
payload = self._create_payload_parameters(messages, is_stream=True, **kwargs)
api_key = ""
if self.minimax_api_key is not None:
api_key = self.minimax_api_key.get_secret_value()
headers = {
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json",
}
import httpx
async with httpx.AsyncClient(headers=headers, timeout=60) as client:
async with aconnect_httpx_sse(
client, "POST", self.minimax_api_host, json=payload
) as event_source:
async for sse in event_source.aiter_sse():
chunk = json.loads(sse.data)
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
chunk = _convert_delta_to_message_chunk(
choice["delta"], AIMessageChunk
)
finish_reason = choice.get("finish_reason", None)
generation_info = (
{"finish_reason": finish_reason}
if finish_reason is not None
else None
)
chunk = ChatGenerationChunk(
message=chunk, generation_info=generation_info
)
if run_manager:
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
if finish_reason is not None:
break
def bind_tools(
self,
tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
"""Bind tool-like objects to this chat model.
Args:
tools: A list of tool definitions to bind to this chat model.
Can be a dictionary, pydantic model, callable, or BaseTool. Pydantic
models, callables, and BaseTools will be automatically converted to
their schema dictionary representation.
**kwargs: Any additional parameters to pass to the
:class: `~langchain.runnable.Runnable` constructor.
"""
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
return super().bind(tools=formatted_tools, **kwargs)
def with_structured_output(
self,
schema: Union[Dict, Type[BaseModel]],
*,
include_raw: bool = False,
**kwargs: Any,
) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
"""Model wrapper that returns outputs formatted to match the given schema.
Args:
schema: The output schema as a dict or a Pydantic class. If a Pydantic class
then the model output will be an object of that class. If a dict then
the model output will be a dict. With a Pydantic class the returned
attributes will be validated, whereas with a dict they will not be. If
`method` is "function_calling" and `schema` is a dict, then the dict
must match the OpenAI function-calling spec.
include_raw: If False then only the parsed structured output is returned. If
an error occurs during model output parsing it will be raised. If True
then both the raw model response (a BaseMessage) and the parsed model
response will be returned. If an error occurs during output parsing it
will be caught and returned as well. The final output is always a dict
with keys "raw", "parsed", and "parsing_error".
Returns:
A Runnable that takes any ChatModel input and returns as output:
If include_raw is True then a dict with keys:
raw: BaseMessage
parsed: Optional[_DictOrPydantic]
parsing_error: Optional[BaseException]
If include_raw is False then just _DictOrPydantic is returned,
where _DictOrPydantic depends on the schema:
If schema is a Pydantic class then _DictOrPydantic is the Pydantic
class.
If schema is a dict then _DictOrPydantic is a dict.
Example: Function-calling, Pydantic schema (method="function_calling", include_raw=False):
.. code-block:: python
from langchain_community.chat_models import MiniMaxChat
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
llm = MiniMaxChat()
structured_llm = llm.with_structured_output(AnswerWithJustification)
structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> AnswerWithJustification(
# answer='A pound of bricks and a pound of feathers weigh the same.',
# justification='The weight of the feathers is much less dense than the weight of the bricks, but since both weigh one pound, they weigh the same.'
# )
Example: Function-calling, Pydantic schema (method="function_calling", include_raw=True):
.. code-block:: python
from langchain_community.chat_models import MiniMaxChat
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
llm = MiniMaxChat()
structured_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True)
structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> {
# 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_function_8953642285', 'type': 'function', 'function': {'name': 'AnswerWithJustification', 'arguments': '{"answer": "A pound of bricks and a pound of feathers weigh the same.", "justification": "The weight of the feathers is much less dense than the weight of the bricks, but since both weigh one pound, they weigh the same."}'}}]}, response_metadata={'token_usage': {'total_tokens': 257}, 'model_name': 'abab6.5-chat', 'finish_reason': 'tool_calls'}, id='run-d897e037-2796-49f5-847e-f9f69dd390db-0', tool_calls=[{'name': 'AnswerWithJustification', 'args': {'answer': 'A pound of bricks and a pound of feathers weigh the same.', 'justification': 'The weight of the feathers is much less dense than the weight of the bricks, but since both weigh one pound, they weigh the same.'}, 'id': 'call_function_8953642285', 'type': 'tool_call'}]),
# 'parsed': AnswerWithJustification(answer='A pound of bricks and a pound of feathers weigh the same.', justification='The weight of the feathers is much less dense than the weight of the bricks, but since both weigh one pound, they weigh the same.'),
# 'parsing_error': None
# }
Example: Function-calling, dict schema (method="function_calling", include_raw=False):
.. code-block:: python
from langchain_community.chat_models import MiniMaxChat
from pydantic import BaseModel
from langchain_core.utils.function_calling import convert_to_openai_tool
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
dict_schema = convert_to_openai_tool(AnswerWithJustification)
llm = MiniMaxChat()
structured_llm = llm.with_structured_output(dict_schema)
structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> {
# 'answer': 'A pound of bricks and a pound of feathers both weigh the same, which is a pound.',
# 'justification': 'The difference is that bricks are much denser than feathers, so a pound of bricks will take up much less space than a pound of feathers.'
# }
""" # noqa: E501
if kwargs:
raise ValueError(f"Received unsupported arguments {kwargs}")
is_pydantic_schema = isinstance(schema, type) and issubclass(schema, BaseModel)
llm = self.bind_tools([schema])
if is_pydantic_schema:
output_parser: OutputParserLike = PydanticToolsParser(
tools=[schema], # type: ignore[list-item]
first_tool_only=True, # type: ignore[list-item]
)
else:
key_name = convert_to_openai_tool(schema)["function"]["name"]
output_parser = JsonOutputKeyToolsParser(
key_name=key_name, first_tool_only=True
)
if include_raw:
parser_assign = RunnablePassthrough.assign(
parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
)
parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
parser_with_fallback = parser_assign.with_fallbacks(
[parser_none], exception_key="parsing_error"
)
return RunnableMap(raw=llm) | parser_with_fallback
else:
return llm | output_parser
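# Illustrative sketch (not part of the original module): MiniMax expects each
# function tool's `parameters` field as a JSON string, which is what
# `_reformat_function_parameters` enforces before the request payload is built.
# The tool definition below is hypothetical.
_example_tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
            },
        },
    }
]
MiniMaxChat._reformat_function_parameters(_example_tools)
assert isinstance(_example_tools[0]["function"]["parameters"], str)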
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/konko.py | """KonkoAI chat wrapper."""
from __future__ import annotations
import logging
import os
import warnings
from typing import (
Any,
Dict,
Iterator,
List,
Optional,
Set,
Tuple,
Union,
cast,
)
import requests
from langchain_core.callbacks import (
CallbackManagerForLLMRun,
)
from langchain_core.messages import AIMessageChunk, BaseMessage
from langchain_core.outputs import ChatGenerationChunk, ChatResult
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import Field, SecretStr
from langchain_community.adapters.openai import (
convert_message_to_dict,
)
from langchain_community.chat_models.openai import (
ChatOpenAI,
_convert_delta_to_message_chunk,
generate_from_stream,
)
from langchain_community.utils.openai import is_openai_v1
DEFAULT_API_BASE = "https://api.konko.ai/v1"
DEFAULT_MODEL = "meta-llama/Llama-2-13b-chat-hf"
logger = logging.getLogger(__name__)
class ChatKonko(ChatOpenAI): # type: ignore[override]
"""`ChatKonko` Chat large language models API.
To use, you should have the ``konko`` python package installed, and the
environment variables ``KONKO_API_KEY`` and ``OPENAI_API_KEY`` set with your API keys.
Any parameters that are valid to be passed to the konko.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatKonko
llm = ChatKonko(model="meta-llama/Llama-2-13b-chat-hf")
"""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"konko_api_key": "KONKO_API_KEY", "openai_api_key": "OPENAI_API_KEY"}
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return False
client: Any = None #: :meta private:
model: str = Field(default=DEFAULT_MODEL, alias="model")
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: Optional[str] = None
konko_api_key: Optional[str] = None
max_retries: int = 6
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
n: int = 1
"""Number of chat completions to generate for each prompt."""
max_tokens: int = 20
"""Maximum number of tokens to generate."""
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["konko_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "konko_api_key", "KONKO_API_KEY")
)
try:
import konko
except ImportError:
raise ImportError(
"Could not import konko python package. "
"Please install it with `pip install konko`."
)
try:
if is_openai_v1():
values["client"] = konko.chat.completions
else:
values["client"] = konko.ChatCompletion
except AttributeError:
raise ValueError(
"`konko` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the konko package. Try upgrading it "
"with `pip install --upgrade konko`."
)
if not hasattr(konko, "_is_legacy_openai"):
warnings.warn(
"You are using an older version of the 'konko' package. "
"Please consider upgrading to access new features."
)
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Konko API."""
return {
"model": self.model,
"max_tokens": self.max_tokens,
"stream": self.streaming,
"n": self.n,
"temperature": self.temperature,
**self.model_kwargs,
}
@staticmethod
def get_available_models(
konko_api_key: Union[str, SecretStr, None] = None,
openai_api_key: Union[str, SecretStr, None] = None,
konko_api_base: str = DEFAULT_API_BASE,
) -> Set[str]:
"""Get available models from Konko API."""
# Try to retrieve the OpenAI API key if it's not passed as an argument
if not openai_api_key:
try:
openai_api_key = convert_to_secret_str(os.environ["OPENAI_API_KEY"])
except KeyError:
pass # It's okay if it's not set, we just won't use it
elif isinstance(openai_api_key, str):
openai_api_key = convert_to_secret_str(openai_api_key)
# Try to retrieve the Konko API key if it's not passed as an argument
if not konko_api_key:
try:
konko_api_key = convert_to_secret_str(os.environ["KONKO_API_KEY"])
except KeyError:
raise ValueError(
"Konko API key must be passed as keyword argument or "
"set in environment variable KONKO_API_KEY."
)
elif isinstance(konko_api_key, str):
konko_api_key = convert_to_secret_str(konko_api_key)
models_url = f"{konko_api_base}/models"
headers = {
"Authorization": f"Bearer {konko_api_key.get_secret_value()}",
}
if openai_api_key:
headers["X-OpenAI-Api-Key"] = cast(
SecretStr, openai_api_key
).get_secret_value()
models_response = requests.get(models_url, headers=headers)
if models_response.status_code != 200:
raise ValueError(
f"Error getting models from {models_url}: "
f"{models_response.status_code}"
)
return {model["id"] for model in models_response.json()["data"]}
def completion_with_retry(
self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
) -> Any:
def _completion_with_retry(**kwargs: Any) -> Any:
return self.client.create(**kwargs)
return _completion_with_retry(**kwargs)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class = AIMessageChunk
for chunk in self.completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
):
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
chunk = _convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
)
finish_reason = choice.get("finish_reason")
generation_info = (
dict(finish_reason=finish_reason) if finish_reason is not None else None
)
default_chunk_class = chunk.__class__
cg_chunk = ChatGenerationChunk(
message=chunk, generation_info=generation_info
)
if run_manager:
run_manager.on_llm_new_token(cg_chunk.text, chunk=cg_chunk)
yield cg_chunk
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
)
return self._create_chat_result(response)
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = self._client_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [convert_message_to_dict(m) for m in messages]
return message_dicts, params
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model}, **self._default_params}
@property
def _client_params(self) -> Dict[str, Any]:
"""Get the parameters used for the konko client."""
return {**self._default_params}
def _get_invocation_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
return {
"model": self.model,
**super()._get_invocation_params(stop=stop),
**self._default_params,
**kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "konko-chat"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/cloudflare_workersai.py | import logging
from operator import itemgetter
from typing import (
Any,
Callable,
Dict,
List,
Literal,
Optional,
Sequence,
Type,
Union,
cast,
)
from uuid import uuid4
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
HumanMessage,
SystemMessage,
ToolCall,
ToolMessage,
)
from langchain_core.outputs import ChatGeneration, ChatResult
from langchain_core.messages.tool import tool_call
from langchain_core.output_parsers import (
JsonOutputParser,
PydanticOutputParser,
)
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
PydanticToolsParser,
)
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.runnables.base import RunnableMap
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel, Field
# Initialize logging
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(levelname)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
_logger = logging.getLogger(__name__)
def _is_pydantic_class(obj: Any) -> bool:
return isinstance(obj, type) and is_basemodel_subclass(obj)
def _convert_messages_to_cloudflare_messages(
messages: List[BaseMessage],
) -> List[Dict[str, Any]]:
"""Convert LangChain messages to Cloudflare Workers AI format."""
cloudflare_messages = []
msg: Dict[str, Any]
for message in messages:
# Base structure for each message
msg = {
"role": "",
"content": message.content if isinstance(message.content, str) else "",
}
# Determine role and additional fields based on message type
if isinstance(message, HumanMessage):
msg["role"] = "user"
elif isinstance(message, AIMessage):
msg["role"] = "assistant"
# If the AIMessage includes tool calls, format them as needed
if message.tool_calls:
tool_calls = [
{"name": tool_call["name"], "arguments": tool_call["args"]}
for tool_call in message.tool_calls
]
msg["tool_calls"] = tool_calls
elif isinstance(message, SystemMessage):
msg["role"] = "system"
elif isinstance(message, ToolMessage):
msg["role"] = "tool"
msg["tool_call_id"] = (
message.tool_call_id
) # Use tool_call_id if it's a ToolMessage
# Add the formatted message to the list
cloudflare_messages.append(msg)
return cloudflare_messages
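# Small illustrative example (not part of the original module) of the message
# conversion above.
_example_messages = _convert_messages_to_cloudflare_messages(
    [SystemMessage(content="You are terse."), HumanMessage(content="Hi")]
)
# -> [{"role": "system", "content": "You are terse."},
#     {"role": "user", "content": "Hi"}]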
def _get_tool_calls_from_response(response: requests.Response) -> List[ToolCall]:
"""Get tool calls from ollama response."""
tool_calls = []
if "tool_calls" in response.json()["result"]:
for tc in response.json()["result"]["tool_calls"]:
tool_calls.append(
tool_call(
id=str(uuid4()),
name=tc["name"],
args=tc["arguments"],
)
)
return tool_calls
class ChatCloudflareWorkersAI(BaseChatModel):
"""Custom chat model for Cloudflare Workers AI"""
account_id: str = Field(...)
api_token: str = Field(...)
model: str = Field(...)
ai_gateway: str = ""
url: str = ""
base_url: str = "https://api.cloudflare.com/client/v4/accounts"
gateway_url: str = "https://gateway.ai.cloudflare.com/v1"
def __init__(self, **kwargs: Any) -> None:
"""Initialize with necessary credentials."""
super().__init__(**kwargs)
if self.ai_gateway:
self.url = (
f"{self.gateway_url}/{self.account_id}/"
f"{self.ai_gateway}/workers-ai/run/{self.model}"
)
else:
self.url = f"{self.base_url}/{self.account_id}/ai/run/{self.model}"
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Generate a response based on the messages provided."""
formatted_messages = _convert_messages_to_cloudflare_messages(messages)
headers = {"Authorization": f"Bearer {self.api_token}"}
prompt = "\n".join(
f"role: {msg['role']}, content: {msg['content']}"
+ (f", tools: {msg['tool_calls']}" if "tool_calls" in msg else "")
+ (
f", tool_call_id: {msg['tool_call_id']}"
if "tool_call_id" in msg
else ""
)
for msg in formatted_messages
)
# Initialize `data` with `prompt`
data = {
"prompt": prompt,
"tools": kwargs.get("tools"),
**{key: value for key, value in kwargs.items() if key != "tools"},
}
# Ensure `tools` is a list if it's included in `kwargs`
if data["tools"] is not None and not isinstance(data["tools"], list):
data["tools"] = [data["tools"]]
_logger.info(f"Sending prompt to Cloudflare Workers AI: {data}")
response = requests.post(self.url, headers=headers, json=data)
tool_calls = _get_tool_calls_from_response(response)
ai_message = AIMessage(
content=str(response.json()), tool_calls=tool_calls
)
chat_generation = ChatGeneration(message=ai_message)
return ChatResult(generations=[chat_generation])
def bind_tools(
self,
tools: Sequence[Union[Dict[str, Any], Type, Callable[..., Any], BaseTool]],
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
"""Bind tools for use in model generation."""
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
return super().bind(tools=formatted_tools, **kwargs)
def with_structured_output(
self,
schema: Union[Dict, Type[BaseModel]],
*,
include_raw: bool = False,
method: Optional[Literal["json_mode", "function_calling"]] = "function_calling",
**kwargs: Any,
) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
"""Model wrapper that returns outputs formatted to match the given schema."""
if kwargs:
raise ValueError(f"Received unsupported arguments {kwargs}")
is_pydantic_schema = _is_pydantic_class(schema)
if method == "function_calling":
if schema is None:
raise ValueError(
"schema must be specified when method is 'function_calling'. "
"Received None."
)
tool_name = convert_to_openai_tool(schema)["function"]["name"]
llm = self.bind_tools([schema], tool_choice=tool_name)
if is_pydantic_schema:
output_parser: OutputParserLike = PydanticToolsParser(
tools=[schema], # type: ignore[list-item]
first_tool_only=True, # type: ignore[list-item]
)
else:
output_parser = JsonOutputKeyToolsParser(
key_name=tool_name, first_tool_only=True
)
elif method == "json_mode":
llm = self.bind(response_format={"type": "json_object"})
output_parser = (
PydanticOutputParser(pydantic_object=schema) # type: ignore[type-var, arg-type]
if is_pydantic_schema
else JsonOutputParser()
)
else:
raise ValueError(
f"Unrecognized method argument. Expected one of 'function_calling' or "
f"'json_mode'. Received: '{method}'"
)
if include_raw:
parser_assign = RunnablePassthrough.assign(
parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
)
parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
parser_with_fallback = parser_assign.with_fallbacks(
[parser_none], exception_key="parsing_error"
)
return RunnableMap(raw=llm) | parser_with_fallback
else:
return llm | output_parser
@property
def _llm_type(self) -> str:
"""Return the type of the LLM (for Langchain compatibility)."""
return "cloudflare-workers-ai"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/pai_eas_endpoint.py | import json
import logging
from typing import Any, AsyncIterator, Dict, List, Optional, cast
import requests
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class PaiEasChatEndpoint(BaseChatModel):
"""Alibaba Cloud PAI-EAS LLM Service chat model API.
To use, you must have a deployed EAS chat LLM service on Alibaba Cloud. Set the
environment variables ``EAS_SERVICE_URL`` and ``EAS_SERVICE_TOKEN`` with your EAS
service URL and service token.
Example:
.. code-block:: python
from langchain_community.chat_models import PaiEasChatEndpoint
eas_chat_endpoint = PaiEasChatEndpoint(
eas_service_url="your_service_url",
eas_service_token="your_service_token"
)
"""
"""PAI-EAS Service URL"""
eas_service_url: str
"""PAI-EAS Service TOKEN"""
eas_service_token: str
"""PAI-EAS Service Infer Params"""
max_new_tokens: Optional[int] = 512
temperature: Optional[float] = 0.8
top_p: Optional[float] = 0.1
top_k: Optional[int] = 10
do_sample: Optional[bool] = False
use_cache: Optional[bool] = True
stop_sequences: Optional[List[str]] = None
"""Enable stream chat mode."""
streaming: bool = False
"""Key/value arguments to pass to the model. Reserved for future use"""
model_kwargs: Optional[dict] = None
version: Optional[str] = "2.0"
timeout: Optional[int] = 5000
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key and python package exists in environment."""
values["eas_service_url"] = get_from_dict_or_env(
values, "eas_service_url", "EAS_SERVICE_URL"
)
values["eas_service_token"] = get_from_dict_or_env(
values, "eas_service_token", "EAS_SERVICE_TOKEN"
)
return values
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
"eas_service_url": self.eas_service_url,
"eas_service_token": self.eas_service_token,
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "pai_eas_chat_endpoint"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Cohere API."""
return {
"max_new_tokens": self.max_new_tokens,
"temperature": self.temperature,
"top_k": self.top_k,
"top_p": self.top_p,
"stop_sequences": [],
"do_sample": self.do_sample,
"use_cache": self.use_cache,
}
def _invocation_params(
self, stop_sequences: Optional[List[str]], **kwargs: Any
) -> dict:
params = self._default_params
if self.model_kwargs:
params.update(self.model_kwargs)
if self.stop_sequences is not None and stop_sequences is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop_sequences is not None:
params["stop"] = self.stop_sequences
else:
params["stop"] = stop_sequences
return {**params, **kwargs}
def format_request_payload(
self, messages: List[BaseMessage], **model_kwargs: Any
) -> dict:
prompt: Dict[str, Any] = {}
user_content: List[str] = []
assistant_content: List[str] = []
for message in messages:
"""Converts message to a dict according to role"""
content = cast(str, message.content)
if isinstance(message, HumanMessage):
user_content = user_content + [content]
elif isinstance(message, AIMessage):
assistant_content = assistant_content + [content]
elif isinstance(message, SystemMessage):
prompt["system_prompt"] = content
elif isinstance(message, ChatMessage) and message.role in [
"user",
"assistant",
"system",
]:
if message.role == "system":
prompt["system_prompt"] = content
elif message.role == "user":
user_content = user_content + [content]
elif message.role == "assistant":
assistant_content = assistant_content + [content]
else:
supported = ",".join([role for role in ["user", "assistant", "system"]])
raise ValueError(
f"""Received unsupported role.
Supported roles for the LLaMa Foundation Model: {supported}"""
)
prompt["prompt"] = user_content[len(user_content) - 1]
history = [
history_item
for _, history_item in enumerate(zip(user_content[:-1], assistant_content))
]
prompt["history"] = history
return {**prompt, **model_kwargs}
def _format_response_payload(
self, output: bytes, stop_sequences: Optional[List[str]]
) -> str:
"""Formats response"""
try:
text = json.loads(output)["response"]
if stop_sequences:
text = enforce_stop_tokens(text, stop_sequences)
return text
except Exception as e:
if isinstance(e, json.decoder.JSONDecodeError):
return output.decode("utf-8")
raise e
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
output_str = self._call(messages, stop=stop, run_manager=run_manager, **kwargs)
message = AIMessage(content=output_str)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
def _call(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
params = self._invocation_params(stop, **kwargs)
request_payload = self.format_request_payload(messages, **params)
response_payload = self._call_eas(request_payload)
generated_text = self._format_response_payload(response_payload, params["stop"])
if run_manager:
run_manager.on_llm_new_token(generated_text)
return generated_text
def _call_eas(self, query_body: dict) -> Any:
"""Generate text from the eas service."""
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": f"{self.eas_service_token}",
}
# make request
response = requests.post(
self.eas_service_url, headers=headers, json=query_body, timeout=self.timeout
)
if response.status_code != 200:
raise Exception(
f"Request failed with status code {response.status_code}"
f" and message {response.text}"
)
return response.text
def _call_eas_stream(self, query_body: dict) -> Any:
"""Generate text from the eas service."""
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": f"{self.eas_service_token}",
}
# make request
response = requests.post(
self.eas_service_url, headers=headers, json=query_body, timeout=self.timeout
)
if response.status_code != 200:
raise Exception(
f"Request failed with status code {response.status_code}"
f" and message {response.text}"
)
return response
def _convert_chunk_to_message_message(
self,
chunk: str,
) -> AIMessageChunk:
data = json.loads(chunk.encode("utf-8"))
return AIMessageChunk(content=data.get("response", ""))
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
params = self._invocation_params(stop, **kwargs)
request_payload = self.format_request_payload(messages, **params)
request_payload["use_stream_chat"] = True
response = self._call_eas_stream(request_payload)
for chunk in response.iter_lines(
chunk_size=8192, decode_unicode=False, delimiter=b"\0"
):
if chunk:
content = self._convert_chunk_to_message_message(chunk)
# identify stop sequence in generated text, if any
stop_seq_found: Optional[str] = None
for stop_seq in params["stop"]:
if stop_seq in content.content:
stop_seq_found = stop_seq
# truncate the generated text at the stop sequence, if one was found
if stop_seq_found:
content.content = content.content[
: content.content.index(stop_seq_found)
]
# yield the (possibly truncated) chunk, if any text remains
if content.content:
cg_chunk = ChatGenerationChunk(message=content)
if run_manager:
await run_manager.on_llm_new_token(
cast(str, content.content), chunk=cg_chunk
)
yield cg_chunk
# break if stop sequence found
if stop_seq_found:
break
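# Illustrative sketch (not part of the original module) of the request payload
# produced by `format_request_payload`: the last human message becomes "prompt",
# earlier user/assistant turns become "history", and a system message becomes
# "system_prompt". The endpoint values below are placeholders.
if __name__ == "__main__":
    endpoint = PaiEasChatEndpoint(
        eas_service_url="https://example.pai-eas.aliyuncs.com/api/predict/demo",
        eas_service_token="demo-token",
    )
    payload = endpoint.format_request_payload(
        [SystemMessage(content="Be brief."), HumanMessage(content="Hello")]
    )
    # -> {"system_prompt": "Be brief.", "prompt": "Hello", "history": []}
    print(payload)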
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/symblai_nebula.py | import json
import os
from json import JSONDecodeError
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional
import requests
from aiohttp import ClientSession
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.utils import convert_to_secret_str
from pydantic import ConfigDict, Field, SecretStr
def _convert_role(role: str) -> str:
map = {"ai": "assistant", "human": "human", "chat": "human"}
if role in map:
return map[role]
else:
raise ValueError(f"Unknown role type: {role}")
def _format_nebula_messages(messages: List[BaseMessage]) -> Dict[str, Any]:
system = ""
formatted_messages = []
for message in messages[:-1]:
if message.type == "system":
if isinstance(message.content, str):
system = message.content
else:
raise ValueError("System prompt must be a string")
else:
formatted_messages.append(
{
"role": _convert_role(message.type),
"text": message.content,
}
)
text = messages[-1].content
formatted_messages.append({"role": "human", "text": text})
return {"system_prompt": system, "messages": formatted_messages}
class ChatNebula(BaseChatModel):
"""`Nebula` chat large language model - https://docs.symbl.ai/docs/nebula-llm
API Reference: https://docs.symbl.ai/reference/nebula-chat
To use, set the environment variable ``NEBULA_API_KEY``,
or pass it as a named parameter to the constructor.
To request an API key, visit https://platform.symbl.ai/#/login
Example:
.. code-block:: python
from langchain_community.chat_models import ChatNebula
from langchain_core.messages import SystemMessage, HumanMessage
chat = ChatNebula(max_new_tokens=1024, temperature=0.5)
messages = [
SystemMessage(
content="You are a helpful assistant."
),
HumanMessage(
"Answer the following question. How can I help save the world."
),
]
chat.invoke(messages)
"""
max_new_tokens: int = 1024
"""Denotes the number of tokens to predict per generation."""
temperature: Optional[float] = 0
"""A non-negative float that tunes the degree of randomness in generation."""
streaming: bool = False
nebula_api_url: str = "https://api-nebula.symbl.ai"
nebula_api_key: Optional[SecretStr] = Field(None, description="Nebula API Token")
model_config = ConfigDict(
populate_by_name=True,
arbitrary_types_allowed=True,
)
def __init__(self, **kwargs: Any) -> None:
if "nebula_api_key" in kwargs:
api_key = convert_to_secret_str(kwargs.pop("nebula_api_key"))
elif "NEBULA_API_KEY" in os.environ:
api_key = convert_to_secret_str(os.environ["NEBULA_API_KEY"])
else:
api_key = None
super().__init__(nebula_api_key=api_key, **kwargs) # type: ignore[call-arg]
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "nebula-chat"
@property
def _api_key(self) -> str:
if self.nebula_api_key:
return self.nebula_api_key.get_secret_value()
return ""
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
"""Call out to Nebula's chat endpoint."""
url = f"{self.nebula_api_url}/v1/model/chat/streaming"
headers = {
"ApiKey": self._api_key,
"Content-Type": "application/json",
}
formatted_data = _format_nebula_messages(messages=messages)
payload: Dict[str, Any] = {
"max_new_tokens": self.max_new_tokens,
"temperature": self.temperature,
**formatted_data,
**kwargs,
}
payload = {k: v for k, v in payload.items() if v is not None}
json_payload = json.dumps(payload)
response = requests.request(
"POST", url, headers=headers, data=json_payload, stream=True
)
response.raise_for_status()
for chunk_response in response.iter_lines():
chunk_decoded = chunk_response.decode()[6:]
try:
chunk = json.loads(chunk_decoded)
except JSONDecodeError:
continue
token = chunk["delta"]
cg_chunk = ChatGenerationChunk(message=AIMessageChunk(content=token))
if run_manager:
run_manager.on_llm_new_token(token, chunk=cg_chunk)
yield cg_chunk
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
url = f"{self.nebula_api_url}/v1/model/chat/streaming"
headers = {"ApiKey": self._api_key, "Content-Type": "application/json"}
formatted_data = _format_nebula_messages(messages=messages)
payload: Dict[str, Any] = {
"max_new_tokens": self.max_new_tokens,
"temperature": self.temperature,
**formatted_data,
**kwargs,
}
payload = {k: v for k, v in payload.items() if v is not None}
json_payload = json.dumps(payload)
async with ClientSession() as session:
async with session.post(
url, data=json_payload, headers=headers
) as response:
response.raise_for_status()
async for chunk_response in response.content:
chunk_decoded = chunk_response.decode()[6:]
try:
chunk = json.loads(chunk_decoded)
except JSONDecodeError:
continue
token = chunk["delta"]
cg_chunk = ChatGenerationChunk(
message=AIMessageChunk(content=token)
)
if run_manager:
await run_manager.on_llm_new_token(token, chunk=cg_chunk)
yield cg_chunk
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
url = f"{self.nebula_api_url}/v1/model/chat"
headers = {"ApiKey": self._api_key, "Content-Type": "application/json"}
formatted_data = _format_nebula_messages(messages=messages)
payload: Dict[str, Any] = {
"max_new_tokens": self.max_new_tokens,
"temperature": self.temperature,
**formatted_data,
**kwargs,
}
payload = {k: v for k, v in payload.items() if v is not None}
json_payload = json.dumps(payload)
response = requests.request("POST", url, headers=headers, data=json_payload)
response.raise_for_status()
data = response.json()
return ChatResult(
generations=[ChatGeneration(message=AIMessage(content=data["messages"]))],
llm_output=data,
)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
url = f"{self.nebula_api_url}/v1/model/chat"
headers = {"ApiKey": self._api_key, "Content-Type": "application/json"}
formatted_data = _format_nebula_messages(messages=messages)
payload: Dict[str, Any] = {
"max_new_tokens": self.max_new_tokens,
"temperature": self.temperature,
**formatted_data,
**kwargs,
}
payload = {k: v for k, v in payload.items() if v is not None}
json_payload = json.dumps(payload)
async with ClientSession() as session:
async with session.post(
url, data=json_payload, headers=headers
) as response:
response.raise_for_status()
data = await response.json()
return ChatResult(
generations=[
ChatGeneration(message=AIMessage(content=data["messages"]))
],
llm_output=data,
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/sambanova.py | import json
from operator import itemgetter
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Literal,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
import requests
from langchain_core.callbacks import (
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import (
BaseChatModel,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
HumanMessage,
SystemMessage,
ToolMessage,
)
from langchain_core.output_parsers import (
JsonOutputParser,
PydanticOutputParser,
)
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
PydanticToolsParser,
make_invalid_tool_call,
parse_tool_call,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel, Field, SecretStr
from requests import Response
def _convert_message_to_dict(message: BaseMessage) -> Dict[str, Any]:
"""
convert a BaseMessage to a dictionary with Role / content
Args:
message: BaseMessage
Returns:
messages_dict: role / content dict
"""
message_dict: Dict[str, Any] = {}
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "tool_calls" in message.additional_kwargs:
message_dict["tool_calls"] = message.additional_kwargs["tool_calls"]
if message_dict["content"] == "":
message_dict["content"] = None
elif isinstance(message, ToolMessage):
message_dict = {
"role": "tool",
"content": message.content,
"tool_call_id": message.tool_call_id,
}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
def _create_message_dicts(messages: List[BaseMessage]) -> List[Dict[str, Any]]:
"""
Convert a list of BaseMessages to a list of dictionaries with Role / content
Args:
messages: list of BaseMessages
Returns:
messages_dicts: list of role / content dicts
"""
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts
def _is_pydantic_class(obj: Any) -> bool:
return isinstance(obj, type) and is_basemodel_subclass(obj)
class ChatSambaNovaCloud(BaseChatModel):
"""
SambaNova Cloud chat model.
Setup:
To use, you should have the environment variables:
`SAMBANOVA_URL` set with your SambaNova Cloud URL.
`SAMBANOVA_API_KEY` set with your SambaNova Cloud API Key.
http://cloud.sambanova.ai/
Example:
.. code-block:: python
ChatSambaNovaCloud(
sambanova_url = SambaNova cloud endpoint URL,
sambanova_api_key = set with your SambaNova cloud API key,
model = model name,
max_tokens = max number of tokens to generate,
temperature = model temperature,
top_p = model top p,
top_k = model top k,
stream_options = include usage to get generation metrics
)
Key init args — completion params:
model: str
The name of the model to use, e.g., Meta-Llama-3-70B-Instruct.
streaming: bool
Whether to use streaming handler when using non streaming methods
max_tokens: int
max tokens to generate
temperature: float
model temperature
top_p: float
model top p
top_k: int
model top k
stream_options: dict
stream options, include usage to get generation metrics
Key init args — client params:
sambanova_url: str
SambaNova Cloud Url
sambanova_api_key: str
SambaNova Cloud api key
Instantiate:
.. code-block:: python
from langchain_community.chat_models import ChatSambaNovaCloud
chat = ChatSambaNovaCloud(
sambanova_url = SambaNova cloud endpoint URL,
sambanova_api_key = set with your SambaNova cloud API key,
model = model name,
max_tokens = max number of tokens to generate,
temperature = model temperature,
top_p = model top p,
top_k = model top k,
stream_options = include usage to get generation metrics
)
Invoke:
.. code-block:: python
messages = [
SystemMessage(content="your are an AI assistant."),
HumanMessage(content="tell me a joke."),
]
response = chat.invoke(messages)
Stream:
.. code-block:: python
for chunk in chat.stream(messages):
print(chunk.content, end="", flush=True)
Async:
.. code-block:: python
            response = await chat.ainvoke(messages)
Tool calling:
.. code-block:: python
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
location: str = Field(
...,
description="The city and state, e.g. Los Angeles, CA"
)
            llm_with_tools = llm.bind_tools([GetWeather])
ai_msg = llm_with_tools.invoke("Should I bring my umbrella today in LA?")
ai_msg.tool_calls
.. code-block:: none
[
{
'name': 'GetWeather',
'args': {'location': 'Los Angeles, CA'},
'id': 'call_adf61180ea2b4d228a'
}
]
Structured output:
.. code-block:: python
from typing import Optional
from pydantic import BaseModel, Field
class Joke(BaseModel):
'''Joke to tell user.'''
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
structured_model = llm.with_structured_output(Joke)
structured_model.invoke("Tell me a joke about cats")
.. code-block:: python
Joke(setup="Why did the cat join a band?",
punchline="Because it wanted to be the purr-cussionist!")
        See `ChatSambaNovaCloud.with_structured_output()` for more.
Token usage:
.. code-block:: python
response = chat.invoke(messages)
            print(response.response_metadata["usage"]["prompt_tokens"])
            print(response.response_metadata["usage"]["total_tokens"])
Response metadata
.. code-block:: python
response = chat.invoke(messages)
print(response.response_metadata)
"""
sambanova_url: str = Field(default="")
"""SambaNova Cloud Url"""
sambanova_api_key: SecretStr = Field(default=SecretStr(""))
"""SambaNova Cloud api key"""
model: str = Field(default="Meta-Llama-3.1-8B-Instruct")
"""The name of the model"""
streaming: bool = Field(default=False)
"""Whether to use streaming handler when using non streaming methods"""
max_tokens: int = Field(default=1024)
"""max tokens to generate"""
temperature: float = Field(default=0.7)
"""model temperature"""
top_p: Optional[float] = Field(default=None)
"""model top p"""
top_k: Optional[int] = Field(default=None)
"""model top k"""
stream_options: Dict[str, Any] = Field(default={"include_usage": True})
"""stream options, include usage to get generation metrics"""
additional_headers: Dict[str, Any] = Field(default={})
"""Additional headers to sent in request"""
class Config:
populate_by_name = True
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return False
@property
def lc_secrets(self) -> Dict[str, str]:
return {"sambanova_api_key": "sambanova_api_key"}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Return a dictionary of identifying parameters.
This information is used by the LangChain callback system, which
        is used for tracing purposes and makes it possible to monitor LLMs.
"""
return {
"model": self.model,
"streaming": self.streaming,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"stream_options": self.stream_options,
}
@property
def _llm_type(self) -> str:
"""Get the type of language model used by this chat model."""
return "sambanovacloud-chatmodel"
def __init__(self, **kwargs: Any) -> None:
"""init and validate environment variables"""
kwargs["sambanova_url"] = get_from_dict_or_env(
kwargs,
"sambanova_url",
"SAMBANOVA_URL",
default="https://api.sambanova.ai/v1/chat/completions",
)
kwargs["sambanova_api_key"] = convert_to_secret_str(
get_from_dict_or_env(kwargs, "sambanova_api_key", "SAMBANOVA_API_KEY")
)
super().__init__(**kwargs)
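    # Note: when neither the `sambanova_url` argument nor the SAMBANOVA_URL env var is
    # set, __init__ falls back to the public SambaNova Cloud chat completions endpoint
    # https://api.sambanova.ai/v1/chat/completions.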
def bind_tools(
self,
tools: Sequence[Union[Dict[str, Any], Type[Any], Callable[..., Any], BaseTool]],
*,
tool_choice: Optional[Union[Dict[str, Any], bool, str]] = None,
parallel_tool_calls: Optional[bool] = False,
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
"""Bind tool-like objects to this chat model
        tool_choice: does not currently support "any"; the value
        should be one of ["auto", "none", "required"]
"""
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
if tool_choice:
if isinstance(tool_choice, str):
# tool_choice is a tool/function name
if tool_choice not in ("auto", "none", "required"):
tool_choice = "auto"
elif isinstance(tool_choice, bool):
if tool_choice:
tool_choice = "required"
elif isinstance(tool_choice, dict):
raise ValueError(
"tool_choice must be one of ['auto', 'none', 'required']"
)
else:
raise ValueError(
f"Unrecognized tool_choice type. Expected str, bool"
f"Received: {tool_choice}"
)
else:
tool_choice = "auto"
kwargs["tool_choice"] = tool_choice
kwargs["parallel_tool_calls"] = parallel_tool_calls
return super().bind(tools=formatted_tools, **kwargs)
def with_structured_output(
self,
schema: Optional[Union[Dict[str, Any], Type[BaseModel]]] = None,
*,
method: Literal[
"function_calling", "json_mode", "json_schema"
] = "function_calling",
include_raw: bool = False,
**kwargs: Any,
) -> Runnable[LanguageModelInput, Union[Dict[str, Any], BaseModel]]:
"""Model wrapper that returns outputs formatted to match the given schema.
Args:
schema:
The output schema. Can be passed in as:
- an OpenAI function/tool schema,
- a JSON Schema,
- a TypedDict class,
- or a Pydantic.BaseModel class.
If `schema` is a Pydantic class then the model output will be a
Pydantic instance of that class, and the model-generated fields will be
validated by the Pydantic class. Otherwise the model output will be a
dict and will not be validated. See :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`
for more on how to properly specify types and descriptions of
schema fields when specifying a Pydantic or TypedDict class.
method:
The method for steering model generation, either "function_calling"
"json_mode" or "json_schema".
If "function_calling" then the schema will be converted
to an OpenAI function and the returned model will make use of the
function-calling API. If "json_mode" or "json_schema" then OpenAI's
JSON mode will be used.
Note that if using "json_mode" or "json_schema" then you must include instructions
for formatting the output into the desired schema into the model call.
include_raw:
If False then only the parsed structured output is returned. If
an error occurs during model output parsing it will be raised. If True
then both the raw model response (a BaseMessage) and the parsed model
response will be returned. If an error occurs during output parsing it
will be caught and returned as well. The final output is always a dict
with keys "raw", "parsed", and "parsing_error".
Returns:
A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`.
If `include_raw` is False and `schema` is a Pydantic class, Runnable outputs
an instance of `schema` (i.e., a Pydantic object).
Otherwise, if `include_raw` is False then Runnable outputs a dict.
If `include_raw` is True, then Runnable outputs a dict with keys:
- `"raw"`: BaseMessage
- `"parsed"`: None if there was a parsing error, otherwise the type depends on the `schema` as described above.
- `"parsing_error"`: Optional[BaseException]
Example: schema=Pydantic class, method="function_calling", include_raw=False:
.. code-block:: python
from typing import Optional
from langchain_community.chat_models import ChatSambaNovaCloud
from pydantic import BaseModel, Field
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str = Field(
description="A justification for the answer."
)
llm = ChatSambaNovaCloud(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
structured_llm = llm.with_structured_output(AnswerWithJustification)
structured_llm.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> AnswerWithJustification(
# answer='They weigh the same',
# justification='A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same.'
# )
Example: schema=Pydantic class, method="function_calling", include_raw=True:
.. code-block:: python
from langchain_community.chat_models import ChatSambaNovaCloud
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
llm = ChatSambaNovaCloud(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
structured_llm = llm.with_structured_output(
AnswerWithJustification, include_raw=True
)
structured_llm.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> {
# 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'function': {'arguments': '{"answer": "They weigh the same.", "justification": "A pound is a unit of weight or mass, so one pound of bricks and one pound of feathers both weigh the same amount."}', 'name': 'AnswerWithJustification'}, 'id': 'call_17a431fc6a4240e1bd', 'type': 'function'}]}, response_metadata={'finish_reason': 'tool_calls', 'usage': {'acceptance_rate': 5, 'completion_tokens': 53, 'completion_tokens_after_first_per_sec': 343.7964936837758, 'completion_tokens_after_first_per_sec_first_ten': 439.1205661878638, 'completion_tokens_per_sec': 162.8511306784833, 'end_time': 1731527851.0698032, 'is_last_response': True, 'prompt_tokens': 213, 'start_time': 1731527850.7137961, 'time_to_first_token': 0.20475482940673828, 'total_latency': 0.32545061111450196, 'total_tokens': 266, 'total_tokens_per_sec': 817.3283162354066}, 'model_name': 'Meta-Llama-3.1-70B-Instruct', 'system_fingerprint': 'fastcoe', 'created': 1731527850}, id='95667eaf-447f-4b53-bb6e-b6e1094ded88', tool_calls=[{'name': 'AnswerWithJustification', 'args': {'answer': 'They weigh the same.', 'justification': 'A pound is a unit of weight or mass, so one pound of bricks and one pound of feathers both weigh the same amount.'}, 'id': 'call_17a431fc6a4240e1bd', 'type': 'tool_call'}]),
# 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='A pound is a unit of weight or mass, so one pound of bricks and one pound of feathers both weigh the same amount.'),
# 'parsing_error': None
# }
Example: schema=TypedDict class, method="function_calling", include_raw=False:
.. code-block:: python
# IMPORTANT: If you are using Python <=3.8, you need to import Annotated
# from typing_extensions, not from typing.
from typing_extensions import Annotated, TypedDict
from langchain_community.chat_models import ChatSambaNovaCloud
class AnswerWithJustification(TypedDict):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: Annotated[
Optional[str], None, "A justification for the answer."
]
llm = ChatSambaNovaCloud(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
structured_llm = llm.with_structured_output(AnswerWithJustification)
structured_llm.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> {
# 'answer': 'They weigh the same',
# 'justification': 'A pound is a unit of weight or mass, so one pound of bricks and one pound of feathers both weigh the same amount.'
# }
Example: schema=OpenAI function schema, method="function_calling", include_raw=False:
.. code-block:: python
from langchain_community.chat_models import ChatSambaNovaCloud
oai_schema = {
'name': 'AnswerWithJustification',
'description': 'An answer to the user question along with justification for the answer.',
'parameters': {
'type': 'object',
'properties': {
'answer': {'type': 'string'},
'justification': {'description': 'A justification for the answer.', 'type': 'string'}
},
'required': ['answer']
}
}
llm = ChatSambaNovaCloud(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
structured_llm = llm.with_structured_output(oai_schema)
structured_llm.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> {
# 'answer': 'They weigh the same',
# 'justification': 'A pound is a unit of weight or mass, so one pound of bricks and one pound of feathers both weigh the same amount.'
# }
Example: schema=Pydantic class, method="json_mode", include_raw=True:
.. code-block::
from langchain_community.chat_models import ChatSambaNovaCloud
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
answer: str
justification: str
llm = ChatSambaNovaCloud(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
structured_llm = llm.with_structured_output(
AnswerWithJustification,
method="json_mode",
include_raw=True
)
structured_llm.invoke(
"Answer the following question. "
"Make sure to return a JSON blob with keys 'answer' and 'justification'.\n\n"
"What's heavier a pound of bricks or a pound of feathers?"
)
# -> {
# 'raw': AIMessage(content='{\n "answer": "They are the same weight",\n "justification": "A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities."\n}', additional_kwargs={}, response_metadata={'finish_reason': 'stop', 'usage': {'acceptance_rate': 5.3125, 'completion_tokens': 79, 'completion_tokens_after_first_per_sec': 292.65701089829776, 'completion_tokens_after_first_per_sec_first_ten': 346.43324678555325, 'completion_tokens_per_sec': 200.012158915008, 'end_time': 1731528071.1708555, 'is_last_response': True, 'prompt_tokens': 70, 'start_time': 1731528070.737394, 'time_to_first_token': 0.16693782806396484, 'total_latency': 0.3949759876026827, 'total_tokens': 149, 'total_tokens_per_sec': 377.2381225105847}, 'model_name': 'Meta-Llama-3.1-70B-Instruct', 'system_fingerprint': 'fastcoe', 'created': 1731528070}, id='83208297-3eb9-4021-a856-ca78a15758df'),
# 'parsed': AnswerWithJustification(answer='They are the same weight', justification='A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities.'),
# 'parsing_error': None
# }
Example: schema=None, method="json_mode", include_raw=True:
.. code-block::
from langchain_community.chat_models import ChatSambaNovaCloud
llm = ChatSambaNovaCloud(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
structured_llm = llm.with_structured_output(method="json_mode", include_raw=True)
structured_llm.invoke(
"Answer the following question. "
"Make sure to return a JSON blob with keys 'answer' and 'justification'.\n\n"
"What's heavier a pound of bricks or a pound of feathers?"
)
# -> {
# 'raw': AIMessage(content='{\n "answer": "They are the same weight",\n "justification": "A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities."\n}', additional_kwargs={}, response_metadata={'finish_reason': 'stop', 'usage': {'acceptance_rate': 4.722222222222222, 'completion_tokens': 79, 'completion_tokens_after_first_per_sec': 357.1315485254867, 'completion_tokens_after_first_per_sec_first_ten': 416.83279609305305, 'completion_tokens_per_sec': 240.92819585198137, 'end_time': 1731528164.8474727, 'is_last_response': True, 'prompt_tokens': 70, 'start_time': 1731528164.4906917, 'time_to_first_token': 0.13837409019470215, 'total_latency': 0.3278985247892492, 'total_tokens': 149, 'total_tokens_per_sec': 454.4088757208256}, 'model_name': 'Meta-Llama-3.1-70B-Instruct', 'system_fingerprint': 'fastcoe', 'created': 1731528164}, id='15261eaf-8a25-42ef-8ed5-f63d8bf5b1b0'),
# 'parsed': {
# 'answer': 'They are the same weight',
# 'justification': 'A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities.'},
# },
# 'parsing_error': None
# }
Example: schema=None, method="json_schema", include_raw=True:
.. code-block::
from langchain_community.chat_models import ChatSambaNovaCloud
class AnswerWithJustification(BaseModel):
answer: str
justification: str
llm = ChatSambaNovaCloud(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
structured_llm = llm.with_structured_output(AnswerWithJustification, method="json_schema", include_raw=True)
structured_llm.invoke(
"Answer the following question. "
"Make sure to return a JSON blob with keys 'answer' and 'justification'.\n\n"
"What's heavier a pound of bricks or a pound of feathers?"
)
# -> {
# 'raw': AIMessage(content='{\n "answer": "They are the same weight",\n "justification": "A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities."\n}', additional_kwargs={}, response_metadata={'finish_reason': 'stop', 'usage': {'acceptance_rate': 5.3125, 'completion_tokens': 79, 'completion_tokens_after_first_per_sec': 292.65701089829776, 'completion_tokens_after_first_per_sec_first_ten': 346.43324678555325, 'completion_tokens_per_sec': 200.012158915008, 'end_time': 1731528071.1708555, 'is_last_response': True, 'prompt_tokens': 70, 'start_time': 1731528070.737394, 'time_to_first_token': 0.16693782806396484, 'total_latency': 0.3949759876026827, 'total_tokens': 149, 'total_tokens_per_sec': 377.2381225105847}, 'model_name': 'Meta-Llama-3.1-70B-Instruct', 'system_fingerprint': 'fastcoe', 'created': 1731528070}, id='83208297-3eb9-4021-a856-ca78a15758df'),
# 'parsed': AnswerWithJustification(answer='They are the same weight', justification='A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities.'),
# 'parsing_error': None
# }
""" # noqa: E501
        if kwargs:
raise ValueError(f"Received unsupported arguments {kwargs}")
is_pydantic_schema = _is_pydantic_class(schema)
if method == "function_calling":
if schema is None:
raise ValueError(
"`schema` must be specified when method is `function_calling`. "
"Received None."
)
tool_name = convert_to_openai_tool(schema)["function"]["name"]
llm = self.bind_tools([schema], tool_choice=tool_name)
if is_pydantic_schema:
output_parser: OutputParserLike[Any] = PydanticToolsParser(
tools=[schema],
first_tool_only=True,
)
else:
output_parser = JsonOutputKeyToolsParser(
key_name=tool_name, first_tool_only=True
)
elif method == "json_mode":
llm = self
# TODO bind response format when json mode available by API
# llm = self.bind(response_format={"type": "json_object"})
if is_pydantic_schema:
schema = cast(Type[BaseModel], schema)
output_parser = PydanticOutputParser(pydantic_object=schema)
else:
output_parser = JsonOutputParser()
elif method == "json_schema":
if schema is None:
raise ValueError(
"`schema` must be specified when method is not `json_mode`. "
"Received None."
)
llm = self
# TODO bind response format when json schema available by API,
# update example
# llm = self.bind(
# response_format={"type": "json_object", "json_schema": schema}
# )
if is_pydantic_schema:
schema = cast(Type[BaseModel], schema)
output_parser = PydanticOutputParser(pydantic_object=schema)
else:
output_parser = JsonOutputParser()
else:
raise ValueError(
f"Unrecognized method argument. Expected one of `function_calling` or "
f"`json_mode`. Received: `{method}`"
)
if include_raw:
parser_assign = RunnablePassthrough.assign(
parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
)
parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
parser_with_fallback = parser_assign.with_fallbacks(
[parser_none], exception_key="parsing_error"
)
return RunnableMap(raw=llm) | parser_with_fallback
else:
return llm | output_parser
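    # Sketch of the runnable built above: with include_raw=True the result is
    #   RunnableMap(raw=llm) | RunnablePassthrough.assign(parsed=..., parsing_error=...)
    # so callers always get a dict with "raw", "parsed" and "parsing_error" keys;
    # with include_raw=False it is simply `llm | output_parser`.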
def _handle_request(
self,
messages_dicts: List[Dict[str, Any]],
stop: Optional[List[str]] = None,
streaming: bool = False,
**kwargs: Any,
) -> Response:
"""
Performs a post request to the LLM API.
Args:
messages_dicts: List of role / content dicts to use as input.
stop: list of stop tokens
            streaming: whether to do a streaming call
        Returns:
            A request Response object.
"""
if streaming:
data = {
"messages": messages_dicts,
"max_tokens": self.max_tokens,
"stop": stop,
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"stream": True,
"stream_options": self.stream_options,
**kwargs,
}
else:
data = {
"messages": messages_dicts,
"max_tokens": self.max_tokens,
"stop": stop,
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
**kwargs,
}
http_session = requests.Session()
response = http_session.post(
self.sambanova_url,
headers={
"Authorization": f"Bearer {self.sambanova_api_key.get_secret_value()}",
"Content-Type": "application/json",
**self.additional_headers,
},
json=data,
stream=streaming,
)
if response.status_code != 200:
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}.",
f"{response.text}.",
)
return response
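    # Note: the streaming and non-streaming payloads built above differ only in the
    # "stream" and "stream_options" fields; all other parameters are shared.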
def _process_response(self, response: Response) -> AIMessage:
"""
Process a non streaming response from the api
Args:
response: A request Response object
        Returns:
generation: an AIMessage with model generation
"""
try:
response_dict = response.json()
if response_dict.get("error"):
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}.",
f"{response_dict}.",
)
except Exception as e:
raise RuntimeError(
f"Sambanova /complete call failed couldn't get JSON response {e}"
f"response: {response.text}"
)
content = response_dict["choices"][0]["message"].get("content", "")
if content is None:
content = ""
additional_kwargs: Dict[str, Any] = {}
tool_calls = []
invalid_tool_calls = []
raw_tool_calls = response_dict["choices"][0]["message"].get("tool_calls")
if raw_tool_calls:
additional_kwargs["tool_calls"] = raw_tool_calls
for raw_tool_call in raw_tool_calls:
if isinstance(raw_tool_call["function"]["arguments"], dict):
raw_tool_call["function"]["arguments"] = json.dumps(
raw_tool_call["function"].get("arguments", {})
)
try:
tool_calls.append(parse_tool_call(raw_tool_call, return_id=True))
except Exception as e:
invalid_tool_calls.append(
make_invalid_tool_call(raw_tool_call, str(e))
)
message = AIMessage(
content=content,
additional_kwargs=additional_kwargs,
tool_calls=tool_calls,
invalid_tool_calls=invalid_tool_calls,
response_metadata={
"finish_reason": response_dict["choices"][0]["finish_reason"],
"usage": response_dict.get("usage"),
"model_name": response_dict["model"],
"system_fingerprint": response_dict["system_fingerprint"],
"created": response_dict["created"],
},
id=response_dict["id"],
)
return message
def _process_stream_response(
self, response: Response
) -> Iterator[BaseMessageChunk]:
"""
Process a streaming response from the api
Args:
response: An iterable request Response object
Yields:
generation: an AIMessageChunk with model partial generation
"""
try:
import sseclient
except ImportError:
raise ImportError(
"could not import sseclient library"
"Please install it with `pip install sseclient-py`."
)
        client = sseclient.SSEClient(response)
        # initialize finish_reason so the final usage-only chunk can reference it
        finish_reason = ""
        for event in client.events():
if event.event == "error_event":
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}."
f"{event.data}."
)
try:
# check if the response is a final event
# in that case event data response is '[DONE]'
if event.data != "[DONE]":
if isinstance(event.data, str):
data = json.loads(event.data)
else:
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}."
f"{event.data}."
)
if data.get("error"):
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}."
f"{event.data}."
)
if len(data["choices"]) > 0:
finish_reason = data["choices"][0].get("finish_reason")
content = data["choices"][0]["delta"]["content"]
id = data["id"]
chunk = AIMessageChunk(
content=content, id=id, additional_kwargs={}
)
else:
content = ""
id = data["id"]
metadata = {
"finish_reason": finish_reason,
"usage": data.get("usage"),
"model_name": data["model"],
"system_fingerprint": data["system_fingerprint"],
"created": data["created"],
}
chunk = AIMessageChunk(
content=content,
id=id,
response_metadata=metadata,
additional_kwargs={},
)
yield chunk
except Exception as e:
raise RuntimeError(
f"Error getting content chunk raw streamed response: {e}"
f"data: {event.data}"
)
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""
Call SambaNovaCloud models.
Args:
messages: the prompt composed of a list of messages.
stop: a list of strings on which the model should stop generating.
If generation stops due to a stop token, the stop token itself
SHOULD BE INCLUDED as part of the output. This is not enforced
across models right now, but it's a good practice to follow since
it makes it much easier to parse the output of the model
downstream and understand why generation stopped.
run_manager: A run manager with callbacks for the LLM.
Returns:
result: ChatResult with model generation
"""
if self.streaming:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
if stream_iter:
return generate_from_stream(stream_iter)
messages_dicts = _create_message_dicts(messages)
response = self._handle_request(messages_dicts, stop, streaming=False, **kwargs)
message = self._process_response(response)
generation = ChatGeneration(
message=message,
generation_info={
"finish_reason": message.response_metadata["finish_reason"]
},
)
return ChatResult(generations=[generation])
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
"""
Stream the output of the SambaNovaCloud chat model.
Args:
messages: the prompt composed of a list of messages.
stop: a list of strings on which the model should stop generating.
If generation stops due to a stop token, the stop token itself
SHOULD BE INCLUDED as part of the output. This is not enforced
across models right now, but it's a good practice to follow since
it makes it much easier to parse the output of the model
downstream and understand why generation stopped.
run_manager: A run manager with callbacks for the LLM.
Yields:
chunk: ChatGenerationChunk with model partial generation
"""
messages_dicts = _create_message_dicts(messages)
response = self._handle_request(messages_dicts, stop, streaming=True, **kwargs)
for ai_message_chunk in self._process_stream_response(response):
chunk = ChatGenerationChunk(message=ai_message_chunk)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
class ChatSambaStudio(BaseChatModel):
"""
SambaStudio chat model.
Setup:
To use, you should have the environment variables:
``SAMBASTUDIO_URL`` set with your SambaStudio deployed endpoint URL.
``SAMBASTUDIO_API_KEY`` set with your SambaStudio deployed endpoint Key.
https://docs.sambanova.ai/sambastudio/latest/index.html
Example:
.. code-block:: python
ChatSambaStudio(
sambastudio_url = set with your SambaStudio deployed endpoint URL,
sambastudio_api_key = set with your SambaStudio deployed endpoint Key.
model = model or expert name (set for CoE endpoints),
max_tokens = max number of tokens to generate,
temperature = model temperature,
top_p = model top p,
top_k = model top k,
                do_sample = whether to do sample
                process_prompt = whether to process prompt
(set for CoE generic v1 and v2 endpoints)
stream_options = include usage to get generation metrics
special_tokens = start, start_role, end_role, end special tokens
(set for CoE generic v1 and v2 endpoints when process prompt
set to false or for StandAlone v1 and v2 endpoints)
model_kwargs: Optional = Extra Key word arguments to pass to the model.
)
Key init args — completion params:
model: str
The name of the model to use, e.g., Meta-Llama-3-70B-Instruct-4096
(set for CoE endpoints).
streaming: bool
            Whether to use streaming handler when using non streaming methods
        max_tokens: int
max tokens to generate
temperature: float
model temperature
top_p: float
model top p
top_k: int
model top k
do_sample: bool
            whether to do sample
        process_prompt: bool
            whether to process prompt (set for CoE generic v1 and v2 endpoints)
stream_options: dict
stream options, include usage to get generation metrics
special_tokens: dict
start, start_role, end_role and end special tokens
(set for CoE generic v1 and v2 endpoints when process prompt set to false
or for StandAlone v1 and v2 endpoints) default to llama3 special tokens
model_kwargs: dict
Extra Key word arguments to pass to the model.
Key init args — client params:
sambastudio_url: str
SambaStudio endpoint Url
sambastudio_api_key: str
SambaStudio endpoint api key
Instantiate:
.. code-block:: python
from langchain_community.chat_models import ChatSambaStudio
            chat = ChatSambaStudio(
sambastudio_url = set with your SambaStudio deployed endpoint URL,
sambastudio_api_key = set with your SambaStudio deployed endpoint Key.
model = model or expert name (set for CoE endpoints),
max_tokens = max number of tokens to generate,
temperature = model temperature,
top_p = model top p,
top_k = model top k,
                do_sample = whether to do sample
                process_prompt = whether to process prompt
(set for CoE generic v1 and v2 endpoints)
stream_options = include usage to get generation metrics
                special_tokens = start, start_role, end_role, end special tokens
(set for CoE generic v1 and v2 endpoints when process prompt
set to false or for StandAlone v1 and v2 endpoints)
model_kwargs: Optional = Extra Key word arguments to pass to the model.
)
Invoke:
.. code-block:: python
messages = [
SystemMessage(content="your are an AI assistant."),
HumanMessage(content="tell me a joke."),
]
response = chat.invoke(messages)
Stream:
.. code-block:: python
for chunk in chat.stream(messages):
print(chunk.content, end="", flush=True)
Async:
.. code-block:: python
            response = await chat.ainvoke(messages)
Token usage:
.. code-block:: python
response = chat.invoke(messages)
            print(response.response_metadata["usage"]["prompt_tokens"])
            print(response.response_metadata["usage"]["total_tokens"])
Response metadata
.. code-block:: python
response = chat.invoke(messages)
print(response.response_metadata)
"""
sambastudio_url: str = Field(default="")
"""SambaStudio Url"""
sambastudio_api_key: SecretStr = Field(default=SecretStr(""))
"""SambaStudio api key"""
base_url: str = Field(default="", exclude=True)
"""SambaStudio non streaming Url"""
streaming_url: str = Field(default="", exclude=True)
"""SambaStudio streaming Url"""
model: Optional[str] = Field(default=None)
"""The name of the model or expert to use (for CoE endpoints)"""
streaming: bool = Field(default=False)
"""Whether to use streaming handler when using non streaming methods"""
max_tokens: int = Field(default=1024)
"""max tokens to generate"""
temperature: Optional[float] = Field(default=0.7)
"""model temperature"""
top_p: Optional[float] = Field(default=None)
"""model top p"""
top_k: Optional[int] = Field(default=None)
"""model top k"""
do_sample: Optional[bool] = Field(default=None)
"""whether to do sampling"""
process_prompt: Optional[bool] = Field(default=True)
"""whether process prompt (for CoE generic v1 and v2 endpoints)"""
stream_options: Dict[str, Any] = Field(default={"include_usage": True})
"""stream options, include usage to get generation metrics"""
special_tokens: Dict[str, Any] = Field(
default={
"start": "<|begin_of_text|>",
"start_role": "<|begin_of_text|><|start_header_id|>{role}<|end_header_id|>",
"end_role": "<|eot_id|>",
"end": "<|start_header_id|>assistant<|end_header_id|>\n",
}
)
"""start, start_role, end_role and end special tokens
(set for CoE generic v1 and v2 endpoints when process prompt set to false
or for StandAlone v1 and v2 endpoints)
default to llama3 special tokens"""
model_kwargs: Optional[Dict[str, Any]] = None
"""Key word arguments to pass to the model."""
class Config:
populate_by_name = True
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return False
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"sambastudio_url": "sambastudio_url",
"sambastudio_api_key": "sambastudio_api_key",
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Return a dictionary of identifying parameters.
This information is used by the LangChain callback system, which
        is used for tracing purposes and makes it possible to monitor LLMs.
"""
return {
"model": self.model,
"streaming": self.streaming,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"do_sample": self.do_sample,
"process_prompt": self.process_prompt,
"stream_options": self.stream_options,
"special_tokens": self.special_tokens,
"model_kwargs": self.model_kwargs,
}
@property
def _llm_type(self) -> str:
"""Get the type of language model used by this chat model."""
return "sambastudio-chatmodel"
def __init__(self, **kwargs: Any) -> None:
"""init and validate environment variables"""
kwargs["sambastudio_url"] = get_from_dict_or_env(
kwargs, "sambastudio_url", "SAMBASTUDIO_URL"
)
kwargs["sambastudio_api_key"] = convert_to_secret_str(
get_from_dict_or_env(kwargs, "sambastudio_api_key", "SAMBASTUDIO_API_KEY")
)
kwargs["base_url"], kwargs["streaming_url"] = self._get_sambastudio_urls(
kwargs["sambastudio_url"]
)
super().__init__(**kwargs)
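    # Note: base_url and streaming_url are derived from sambastudio_url at init time
    # via _get_sambastudio_urls and are excluded from serialization.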
def _get_role(self, message: BaseMessage) -> str:
"""
Get the role of LangChain BaseMessage
Args:
message: LangChain BaseMessage
Returns:
str: Role of the LangChain BaseMessage
"""
if isinstance(message, ChatMessage):
role = message.role
elif isinstance(message, SystemMessage):
role = "system"
elif isinstance(message, HumanMessage):
role = "user"
elif isinstance(message, AIMessage):
role = "assistant"
elif isinstance(message, ToolMessage):
role = "tool"
else:
raise TypeError(f"Got unknown type {message}")
return role
def _messages_to_string(self, messages: List[BaseMessage]) -> str:
"""
Convert a list of BaseMessages to a:
- dumped json string with Role / content dict structure
when process_prompt is true,
- string with special tokens if process_prompt is false
for generic V1 and V2 endpoints
Args:
messages: list of BaseMessages
Returns:
str: string to send as model input depending on process_prompt param
"""
if self.process_prompt:
messages_dict: Dict[str, Any] = {
"conversation_id": "sambaverse-conversation-id",
"messages": [],
}
for message in messages:
messages_dict["messages"].append(
{
"message_id": message.id,
"role": self._get_role(message),
"content": message.content,
}
)
messages_string = json.dumps(messages_dict)
else:
messages_string = self.special_tokens["start"]
for message in messages:
messages_string += self.special_tokens["start_role"].format(
role=self._get_role(message)
)
messages_string += f" {message.content} "
messages_string += self.special_tokens["end_role"]
messages_string += self.special_tokens["end"]
return messages_string
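    # Illustrative sketch (message content is a placeholder): with process_prompt=True a
    # single HumanMessage("hi") is serialized roughly as
    #   '{"conversation_id": "sambaverse-conversation-id", "messages": [{"message_id": null, "role": "user", "content": "hi"}]}'
    # with process_prompt=False the messages are wrapped in the configured special tokens instead.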
def _get_sambastudio_urls(self, url: str) -> Tuple[str, str]:
"""
Get streaming and non streaming URLs from the given URL
Args:
url: string with sambastudio base or streaming endpoint url
Returns:
base_url: string with url to do non streaming calls
streaming_url: string with url to do streaming calls
"""
if "openai" in url:
base_url = url
stream_url = url
else:
if "stream" in url:
base_url = url.replace("stream/", "")
stream_url = url
else:
base_url = url
if "generic" in url:
stream_url = "generic/stream".join(url.split("generic"))
else:
raise ValueError("Unsupported URL")
return base_url, stream_url
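    # Illustrative example (host and endpoint id are hypothetical): a generic URL such as
    #   https://<host>/api/v2/predict/generic/<endpoint-id>
    # is kept as base_url while the streaming URL becomes
    #   https://<host>/api/v2/predict/generic/stream/<endpoint-id>
    # OpenAI-compatible URLs (containing "openai") are used unchanged for both calls.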
def _handle_request(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
streaming: Optional[bool] = False,
) -> Response:
"""
Performs a post request to the LLM API.
Args:
            messages: list of BaseMessages to use as input.
            stop: list of stop tokens
            streaming: whether to do a streaming call
Returns:
A request Response object
"""
# create request payload for openai compatible API
if "openai" in self.sambastudio_url:
messages_dicts = _create_message_dicts(messages)
data = {
"messages": messages_dicts,
"max_tokens": self.max_tokens,
"stop": stop,
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"stream": streaming,
"stream_options": self.stream_options,
}
data = {key: value for key, value in data.items() if value is not None}
headers = {
"Authorization": f"Bearer "
f"{self.sambastudio_api_key.get_secret_value()}",
"Content-Type": "application/json",
}
        # create request payload for generic v2 API
elif "api/v2/predict/generic" in self.sambastudio_url:
items = [{"id": "item0", "value": self._messages_to_string(messages)}]
params: Dict[str, Any] = {
"select_expert": self.model,
"process_prompt": self.process_prompt,
"max_tokens_to_generate": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"do_sample": self.do_sample,
}
if self.model_kwargs is not None:
params = {**params, **self.model_kwargs}
params = {key: value for key, value in params.items() if value is not None}
data = {"items": items, "params": params}
headers = {"key": self.sambastudio_api_key.get_secret_value()}
# create request payload for generic v1 API
elif "api/predict/generic" in self.sambastudio_url:
params = {
"select_expert": self.model,
"process_prompt": self.process_prompt,
"max_tokens_to_generate": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"do_sample": self.do_sample,
}
if self.model_kwargs is not None:
params = {**params, **self.model_kwargs}
params = {
key: {"type": type(value).__name__, "value": str(value)}
for key, value in params.items()
if value is not None
}
if streaming:
data = {
"instance": self._messages_to_string(messages),
"params": params,
}
else:
data = {
"instances": [self._messages_to_string(messages)],
"params": params,
}
headers = {"key": self.sambastudio_api_key.get_secret_value()}
else:
raise ValueError(
f"Unsupported URL{self.sambastudio_url}"
"only openai, generic v1 and generic v2 APIs are supported"
)
http_session = requests.Session()
if streaming:
response = http_session.post(
self.streaming_url, headers=headers, json=data, stream=True
)
else:
response = http_session.post(
self.base_url, headers=headers, json=data, stream=False
)
if response.status_code != 200:
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}."
f"{response.text}."
)
return response
def _process_response(self, response: Response) -> AIMessage:
"""
Process a non streaming response from the api
Args:
response: A request Response object
        Returns:
generation: an AIMessage with model generation
"""
# Extract json payload form response
try:
response_dict = response.json()
except Exception as e:
raise RuntimeError(
f"Sambanova /complete call failed couldn't get JSON response {e}"
f"response: {response.text}"
)
# process response payload for openai compatible API
if "openai" in self.sambastudio_url:
content = response_dict["choices"][0]["message"]["content"]
id = response_dict["id"]
response_metadata = {
"finish_reason": response_dict["choices"][0]["finish_reason"],
"usage": response_dict.get("usage"),
"model_name": response_dict["model"],
"system_fingerprint": response_dict["system_fingerprint"],
"created": response_dict["created"],
}
# process response payload for generic v2 API
elif "api/v2/predict/generic" in self.sambastudio_url:
content = response_dict["items"][0]["value"]["completion"]
id = response_dict["items"][0]["id"]
response_metadata = response_dict["items"][0]
# process response payload for generic v1 API
elif "api/predict/generic" in self.sambastudio_url:
content = response_dict["predictions"][0]["completion"]
id = None
response_metadata = response_dict
else:
raise ValueError(
f"Unsupported URL{self.sambastudio_url}"
"only openai, generic v1 and generic v2 APIs are supported"
)
return AIMessage(
content=content,
additional_kwargs={},
response_metadata=response_metadata,
id=id,
)
def _process_stream_response(
self, response: Response
) -> Iterator[BaseMessageChunk]:
"""
Process a streaming response from the api
Args:
response: An iterable request Response object
Yields:
generation: an AIMessageChunk with model partial generation
"""
try:
import sseclient
except ImportError:
raise ImportError(
"could not import sseclient library"
"Please install it with `pip install sseclient-py`."
)
# process response payload for openai compatible API
if "openai" in self.sambastudio_url:
finish_reason = ""
client = sseclient.SSEClient(response)
for event in client.events():
if event.event == "error_event":
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}."
f"{event.data}."
)
try:
# check if the response is not a final event ("[DONE]")
if event.data != "[DONE]":
if isinstance(event.data, str):
data = json.loads(event.data)
else:
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}."
f"{event.data}."
)
if data.get("error"):
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}."
f"{event.data}."
)
if len(data["choices"]) > 0:
finish_reason = data["choices"][0].get("finish_reason")
content = data["choices"][0]["delta"]["content"]
id = data["id"]
metadata = {}
else:
content = ""
id = data["id"]
metadata = {
"finish_reason": finish_reason,
"usage": data.get("usage"),
"model_name": data["model"],
"system_fingerprint": data["system_fingerprint"],
"created": data["created"],
}
if data.get("usage") is not None:
content = ""
id = data["id"]
metadata = {
"finish_reason": finish_reason,
"usage": data.get("usage"),
"model_name": data["model"],
"system_fingerprint": data["system_fingerprint"],
"created": data["created"],
}
yield AIMessageChunk(
content=content,
id=id,
response_metadata=metadata,
additional_kwargs={},
)
except Exception as e:
raise RuntimeError(
f"Error getting content chunk raw streamed response: {e}"
f"data: {event.data}"
)
# process response payload for generic v2 API
elif "api/v2/predict/generic" in self.sambastudio_url:
for line in response.iter_lines():
try:
data = json.loads(line)
content = data["result"]["items"][0]["value"]["stream_token"]
id = data["result"]["items"][0]["id"]
if data["result"]["items"][0]["value"]["is_last_response"]:
metadata = {
"finish_reason": data["result"]["items"][0]["value"].get(
"stop_reason"
),
"prompt": data["result"]["items"][0]["value"].get("prompt"),
"usage": {
"prompt_tokens_count": data["result"]["items"][0][
"value"
].get("prompt_tokens_count"),
"completion_tokens_count": data["result"]["items"][0][
"value"
].get("completion_tokens_count"),
"total_tokens_count": data["result"]["items"][0][
"value"
].get("total_tokens_count"),
"start_time": data["result"]["items"][0]["value"].get(
"start_time"
),
"end_time": data["result"]["items"][0]["value"].get(
"end_time"
),
"model_execution_time": data["result"]["items"][0][
"value"
].get("model_execution_time"),
"time_to_first_token": data["result"]["items"][0][
"value"
].get("time_to_first_token"),
"throughput_after_first_token": data["result"]["items"][
0
]["value"].get("throughput_after_first_token"),
"batch_size_used": data["result"]["items"][0][
"value"
].get("batch_size_used"),
},
}
else:
metadata = {}
yield AIMessageChunk(
content=content,
id=id,
response_metadata=metadata,
additional_kwargs={},
)
except Exception as e:
raise RuntimeError(
f"Error getting content chunk raw streamed response: {e}"
f"line: {line}"
)
# process response payload for generic v1 API
elif "api/predict/generic" in self.sambastudio_url:
for line in response.iter_lines():
try:
data = json.loads(line)
content = data["result"]["responses"][0]["stream_token"]
id = None
if data["result"]["responses"][0]["is_last_response"]:
metadata = {
"finish_reason": data["result"]["responses"][0].get(
"stop_reason"
),
"prompt": data["result"]["responses"][0].get("prompt"),
"usage": {
"prompt_tokens_count": data["result"]["responses"][
0
].get("prompt_tokens_count"),
"completion_tokens_count": data["result"]["responses"][
0
].get("completion_tokens_count"),
"total_tokens_count": data["result"]["responses"][
0
].get("total_tokens_count"),
"start_time": data["result"]["responses"][0].get(
"start_time"
),
"end_time": data["result"]["responses"][0].get(
"end_time"
),
"model_execution_time": data["result"]["responses"][
0
].get("model_execution_time"),
"time_to_first_token": data["result"]["responses"][
0
].get("time_to_first_token"),
"throughput_after_first_token": data["result"][
"responses"
][0].get("throughput_after_first_token"),
"batch_size_used": data["result"]["responses"][0].get(
"batch_size_used"
),
},
}
else:
metadata = {}
yield AIMessageChunk(
content=content,
id=id,
response_metadata=metadata,
additional_kwargs={},
)
except Exception as e:
raise RuntimeError(
f"Error getting content chunk raw streamed response: {e}"
f"line: {line}"
)
else:
raise ValueError(
f"Unsupported URL{self.sambastudio_url}"
"only openai, generic v1 and generic v2 APIs are supported"
)
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""
Call SambaStudio models.
Args:
messages: the prompt composed of a list of messages.
stop: a list of strings on which the model should stop generating.
If generation stops due to a stop token, the stop token itself
SHOULD BE INCLUDED as part of the output. This is not enforced
across models right now, but it's a good practice to follow since
it makes it much easier to parse the output of the model
downstream and understand why generation stopped.
run_manager: A run manager with callbacks for the LLM.
Returns:
result: ChatResult with model generation
"""
if self.streaming:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
if stream_iter:
return generate_from_stream(stream_iter)
response = self._handle_request(messages, stop, streaming=False)
message = self._process_response(response)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
"""
Stream the output of the SambaStudio model.
Args:
messages: the prompt composed of a list of messages.
stop: a list of strings on which the model should stop generating.
If generation stops due to a stop token, the stop token itself
SHOULD BE INCLUDED as part of the output. This is not enforced
across models right now, but it's a good practice to follow since
it makes it much easier to parse the output of the model
downstream and understand why generation stopped.
run_manager: A run manager with callbacks for the LLM.
Yields:
chunk: ChatGenerationChunk with model partial generation
"""
response = self._handle_request(messages, stop, streaming=True)
for ai_message_chunk in self._process_stream_response(response):
chunk = ChatGenerationChunk(message=ai_message_chunk)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/coze.py | import json
import logging
from typing import Any, Dict, Iterator, List, Mapping, Optional, Union
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import (
BaseChatModel,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
HumanMessage,
HumanMessageChunk,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.utils import (
convert_to_secret_str,
get_from_dict_or_env,
)
from pydantic import ConfigDict, Field, SecretStr, model_validator
logger = logging.getLogger(__name__)
DEFAULT_API_BASE = "https://api.coze.com"
def _convert_message_to_dict(message: BaseMessage) -> dict:
message_dict: Dict[str, Any]
if isinstance(message, HumanMessage):
message_dict = {
"role": "user",
"content": message.content,
"content_type": "text",
}
else:
message_dict = {
"role": "assistant",
"content": message.content,
"content_type": "text",
}
return message_dict
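# Illustrative example (content is a placeholder): HumanMessage(content="hello") maps to
# {"role": "user", "content": "hello", "content_type": "text"}; any non-human message is
# sent with role "assistant".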
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> Union[BaseMessage, None]:
msg_type = _dict["type"]
if msg_type != "answer":
return None
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
return AIMessage(content=_dict.get("content", "") or "")
else:
return ChatMessage(content=_dict["content"], role=role)
def _convert_delta_to_message_chunk(_dict: Mapping[str, Any]) -> BaseMessageChunk:
role = _dict.get("role")
content = _dict.get("content") or ""
if role == "user":
return HumanMessageChunk(content=content)
elif role == "assistant":
return AIMessageChunk(content=content)
else:
return ChatMessageChunk(content=content, role=role) # type: ignore[arg-type]
class ChatCoze(BaseChatModel):
"""ChatCoze chat models API by coze.com
For more information, see https://www.coze.com/open/docs/chat
"""
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"coze_api_key": "COZE_API_KEY",
}
@property
def lc_serializable(self) -> bool:
return True
coze_api_base: str = Field(default=DEFAULT_API_BASE)
"""Coze custom endpoints"""
coze_api_key: Optional[SecretStr] = None
"""Coze API Key"""
request_timeout: int = Field(default=60, alias="timeout")
"""request timeout for chat http requests"""
bot_id: str = Field(default="")
"""The ID of the bot that the API interacts with."""
conversation_id: str = Field(default="")
"""Indicate which conversation the dialog is taking place in. If there is no need to
    distinguish the context of the conversation (just a question and answer), skip this
parameter. It will be generated by the system."""
user: str = Field(default="")
"""The user who calls the API to chat with the bot."""
streaming: bool = False
"""Whether to stream the response to the client.
false: if no value is specified or set to false, a non-streaming response is
returned. "Non-streaming response" means that all responses will be returned at once
after they are all ready, and the client does not need to concatenate the content.
    true: set to true, partial message deltas will be sent.
"Streaming response" will provide real-time response of the model to the client, and
the client needs to assemble the final reply based on the type of message. """
model_config = ConfigDict(
populate_by_name=True,
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
values["coze_api_base"] = get_from_dict_or_env(
values,
"coze_api_base",
"COZE_API_BASE",
DEFAULT_API_BASE,
)
values["coze_api_key"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"coze_api_key",
"COZE_API_KEY",
)
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Coze API."""
return {
"bot_id": self.bot_id,
"conversation_id": self.conversation_id,
"user": self.user,
"streaming": self.streaming,
}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
r = self._chat(messages, **kwargs)
res = r.json()
if res["code"] != 0:
raise ValueError(
f"Error from Coze api response: {res['code']}: {res['msg']}, "
f"logid: {r.headers.get('X-Tt-Logid')}"
)
return self._create_chat_result(res.get("messages") or [])
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
res = self._chat(messages, **kwargs)
for chunk in res.iter_lines():
chunk = chunk.decode("utf-8").strip("\r\n")
parts = chunk.split("data:", 1)
chunk = parts[1] if len(parts) > 1 else None
if chunk is None:
continue
response = json.loads(chunk)
if response["event"] == "done":
break
elif (
response["event"] != "message"
or response["message"]["type"] != "answer"
):
continue
chunk = _convert_delta_to_message_chunk(response["message"])
cg_chunk = ChatGenerationChunk(message=chunk)
if run_manager:
run_manager.on_llm_new_token(chunk.content, chunk=cg_chunk)
yield cg_chunk
def _chat(self, messages: List[BaseMessage], **kwargs: Any) -> requests.Response:
parameters = {**self._default_params, **kwargs}
query = ""
chat_history = []
for msg in messages:
if isinstance(msg, HumanMessage):
query = f"{msg.content}" # overwrite, to get last user message as query
chat_history.append(_convert_message_to_dict(msg))
conversation_id = parameters.pop("conversation_id")
bot_id = parameters.pop("bot_id")
user = parameters.pop("user")
streaming = parameters.pop("streaming")
payload = {
"conversation_id": conversation_id,
"bot_id": bot_id,
"user": user,
"query": query,
"stream": streaming,
}
if chat_history:
payload["chat_history"] = chat_history
url = self.coze_api_base + "/open_api/v2/chat"
api_key = ""
if self.coze_api_key:
api_key = self.coze_api_key.get_secret_value()
res = requests.post(
url=url,
timeout=self.request_timeout,
headers={
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}",
},
json=payload,
stream=streaming,
)
if res.status_code != 200:
logid = res.headers.get("X-Tt-Logid")
raise ValueError(f"Error from Coze api response: {res}, logid: {logid}")
return res
def _create_chat_result(self, messages: List[Mapping[str, Any]]) -> ChatResult:
generations = []
for c in messages:
msg = _convert_dict_to_message(c)
if msg:
generations.append(ChatGeneration(message=msg))
llm_output = {"token_usage": "", "model": ""}
return ChatResult(generations=generations, llm_output=llm_output)
@property
def _llm_type(self) -> str:
return "coze-chat"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/azure_openai.py | """Azure OpenAI chat wrapper."""
from __future__ import annotations
import logging
import os
import warnings
from typing import Any, Awaitable, Callable, Dict, List, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.outputs import ChatResult
from langchain_core.utils import get_from_dict_or_env, pre_init
from pydantic import BaseModel, Field
from langchain_community.chat_models.openai import ChatOpenAI
from langchain_community.utils.openai import is_openai_v1
logger = logging.getLogger(__name__)
@deprecated(
since="0.0.10",
removal="1.0",
alternative_import="langchain_openai.AzureChatOpenAI",
)
class AzureChatOpenAI(ChatOpenAI):
"""`Azure OpenAI` Chat Completion API.
To use this class you
must have a deployed model on Azure OpenAI. Use `deployment_name` in the
constructor to refer to the "Model deployment name" in the Azure portal.
    In addition, you should have the ``openai`` python package installed, and the
    following environment variables set or passed to the constructor as their
    lower-cased counterparts:
- ``AZURE_OPENAI_API_KEY``
- ``AZURE_OPENAI_ENDPOINT``
- ``AZURE_OPENAI_AD_TOKEN``
- ``OPENAI_API_VERSION``
- ``OPENAI_PROXY``
For example, if you have `gpt-35-turbo` deployed, with the deployment name
`35-turbo-dev`, the constructor should look like:
.. code-block:: python
AzureChatOpenAI(
azure_deployment="35-turbo-dev",
openai_api_version="2023-05-15",
)
Be aware the API version may change.
    You can also specify the version of the model using the ``model_version``
    constructor parameter, as Azure OpenAI doesn't return the model version with
    the response. The default is empty. When you specify a version, it is
    appended to the model name in the response, which helps you calculate costs
    properly. The model version is not validated, so make sure you set it
    correctly to get accurate cost figures.
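
    For example, a sketch that pins the model version for cost tracking (the
    deployment, API version and model version below are placeholder values):

    .. code-block:: python

        AzureChatOpenAI(
            azure_deployment="35-turbo-dev",
            openai_api_version="2023-05-15",
            model_version="0613",
        )
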
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
"""
azure_endpoint: Union[str, None] = None
"""Your Azure endpoint, including the resource.
Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided.
Example: `https://example-resource.azure.openai.com/`
"""
deployment_name: Union[str, None] = Field(default=None, alias="azure_deployment")
"""A model deployment.
    If given, sets the base client URL to include `/deployments/{azure_deployment}`.
Note: this means you won't be able to use non-deployment endpoints.
"""
openai_api_version: str = Field(default="", alias="api_version")
"""Automatically inferred from env var `OPENAI_API_VERSION` if not provided."""
openai_api_key: Union[str, None] = Field(default=None, alias="api_key")
"""Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided."""
azure_ad_token: Union[str, None] = None
"""Your Azure Active Directory token.
Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided.
For more:
https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
"""
azure_ad_token_provider: Union[Callable[[], str], None] = None
"""A function that returns an Azure Active Directory token.
Will be invoked on every sync request. For async requests,
will be invoked if `azure_ad_async_token_provider` is not provided.
"""
azure_ad_async_token_provider: Union[Callable[[], Awaitable[str]], None] = None
"""A function that returns an Azure Active Directory token.
Will be invoked on every async request.
"""
model_version: str = ""
"""Legacy, for openai<1.0.0 support."""
openai_api_type: str = ""
"""Legacy, for openai<1.0.0 support."""
    validate_base_url: bool = True
    """For backwards compatibility. If the legacy value openai_api_base is passed in,
    try to infer whether it is a base_url or azure_endpoint and update accordingly.
"""
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "chat_models", "azure_openai"]
@pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the api key and python package exist in the environment."""
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
        # Check OPENAI_API_KEY for backwards compatibility.
# TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using
# other forms of azure credentials.
values["openai_api_key"] = (
values["openai_api_key"]
or os.getenv("AZURE_OPENAI_API_KEY")
or os.getenv("OPENAI_API_KEY")
)
values["openai_api_base"] = values["openai_api_base"] or os.getenv(
"OPENAI_API_BASE"
)
values["openai_api_version"] = values["openai_api_version"] or os.getenv(
"OPENAI_API_VERSION"
)
# Check OPENAI_ORGANIZATION for backwards compatibility.
values["openai_organization"] = (
values["openai_organization"]
or os.getenv("OPENAI_ORG_ID")
or os.getenv("OPENAI_ORGANIZATION")
)
values["azure_endpoint"] = values["azure_endpoint"] or os.getenv(
"AZURE_OPENAI_ENDPOINT"
)
values["azure_ad_token"] = values["azure_ad_token"] or os.getenv(
"AZURE_OPENAI_AD_TOKEN"
)
values["openai_api_type"] = get_from_dict_or_env(
values, "openai_api_type", "OPENAI_API_TYPE", default="azure"
)
values["openai_proxy"] = get_from_dict_or_env(
values, "openai_proxy", "OPENAI_PROXY", default=""
)
try:
import openai
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if is_openai_v1():
# For backwards compatibility. Before openai v1, no distinction was made
# between azure_endpoint and base_url (openai_api_base).
openai_api_base = values["openai_api_base"]
if openai_api_base and values["validate_base_url"]:
if "/openai" not in openai_api_base:
values["openai_api_base"] = (
values["openai_api_base"].rstrip("/") + "/openai"
)
warnings.warn(
"As of openai>=1.0.0, Azure endpoints should be specified via "
f"the `azure_endpoint` param not `openai_api_base` "
f"(or alias `base_url`). Updating `openai_api_base` from "
f"{openai_api_base} to {values['openai_api_base']}."
)
if values["deployment_name"]:
warnings.warn(
"As of openai>=1.0.0, if `deployment_name` (or alias "
"`azure_deployment`) is specified then "
"`openai_api_base` (or alias `base_url`) should not be. "
"Instead use `deployment_name` (or alias `azure_deployment`) "
"and `azure_endpoint`."
)
if values["deployment_name"] not in values["openai_api_base"]:
warnings.warn(
"As of openai>=1.0.0, if `openai_api_base` "
"(or alias `base_url`) is specified it is expected to be "
"of the form "
"https://example-resource.azure.openai.com/openai/deployments/example-deployment. " # noqa: E501
f"Updating {openai_api_base} to "
f"{values['openai_api_base']}."
)
values["openai_api_base"] += (
"/deployments/" + values["deployment_name"]
)
values["deployment_name"] = None
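            # Keyword arguments shared by the sync and async Azure OpenAI clients;
            # for the async client the async AD token provider is swapped in below
            # when one is provided.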
client_params = {
"api_version": values["openai_api_version"],
"azure_endpoint": values["azure_endpoint"],
"azure_deployment": values["deployment_name"],
"api_key": values["openai_api_key"],
"azure_ad_token": values["azure_ad_token"],
"azure_ad_token_provider": values["azure_ad_token_provider"],
"organization": values["openai_organization"],
"base_url": values["openai_api_base"],
"timeout": values["request_timeout"],
"max_retries": values["max_retries"],
"default_headers": values["default_headers"],
"default_query": values["default_query"],
"http_client": values["http_client"],
}
values["client"] = openai.AzureOpenAI(**client_params).chat.completions
azure_ad_async_token_provider = values["azure_ad_async_token_provider"]
if azure_ad_async_token_provider:
client_params["azure_ad_token_provider"] = azure_ad_async_token_provider
values["async_client"] = openai.AsyncAzureOpenAI(
**client_params
).chat.completions
else:
values["client"] = openai.ChatCompletion # type: ignore[attr-defined]
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
if is_openai_v1():
return super()._default_params
else:
return {
**super()._default_params,
"engine": self.deployment_name,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**self._default_params}
@property
def _client_params(self) -> Dict[str, Any]:
"""Get the config params used for the openai client."""
if is_openai_v1():
return super()._client_params
else:
return {
**super()._client_params,
"api_type": self.openai_api_type,
"api_version": self.openai_api_version,
}
@property
def _llm_type(self) -> str:
return "azure-openai-chat"
@property
def lc_attributes(self) -> Dict[str, Any]:
return {
"openai_api_type": self.openai_api_type,
"openai_api_version": self.openai_api_version,
}
def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
if not isinstance(response, dict):
response = response.dict()
for res in response["choices"]:
if res.get("finish_reason", None) == "content_filter":
raise ValueError(
"Azure has not provided the response due to a content filter "
"being triggered"
)
chat_result = super()._create_chat_result(response)
if "model" in response:
model = response["model"]
if self.model_version:
model = f"{model}-{self.model_version}"
if chat_result.llm_output is not None and isinstance(
chat_result.llm_output, dict
):
chat_result.llm_output["model_name"] = model
return chat_result
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/ernie.py | import logging
import threading
from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
BaseMessage,
ChatMessage,
HumanMessage,
)
from langchain_core.outputs import ChatGeneration, ChatResult
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
logger = logging.getLogger(__name__)
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
else:
raise ValueError(f"Got unknown type {message}")
return message_dict
@deprecated(
since="0.0.13",
alternative="langchain_community.chat_models.QianfanChatEndpoint",
)
class ErnieBotChat(BaseChatModel):
"""`ERNIE-Bot` large language model.
ERNIE-Bot is a large language model developed by Baidu,
covering a huge amount of Chinese data.
To use, you should have the `ernie_client_id` and `ernie_client_secret` set,
    or set the environment variables `ERNIE_CLIENT_ID` and `ERNIE_CLIENT_SECRET`.
Note:
access_token will be automatically generated based on client_id and client_secret,
and will be regenerated after expiration (30 days).
Default model is `ERNIE-Bot-turbo`,
currently supported models are `ERNIE-Bot-turbo`, `ERNIE-Bot`, `ERNIE-Bot-8K`,
`ERNIE-Bot-4`, `ERNIE-Bot-turbo-AI`.
Example:
.. code-block:: python
from langchain_community.chat_models import ErnieBotChat
chat = ErnieBotChat(model_name='ERNIE-Bot')
Deprecated Note:
Please use `QianfanChatEndpoint` instead of this class.
`QianfanChatEndpoint` is a more suitable choice for production.
Always test your code after changing to `QianfanChatEndpoint`.
Example of `QianfanChatEndpoint`:
.. code-block:: python
from langchain_community.chat_models import QianfanChatEndpoint
qianfan_chat = QianfanChatEndpoint(model="ERNIE-Bot",
endpoint="your_endpoint", qianfan_ak="your_ak", qianfan_sk="your_sk")
"""
ernie_api_base: Optional[str] = None
"""Baidu application custom endpoints"""
ernie_client_id: Optional[str] = None
"""Baidu application client id"""
ernie_client_secret: Optional[str] = None
"""Baidu application client secret"""
    access_token: Optional[str] = None
    """access token is generated from the client id and client secret;
    setting this value directly will cause an error"""
model_name: str = "ERNIE-Bot-turbo"
"""model name of ernie, default is `ERNIE-Bot-turbo`.
    Currently supported models include `ERNIE-Bot-turbo` and `ERNIE-Bot`."""
system: Optional[str] = None
"""system is mainly used for model character design,
for example, you are an AI assistant produced by xxx company.
    The system prompt is limited to 1024 characters."""
request_timeout: Optional[int] = 60
"""request timeout for chat http requests"""
streaming: Optional[bool] = False
"""streaming mode. not supported yet."""
top_p: Optional[float] = 0.8
temperature: Optional[float] = 0.95
penalty_score: Optional[float] = 1
_lock = threading.Lock()
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
values["ernie_api_base"] = get_from_dict_or_env(
values, "ernie_api_base", "ERNIE_API_BASE", "https://aip.baidubce.com"
)
values["ernie_client_id"] = get_from_dict_or_env(
values,
"ernie_client_id",
"ERNIE_CLIENT_ID",
)
values["ernie_client_secret"] = get_from_dict_or_env(
values,
"ernie_client_secret",
"ERNIE_CLIENT_SECRET",
)
return values
def _chat(self, payload: object) -> dict:
base_url = f"{self.ernie_api_base}/rpc/2.0/ai_custom/v1/wenxinworkshop/chat"
model_paths = {
"ERNIE-Bot-turbo": "eb-instant",
"ERNIE-Bot": "completions",
"ERNIE-Bot-8K": "ernie_bot_8k",
"ERNIE-Bot-4": "completions_pro",
"ERNIE-Bot-turbo-AI": "ai_apaas",
"BLOOMZ-7B": "bloomz_7b1",
"Llama-2-7b-chat": "llama_2_7b",
"Llama-2-13b-chat": "llama_2_13b",
"Llama-2-70b-chat": "llama_2_70b",
}
if self.model_name in model_paths:
url = f"{base_url}/{model_paths[self.model_name]}"
else:
raise ValueError(f"Got unknown model_name {self.model_name}")
resp = requests.post(
url,
timeout=self.request_timeout,
headers={
"Content-Type": "application/json",
},
params={"access_token": self.access_token},
json=payload,
)
return resp.json()
def _refresh_access_token_with_lock(self) -> None:
with self._lock:
logger.debug("Refreshing access token")
base_url: str = f"{self.ernie_api_base}/oauth/2.0/token"
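            # Exchange the client id and client secret for an access token using
            # Baidu's OAuth2 client_credentials grant.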
resp = requests.post(
base_url,
timeout=10,
headers={
"Content-Type": "application/json",
"Accept": "application/json",
},
params={
"grant_type": "client_credentials",
"client_id": self.ernie_client_id,
"client_secret": self.ernie_client_secret,
},
)
self.access_token = str(resp.json().get("access_token"))
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
raise ValueError("`streaming` option currently unsupported.")
if not self.access_token:
self._refresh_access_token_with_lock()
payload = {
"messages": [_convert_message_to_dict(m) for m in messages],
"top_p": self.top_p,
"temperature": self.temperature,
"penalty_score": self.penalty_score,
"system": self.system,
**kwargs,
}
logger.debug(f"Payload for ernie api is {payload}")
resp = self._chat(payload)
if resp.get("error_code"):
if resp.get("error_code") == 111:
logger.debug("access_token expired, refresh it")
self._refresh_access_token_with_lock()
resp = self._chat(payload)
else:
raise ValueError(f"Error from ErnieChat api response: {resp}")
return self._create_chat_result(resp)
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
if "function_call" in response:
additional_kwargs = {
"function_call": dict(response.get("function_call", {}))
}
else:
additional_kwargs = {}
generations = [
ChatGeneration(
message=AIMessage(
content=response.get("result", ""),
additional_kwargs={**additional_kwargs},
)
)
]
token_usage = response.get("usage", {})
llm_output = {"token_usage": token_usage, "model_name": self.model_name}
return ChatResult(generations=generations, llm_output=llm_output)
@property
def _llm_type(self) -> str:
return "ernie-bot-chat"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/__init__.py | """**Chat Models** are a variation on language models.
While Chat Models use language models under the hood, the interface they expose
is a bit different. Rather than expose a "text in, text out" API, they expose
an interface where "chat messages" are the inputs and outputs.
**Class hierarchy:**
.. code-block::
BaseLanguageModel --> BaseChatModel --> <name> # Examples: ChatOpenAI, ChatGooglePalm
**Main helpers:**
.. code-block::
AIMessage, BaseMessage, HumanMessage
""" # noqa: E501
import importlib
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from langchain_community.chat_models.anthropic import (
ChatAnthropic,
)
from langchain_community.chat_models.anyscale import (
ChatAnyscale,
)
from langchain_community.chat_models.azure_openai import (
AzureChatOpenAI,
)
from langchain_community.chat_models.baichuan import (
ChatBaichuan,
)
from langchain_community.chat_models.baidu_qianfan_endpoint import (
QianfanChatEndpoint,
)
from langchain_community.chat_models.bedrock import (
BedrockChat,
)
from langchain_community.chat_models.cohere import (
ChatCohere,
)
from langchain_community.chat_models.coze import (
ChatCoze,
)
from langchain_community.chat_models.databricks import (
ChatDatabricks,
)
from langchain_community.chat_models.deepinfra import (
ChatDeepInfra,
)
from langchain_community.chat_models.edenai import ChatEdenAI
from langchain_community.chat_models.ernie import (
ErnieBotChat,
)
from langchain_community.chat_models.everlyai import (
ChatEverlyAI,
)
from langchain_community.chat_models.fake import (
FakeListChatModel,
)
from langchain_community.chat_models.fireworks import (
ChatFireworks,
)
from langchain_community.chat_models.friendli import (
ChatFriendli,
)
from langchain_community.chat_models.gigachat import (
GigaChat,
)
from langchain_community.chat_models.google_palm import (
ChatGooglePalm,
)
from langchain_community.chat_models.gpt_router import (
GPTRouter,
)
from langchain_community.chat_models.huggingface import (
ChatHuggingFace,
)
from langchain_community.chat_models.human import (
HumanInputChatModel,
)
from langchain_community.chat_models.hunyuan import (
ChatHunyuan,
)
from langchain_community.chat_models.javelin_ai_gateway import (
ChatJavelinAIGateway,
)
from langchain_community.chat_models.jinachat import (
JinaChat,
)
from langchain_community.chat_models.kinetica import (
ChatKinetica,
)
from langchain_community.chat_models.konko import (
ChatKonko,
)
from langchain_community.chat_models.litellm import (
ChatLiteLLM,
)
from langchain_community.chat_models.litellm_router import (
ChatLiteLLMRouter,
)
from langchain_community.chat_models.llama_edge import (
LlamaEdgeChatService,
)
from langchain_community.chat_models.llamacpp import ChatLlamaCpp
from langchain_community.chat_models.maritalk import (
ChatMaritalk,
)
from langchain_community.chat_models.minimax import (
MiniMaxChat,
)
from langchain_community.chat_models.mlflow import (
ChatMlflow,
)
from langchain_community.chat_models.mlflow_ai_gateway import (
ChatMLflowAIGateway,
)
from langchain_community.chat_models.mlx import (
ChatMLX,
)
from langchain_community.chat_models.moonshot import (
MoonshotChat,
)
from langchain_community.chat_models.naver import (
ChatClovaX,
)
from langchain_community.chat_models.oci_data_science import (
ChatOCIModelDeployment,
ChatOCIModelDeploymentTGI,
ChatOCIModelDeploymentVLLM,
)
from langchain_community.chat_models.oci_generative_ai import (
ChatOCIGenAI, # noqa: F401
)
from langchain_community.chat_models.octoai import ChatOctoAI
from langchain_community.chat_models.ollama import (
ChatOllama,
)
from langchain_community.chat_models.openai import (
ChatOpenAI,
)
from langchain_community.chat_models.outlines import ChatOutlines
from langchain_community.chat_models.pai_eas_endpoint import (
PaiEasChatEndpoint,
)
from langchain_community.chat_models.perplexity import (
ChatPerplexity,
)
from langchain_community.chat_models.premai import (
ChatPremAI,
)
from langchain_community.chat_models.promptlayer_openai import (
PromptLayerChatOpenAI,
)
from langchain_community.chat_models.reka import (
ChatReka,
)
from langchain_community.chat_models.sambanova import (
ChatSambaNovaCloud,
ChatSambaStudio,
)
from langchain_community.chat_models.snowflake import (
ChatSnowflakeCortex,
)
from langchain_community.chat_models.solar import (
SolarChat,
)
from langchain_community.chat_models.sparkllm import (
ChatSparkLLM,
)
from langchain_community.chat_models.symblai_nebula import ChatNebula
from langchain_community.chat_models.tongyi import (
ChatTongyi,
)
from langchain_community.chat_models.vertexai import (
ChatVertexAI,
)
from langchain_community.chat_models.volcengine_maas import (
VolcEngineMaasChat,
)
from langchain_community.chat_models.yandex import (
ChatYandexGPT,
)
from langchain_community.chat_models.yi import (
ChatYi,
)
from langchain_community.chat_models.yuan2 import (
ChatYuan2,
)
from langchain_community.chat_models.zhipuai import (
ChatZhipuAI,
)
__all__ = [
"AzureChatOpenAI",
"BedrockChat",
"ChatAnthropic",
"ChatAnyscale",
"ChatBaichuan",
"ChatClovaX",
"ChatCohere",
"ChatCoze",
"ChatOctoAI",
"ChatDatabricks",
"ChatDeepInfra",
"ChatEdenAI",
"ChatEverlyAI",
"ChatFireworks",
"ChatFriendli",
"ChatGooglePalm",
"ChatHuggingFace",
"ChatHunyuan",
"ChatJavelinAIGateway",
"ChatKinetica",
"ChatKonko",
"ChatLiteLLM",
"ChatLiteLLMRouter",
"ChatMLX",
"ChatMLflowAIGateway",
"ChatMaritalk",
"ChatMlflow",
"ChatNebula",
"ChatOCIGenAI",
"ChatOCIModelDeployment",
"ChatOCIModelDeploymentVLLM",
"ChatOCIModelDeploymentTGI",
"ChatOllama",
"ChatOpenAI",
"ChatOutlines",
"ChatPerplexity",
"ChatReka",
"ChatPremAI",
"ChatSambaNovaCloud",
"ChatSambaStudio",
"ChatSparkLLM",
"ChatSnowflakeCortex",
"ChatTongyi",
"ChatVertexAI",
"ChatYandexGPT",
"ChatYuan2",
"ChatZhipuAI",
"ChatLlamaCpp",
"ErnieBotChat",
"FakeListChatModel",
"GPTRouter",
"GigaChat",
"HumanInputChatModel",
"JinaChat",
"LlamaEdgeChatService",
"MiniMaxChat",
"MoonshotChat",
"PaiEasChatEndpoint",
"PromptLayerChatOpenAI",
"QianfanChatEndpoint",
"SolarChat",
"VolcEngineMaasChat",
"ChatYi",
]
_module_lookup = {
"AzureChatOpenAI": "langchain_community.chat_models.azure_openai",
"BedrockChat": "langchain_community.chat_models.bedrock",
"ChatAnthropic": "langchain_community.chat_models.anthropic",
"ChatAnyscale": "langchain_community.chat_models.anyscale",
"ChatBaichuan": "langchain_community.chat_models.baichuan",
"ChatClovaX": "langchain_community.chat_models.naver",
"ChatCohere": "langchain_community.chat_models.cohere",
"ChatCoze": "langchain_community.chat_models.coze",
"ChatDatabricks": "langchain_community.chat_models.databricks",
"ChatDeepInfra": "langchain_community.chat_models.deepinfra",
"ChatEverlyAI": "langchain_community.chat_models.everlyai",
"ChatEdenAI": "langchain_community.chat_models.edenai",
"ChatFireworks": "langchain_community.chat_models.fireworks",
"ChatFriendli": "langchain_community.chat_models.friendli",
"ChatGooglePalm": "langchain_community.chat_models.google_palm",
"ChatHuggingFace": "langchain_community.chat_models.huggingface",
"ChatHunyuan": "langchain_community.chat_models.hunyuan",
"ChatJavelinAIGateway": "langchain_community.chat_models.javelin_ai_gateway",
"ChatKinetica": "langchain_community.chat_models.kinetica",
"ChatKonko": "langchain_community.chat_models.konko",
"ChatLiteLLM": "langchain_community.chat_models.litellm",
"ChatLiteLLMRouter": "langchain_community.chat_models.litellm_router",
"ChatMLflowAIGateway": "langchain_community.chat_models.mlflow_ai_gateway",
"ChatMLX": "langchain_community.chat_models.mlx",
"ChatMaritalk": "langchain_community.chat_models.maritalk",
"ChatMlflow": "langchain_community.chat_models.mlflow",
"ChatNebula": "langchain_community.chat_models.symblai_nebula",
"ChatOctoAI": "langchain_community.chat_models.octoai",
"ChatOCIGenAI": "langchain_community.chat_models.oci_generative_ai",
"ChatOCIModelDeployment": "langchain_community.chat_models.oci_data_science",
"ChatOCIModelDeploymentVLLM": "langchain_community.chat_models.oci_data_science",
"ChatOCIModelDeploymentTGI": "langchain_community.chat_models.oci_data_science",
"ChatOllama": "langchain_community.chat_models.ollama",
"ChatOpenAI": "langchain_community.chat_models.openai",
"ChatOutlines": "langchain_community.chat_models.outlines",
"ChatReka": "langchain_community.chat_models.reka",
"ChatPerplexity": "langchain_community.chat_models.perplexity",
"ChatSambaNovaCloud": "langchain_community.chat_models.sambanova",
"ChatSambaStudio": "langchain_community.chat_models.sambanova",
"ChatSnowflakeCortex": "langchain_community.chat_models.snowflake",
"ChatSparkLLM": "langchain_community.chat_models.sparkllm",
"ChatTongyi": "langchain_community.chat_models.tongyi",
"ChatVertexAI": "langchain_community.chat_models.vertexai",
"ChatYandexGPT": "langchain_community.chat_models.yandex",
"ChatYuan2": "langchain_community.chat_models.yuan2",
"ChatZhipuAI": "langchain_community.chat_models.zhipuai",
"ErnieBotChat": "langchain_community.chat_models.ernie",
"FakeListChatModel": "langchain_community.chat_models.fake",
"GPTRouter": "langchain_community.chat_models.gpt_router",
"GigaChat": "langchain_community.chat_models.gigachat",
"HumanInputChatModel": "langchain_community.chat_models.human",
"JinaChat": "langchain_community.chat_models.jinachat",
"LlamaEdgeChatService": "langchain_community.chat_models.llama_edge",
"MiniMaxChat": "langchain_community.chat_models.minimax",
"MoonshotChat": "langchain_community.chat_models.moonshot",
"PaiEasChatEndpoint": "langchain_community.chat_models.pai_eas_endpoint",
"PromptLayerChatOpenAI": "langchain_community.chat_models.promptlayer_openai",
"SolarChat": "langchain_community.chat_models.solar",
"QianfanChatEndpoint": "langchain_community.chat_models.baidu_qianfan_endpoint",
"VolcEngineMaasChat": "langchain_community.chat_models.volcengine_maas",
"ChatPremAI": "langchain_community.chat_models.premai",
"ChatLlamaCpp": "langchain_community.chat_models.llamacpp",
"ChatYi": "langchain_community.chat_models.yi",
}
def __getattr__(name: str) -> Any:
if name in _module_lookup:
module = importlib.import_module(_module_lookup[name])
return getattr(module, name)
raise AttributeError(f"module {__name__} has no attribute {name}")
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/mlflow.py | import json
import logging
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Literal,
Mapping,
Optional,
Sequence,
Type,
Union,
cast,
)
from urllib.parse import urlparse
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import BaseChatModel
from langchain_core.language_models.base import LanguageModelInput
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
FunctionMessage,
HumanMessage,
HumanMessageChunk,
InvalidToolCall,
SystemMessage,
SystemMessageChunk,
ToolCall,
ToolMessage,
ToolMessageChunk,
)
from langchain_core.messages.tool import tool_call_chunk
from langchain_core.output_parsers.openai_tools import (
make_invalid_tool_call,
parse_tool_call,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable, RunnableConfig
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
from pydantic import (
BaseModel,
Field,
PrivateAttr,
)
logger = logging.getLogger(__name__)
class ChatMlflow(BaseChatModel):
"""`MLflow` chat models API.
To use, you should have the `mlflow[genai]` python package installed.
For more information, see https://mlflow.org/docs/latest/llms/deployments.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatMlflow
chat = ChatMlflow(
target_uri="http://localhost:5000",
endpoint="chat",
temperature=0.1,
)
"""
endpoint: str
"""The endpoint to use."""
target_uri: str
"""The target URI to use."""
temperature: float = 0.0
"""The sampling temperature."""
n: int = 1
"""The number of completion choices to generate."""
stop: Optional[List[str]] = None
"""The stop sequence."""
max_tokens: Optional[int] = None
"""The maximum number of tokens to generate."""
extra_params: dict = Field(default_factory=dict)
"""Any extra parameters to pass to the endpoint."""
_client: Any = PrivateAttr()
def __init__(self, **kwargs: Any):
super().__init__(**kwargs)
self._validate_uri()
try:
from mlflow.deployments import get_deploy_client
self._client = get_deploy_client(self.target_uri)
except ImportError as e:
raise ImportError(
"Failed to create the client. "
f"Please run `pip install mlflow{self._mlflow_extras}` to install "
"required dependencies."
) from e
@property
def _mlflow_extras(self) -> str:
return "[genai]"
def _validate_uri(self) -> None:
if self.target_uri == "databricks":
return
allowed = ["http", "https", "databricks"]
if urlparse(self.target_uri).scheme not in allowed:
raise ValueError(
f"Invalid target URI: {self.target_uri}. "
f"The scheme must be one of {allowed}."
)
@property
def _default_params(self) -> Dict[str, Any]:
params: Dict[str, Any] = {
"target_uri": self.target_uri,
"endpoint": self.endpoint,
"temperature": self.temperature,
"n": self.n,
"stop": self.stop,
"max_tokens": self.max_tokens,
"extra_params": self.extra_params,
}
return params
def _prepare_inputs(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> Dict[str, Any]:
message_dicts = [
ChatMlflow._convert_message_to_dict(message) for message in messages
]
data: Dict[str, Any] = {
"messages": message_dicts,
"temperature": self.temperature,
"n": self.n,
**self.extra_params,
**kwargs,
}
if stop := self.stop or stop:
data["stop"] = stop
if self.max_tokens is not None:
data["max_tokens"] = self.max_tokens
return data
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
data = self._prepare_inputs(
messages,
stop,
**kwargs,
)
resp = self._client.predict(endpoint=self.endpoint, inputs=data)
return ChatMlflow._create_chat_result(resp)
def stream(
self,
input: LanguageModelInput,
config: Optional[RunnableConfig] = None,
*,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> Iterator[BaseMessageChunk]:
# We need to override `stream` to handle the case
# that `self._client` does not implement `predict_stream`
if not hasattr(self._client, "predict_stream"):
# MLflow deployment client does not implement streaming,
# so use default implementation
yield cast(
BaseMessageChunk, self.invoke(input, config=config, stop=stop, **kwargs)
)
else:
yield from super().stream(input, config, stop=stop, **kwargs)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
data = self._prepare_inputs(
messages,
stop,
**kwargs,
)
# TODO: check if `_client.predict_stream` is available.
chunk_iter = self._client.predict_stream(endpoint=self.endpoint, inputs=data)
first_chunk_role = None
for chunk in chunk_iter:
if chunk["choices"]:
choice = chunk["choices"][0]
chunk_delta = choice["delta"]
if first_chunk_role is None:
first_chunk_role = chunk_delta.get("role")
chunk_message = ChatMlflow._convert_delta_to_message_chunk(
chunk_delta, first_chunk_role
)
generation_info = {}
if finish_reason := choice.get("finish_reason"):
generation_info["finish_reason"] = finish_reason
if logprobs := choice.get("logprobs"):
generation_info["logprobs"] = logprobs
chunk = ChatGenerationChunk(
message=chunk_message, generation_info=generation_info or None
)
if run_manager:
run_manager.on_llm_new_token(
chunk.text, chunk=chunk, logprobs=logprobs
)
yield chunk
else:
# Handle the case where choices are empty if needed
continue
@property
def _identifying_params(self) -> Dict[str, Any]:
return self._default_params
def _get_invocation_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> Dict[str, Any]:
"""Get the parameters used to invoke the model FOR THE CALLBACKS."""
return {
**self._default_params,
**super()._get_invocation_params(stop=stop, **kwargs),
}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "mlflow-chat"
@staticmethod
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
content = cast(str, _dict.get("content"))
if role == "user":
return HumanMessage(content=content)
elif role == "assistant":
content = content or ""
additional_kwargs: Dict = {}
tool_calls = []
invalid_tool_calls = []
if raw_tool_calls := _dict.get("tool_calls"):
additional_kwargs["tool_calls"] = raw_tool_calls
for raw_tool_call in raw_tool_calls:
try:
tool_calls.append(
parse_tool_call(raw_tool_call, return_id=True)
)
except Exception as e:
invalid_tool_calls.append(
make_invalid_tool_call(raw_tool_call, str(e))
)
return AIMessage(
content=content,
additional_kwargs=additional_kwargs,
id=_dict.get("id"),
tool_calls=tool_calls,
invalid_tool_calls=invalid_tool_calls,
)
elif role == "system":
return SystemMessage(content=content)
else:
return ChatMessage(content=content, role=role)
@staticmethod
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_role: str
) -> BaseMessageChunk:
role = _dict.get("role", default_role)
content = _dict.get("content") or ""
if role == "user":
return HumanMessageChunk(content=content)
elif role == "assistant":
additional_kwargs: Dict = {}
tool_call_chunks = []
if raw_tool_calls := _dict.get("tool_calls"):
additional_kwargs["tool_calls"] = raw_tool_calls
try:
tool_call_chunks = [
tool_call_chunk(
name=rtc["function"].get("name"),
args=rtc["function"].get("arguments"),
id=rtc.get("id"),
index=rtc["index"],
)
for rtc in raw_tool_calls
]
except KeyError:
pass
return AIMessageChunk(
content=content,
additional_kwargs=additional_kwargs,
id=_dict.get("id"),
tool_call_chunks=tool_call_chunks,
)
elif role == "system":
return SystemMessageChunk(content=content)
elif role == "tool":
return ToolMessageChunk(
content=content, tool_call_id=_dict["tool_call_id"], id=_dict.get("id")
)
else:
return ChatMessageChunk(content=content, role=role)
@staticmethod
def _raise_functions_not_supported() -> None:
raise ValueError(
"Function messages are not supported by Databricks. Please"
" create a feature request at https://github.com/mlflow/mlflow/issues."
)
@staticmethod
def _convert_message_to_dict(message: BaseMessage) -> dict:
message_dict = {"content": message.content}
if (name := message.name or message.additional_kwargs.get("name")) is not None:
message_dict["name"] = name
if isinstance(message, ChatMessage):
message_dict["role"] = message.role
elif isinstance(message, HumanMessage):
message_dict["role"] = "user"
elif isinstance(message, AIMessage):
message_dict["role"] = "assistant"
if message.tool_calls or message.invalid_tool_calls:
message_dict["tool_calls"] = [
_lc_tool_call_to_openai_tool_call(tc) for tc in message.tool_calls
] + [
_lc_invalid_tool_call_to_openai_tool_call(tc)
for tc in message.invalid_tool_calls
] # type: ignore[assignment]
elif "tool_calls" in message.additional_kwargs:
message_dict["tool_calls"] = message.additional_kwargs["tool_calls"]
tool_call_supported_props = {"id", "type", "function"}
message_dict["tool_calls"] = [
{
k: v
for k, v in tool_call.items() # type: ignore[union-attr]
if k in tool_call_supported_props
}
for tool_call in message_dict["tool_calls"]
]
else:
pass
# If tool calls present, content null value should be None not empty string.
if "tool_calls" in message_dict:
message_dict["content"] = message_dict["content"] or None # type: ignore[assignment]
elif isinstance(message, SystemMessage):
message_dict["role"] = "system"
elif isinstance(message, ToolMessage):
message_dict["role"] = "tool"
message_dict["tool_call_id"] = message.tool_call_id
supported_props = {"content", "role", "tool_call_id"}
message_dict = {
k: v for k, v in message_dict.items() if k in supported_props
}
elif isinstance(message, FunctionMessage):
raise ValueError(
"Function messages are not supported by Databricks. Please"
" create a feature request at https://github.com/mlflow/mlflow/issues."
)
else:
raise ValueError(f"Got unknown message type: {message}")
if "function_call" in message.additional_kwargs:
ChatMlflow._raise_functions_not_supported()
return message_dict
@staticmethod
def _create_chat_result(response: Mapping[str, Any]) -> ChatResult:
generations = []
for choice in response["choices"]:
message = ChatMlflow._convert_dict_to_message(choice["message"])
usage = choice.get("usage", {})
gen = ChatGeneration(
message=message,
generation_info=usage,
)
generations.append(gen)
usage = response.get("usage", {})
return ChatResult(generations=generations, llm_output=usage)
def bind_tools(
self,
tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
*,
tool_choice: Optional[
Union[dict, str, Literal["auto", "none", "required", "any"], bool]
] = None,
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
"""Bind tool-like objects to this chat model.
Assumes model is compatible with OpenAI tool-calling API.
Args:
tools: A list of tool definitions to bind to this chat model.
Can be a dictionary, pydantic model, callable, or BaseTool. Pydantic
models, callables, and BaseTools will be automatically converted to
their schema dictionary representation.
tool_choice: Which tool to require the model to call.
Options are:
name of the tool (str): calls corresponding tool;
"auto": automatically selects a tool (including no tool);
"none": model does not generate any tool calls and instead must
generate a standard assistant message;
"required": the model picks the most relevant tool in tools and
must generate a tool call;
or a dict of the form:
{"type": "function", "function": {"name": <<tool_name>>}}.
**kwargs: Any additional parameters to pass to the
:class:`~langchain.runnable.Runnable` constructor.
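
        Example (illustrative sketch; the tool and the ``llm`` instance below are
        stand-ins, not part of this module):

        .. code-block:: python

            from langchain_core.tools import tool

            @tool
            def get_weather(city: str) -> str:
                "Return the weather for a city."
                return f"Sunny in {city}"

            llm_with_tools = llm.bind_tools([get_weather], tool_choice="get_weather")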
"""
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
if tool_choice:
if isinstance(tool_choice, str):
# tool_choice is a tool/function name
if tool_choice not in ("auto", "none", "required"):
tool_choice = {
"type": "function",
"function": {"name": tool_choice},
}
elif isinstance(tool_choice, dict):
tool_names = [
formatted_tool["function"]["name"]
for formatted_tool in formatted_tools
]
if not any(
tool_name == tool_choice["function"]["name"]
for tool_name in tool_names
):
raise ValueError(
f"Tool choice {tool_choice} was specified, but the only "
f"provided tools were {tool_names}."
)
else:
raise ValueError(
f"Unrecognized tool_choice type. Expected str, bool or dict. "
f"Received: {tool_choice}"
)
kwargs["tool_choice"] = tool_choice
return super().bind(tools=formatted_tools, **kwargs)
def _lc_tool_call_to_openai_tool_call(tool_call: ToolCall) -> dict:
return {
"type": "function",
"id": tool_call["id"],
"function": {
"name": tool_call["name"],
"arguments": json.dumps(tool_call["args"]),
},
}
def _lc_invalid_tool_call_to_openai_tool_call(
invalid_tool_call: InvalidToolCall,
) -> dict:
return {
"type": "function",
"id": invalid_tool_call["id"],
"function": {
"name": invalid_tool_call["name"],
"arguments": invalid_tool_call["args"],
},
}
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/fireworks.py | from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Optional,
Type,
Union,
)
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.language_models.llms import create_base_retry_decorator
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
FunctionMessage,
FunctionMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.utils import convert_to_secret_str
from langchain_core.utils.env import get_from_dict_or_env
from pydantic import Field, SecretStr, model_validator
from langchain_community.adapters.openai import convert_message_to_dict
def _convert_delta_to_message_chunk(
_dict: Any, default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
"""Convert a delta response to a message chunk."""
role = _dict.role
content = _dict.content or ""
additional_kwargs: Dict = {}
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content, additional_kwargs=additional_kwargs)
elif role == "system" or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role == "function" or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=content, name=_dict.name)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content) # type: ignore[call-arg]
def convert_dict_to_message(_dict: Any) -> BaseMessage:
"""Convert a dict response to a message."""
role = _dict.role
content = _dict.content or ""
if role == "user":
return HumanMessage(content=content)
elif role == "assistant":
additional_kwargs: Dict = {}
return AIMessage(content=content, additional_kwargs=additional_kwargs)
elif role == "system":
return SystemMessage(content=content)
elif role == "function":
return FunctionMessage(content=content, name=_dict.name)
else:
return ChatMessage(content=content, role=role)
@deprecated(
since="0.0.26",
removal="1.0",
alternative_import="langchain_fireworks.ChatFireworks",
)
class ChatFireworks(BaseChatModel):
"""Fireworks Chat models."""
model: str = "accounts/fireworks/models/llama-v2-7b-chat"
model_kwargs: dict = Field(
default_factory=lambda: {
"temperature": 0.7,
"max_tokens": 512,
"top_p": 1,
}.copy()
)
fireworks_api_key: Optional[SecretStr] = None
max_retries: int = 20
use_retry: bool = True
@property
def lc_secrets(self) -> Dict[str, str]:
return {"fireworks_api_key": "FIREWORKS_API_KEY"}
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "chat_models", "fireworks"]
@model_validator(mode="before")
@classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that the api key is set in the environment."""
try:
import fireworks.client
except ImportError as e:
raise ImportError(
"Could not import fireworks-ai python package. "
"Please install it with `pip install fireworks-ai`."
) from e
fireworks_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "fireworks_api_key", "FIREWORKS_API_KEY")
)
fireworks.client.api_key = fireworks_api_key.get_secret_value()
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fireworks-chat"
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts = self._create_message_dicts(messages)
params = {
"model": self.model,
"messages": message_dicts,
**self.model_kwargs,
**kwargs,
}
response = completion_with_retry(
self,
self.use_retry,
run_manager=run_manager,
stop=stop,
**params,
)
return self._create_chat_result(response)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts = self._create_message_dicts(messages)
params = {
"model": self.model,
"messages": message_dicts,
**self.model_kwargs,
**kwargs,
}
response = await acompletion_with_retry(
self, self.use_retry, run_manager=run_manager, stop=stop, **params
)
return self._create_chat_result(response)
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
if llm_outputs[0] is None:
return {}
return llm_outputs[0]
def _create_chat_result(self, response: Any) -> ChatResult:
generations = []
for res in response.choices:
message = convert_dict_to_message(res.message)
gen = ChatGeneration(
message=message,
generation_info=dict(finish_reason=res.finish_reason),
)
generations.append(gen)
llm_output = {"model": self.model}
return ChatResult(generations=generations, llm_output=llm_output)
def _create_message_dicts(
self, messages: List[BaseMessage]
) -> List[Dict[str, Any]]:
message_dicts = [convert_message_to_dict(m) for m in messages]
return message_dicts
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts = self._create_message_dicts(messages)
default_chunk_class = AIMessageChunk
params = {
"model": self.model,
"messages": message_dicts,
"stream": True,
**self.model_kwargs,
**kwargs,
}
for chunk in completion_with_retry(
self, self.use_retry, run_manager=run_manager, stop=stop, **params
):
choice = chunk.choices[0]
chunk = _convert_delta_to_message_chunk(choice.delta, default_chunk_class)
finish_reason = choice.finish_reason
generation_info = (
dict(finish_reason=finish_reason) if finish_reason is not None else None
)
default_chunk_class = chunk.__class__
cg_chunk = ChatGenerationChunk(
message=chunk, generation_info=generation_info
)
if run_manager:
run_manager.on_llm_new_token(cg_chunk.text, chunk=cg_chunk)
yield cg_chunk
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
message_dicts = self._create_message_dicts(messages)
default_chunk_class = AIMessageChunk
params = {
"model": self.model,
"messages": message_dicts,
"stream": True,
**self.model_kwargs,
**kwargs,
}
async for chunk in await acompletion_with_retry_streaming(
self, self.use_retry, run_manager=run_manager, stop=stop, **params
):
choice = chunk.choices[0]
chunk = _convert_delta_to_message_chunk(choice.delta, default_chunk_class)
finish_reason = choice.finish_reason
generation_info = (
dict(finish_reason=finish_reason) if finish_reason is not None else None
)
default_chunk_class = chunk.__class__
cg_chunk = ChatGenerationChunk(
message=chunk, generation_info=generation_info
)
if run_manager:
await run_manager.on_llm_new_token(token=chunk.text, chunk=cg_chunk)
yield cg_chunk
def conditional_decorator(
condition: bool, decorator: Callable[[Any], Any]
) -> Callable[[Any], Any]:
"""Define conditional decorator.
Args:
condition: The condition.
decorator: The decorator.
Returns:
The decorated function.
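
    Example (mirrors how the helpers in this module use it):

    .. code-block:: python

        @conditional_decorator(use_retry, retry_decorator)
        def _completion_with_retry(**kwargs: Any) -> Any:
            ...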
"""
def actual_decorator(func: Callable[[Any], Any]) -> Callable[[Any], Any]:
if condition:
return decorator(func)
return func
return actual_decorator
def completion_with_retry(
llm: ChatFireworks,
use_retry: bool,
*,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
import fireworks.client
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@conditional_decorator(use_retry, retry_decorator)
def _completion_with_retry(**kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
return fireworks.client.ChatCompletion.create(
**kwargs,
)
return _completion_with_retry(**kwargs)
async def acompletion_with_retry(
llm: ChatFireworks,
use_retry: bool,
*,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the async completion call."""
import fireworks.client
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@conditional_decorator(use_retry, retry_decorator)
async def _completion_with_retry(**kwargs: Any) -> Any:
return await fireworks.client.ChatCompletion.acreate(
**kwargs,
)
return await _completion_with_retry(**kwargs)
async def acompletion_with_retry_streaming(
llm: ChatFireworks,
use_retry: bool,
*,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call for streaming."""
import fireworks.client
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@conditional_decorator(use_retry, retry_decorator)
async def _completion_with_retry(**kwargs: Any) -> Any:
return fireworks.client.ChatCompletion.acreate(
**kwargs,
)
return await _completion_with_retry(**kwargs)
def _create_retry_decorator(
llm: ChatFireworks,
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
"""Define retry mechanism."""
import fireworks.client
errors = [
fireworks.client.error.RateLimitError,
fireworks.client.error.InternalServerError,
fireworks.client.error.BadGatewayError,
fireworks.client.error.ServiceUnavailableError,
]
return create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/snowflake.py | import json
from typing import Any, Dict, List, Optional
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatResult
from langchain_core.utils import (
convert_to_secret_str,
get_from_dict_or_env,
get_pydantic_field_names,
pre_init,
)
from langchain_core.utils.utils import _build_model_kwargs
from pydantic import Field, SecretStr, model_validator
SUPPORTED_ROLES: List[str] = [
"system",
"user",
"assistant",
]
class ChatSnowflakeCortexError(Exception):
"""Error with Snowpark client."""
def _convert_message_to_dict(message: BaseMessage) -> dict:
"""Convert a LangChain message to a dictionary.
Args:
message: The LangChain message.
Returns:
The dictionary.
"""
message_dict: Dict[str, Any] = {
"content": message.content,
}
# populate role and additional message data
if isinstance(message, ChatMessage) and message.role in SUPPORTED_ROLES:
message_dict["role"] = message.role
elif isinstance(message, SystemMessage):
message_dict["role"] = "system"
elif isinstance(message, HumanMessage):
message_dict["role"] = "user"
elif isinstance(message, AIMessage):
message_dict["role"] = "assistant"
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
def _truncate_at_stop_tokens(
text: str,
stop: Optional[List[str]],
) -> str:
"""Truncates text at the earliest stop token found."""
if stop is None:
return text
for stop_token in stop:
stop_token_idx = text.find(stop_token)
if stop_token_idx != -1:
text = text[:stop_token_idx]
return text
class ChatSnowflakeCortex(BaseChatModel):
"""Snowflake Cortex based Chat model
    To use, you must have the ``snowflake-snowpark-python`` Python package installed
    and either:
    1. environment variables set with your Snowflake credentials, or
    2. the credentials passed directly as kwargs to the ChatSnowflakeCortex constructor.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatSnowflakeCortex
chat = ChatSnowflakeCortex()
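
    A sketch passing the credentials directly as kwargs instead of environment
    variables (every value below is a placeholder):

    .. code-block:: python

        chat = ChatSnowflakeCortex(
            model="snowflake-arctic",
            username="<user>",
            password="<password>",
            account="<account>",
            database="<database>",
            schema="<schema>",
            warehouse="<warehouse>",
            role="<role>",
        )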
"""
_sp_session: Any = None
"""Snowpark session object."""
model: str = "snowflake-arctic"
"""Snowflake cortex hosted LLM model name, defaulted to `snowflake-arctic`.
Refer to docs for more options."""
cortex_function: str = "complete"
"""Cortex function to use, defaulted to `complete`.
Refer to docs for more options."""
temperature: float = 0.7
"""Model temperature. Value should be >= 0 and <= 1.0"""
max_tokens: Optional[int] = None
"""The maximum number of output tokens in the response."""
    top_p: Optional[float] = None
    """top_p adjusts the number of choices considered for each predicted token based on
    cumulative probability. The value should range between 0.0 and 1.0.
"""
snowflake_username: Optional[str] = Field(default=None, alias="username")
"""Automatically inferred from env var `SNOWFLAKE_USERNAME` if not provided."""
snowflake_password: Optional[SecretStr] = Field(default=None, alias="password")
"""Automatically inferred from env var `SNOWFLAKE_PASSWORD` if not provided."""
snowflake_account: Optional[str] = Field(default=None, alias="account")
"""Automatically inferred from env var `SNOWFLAKE_ACCOUNT` if not provided."""
snowflake_database: Optional[str] = Field(default=None, alias="database")
"""Automatically inferred from env var `SNOWFLAKE_DATABASE` if not provided."""
snowflake_schema: Optional[str] = Field(default=None, alias="schema")
"""Automatically inferred from env var `SNOWFLAKE_SCHEMA` if not provided."""
snowflake_warehouse: Optional[str] = Field(default=None, alias="warehouse")
"""Automatically inferred from env var `SNOWFLAKE_WAREHOUSE` if not provided."""
snowflake_role: Optional[str] = Field(default=None, alias="role")
"""Automatically inferred from env var `SNOWFLAKE_ROLE` if not provided."""
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
values = _build_model_kwargs(values, all_required_field_names)
return values
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
try:
from snowflake.snowpark import Session
except ImportError:
raise ImportError(
"`snowflake-snowpark-python` package not found, please install it with "
"`pip install snowflake-snowpark-python`"
)
values["snowflake_username"] = get_from_dict_or_env(
values, "snowflake_username", "SNOWFLAKE_USERNAME"
)
values["snowflake_password"] = convert_to_secret_str(
get_from_dict_or_env(values, "snowflake_password", "SNOWFLAKE_PASSWORD")
)
values["snowflake_account"] = get_from_dict_or_env(
values, "snowflake_account", "SNOWFLAKE_ACCOUNT"
)
values["snowflake_database"] = get_from_dict_or_env(
values, "snowflake_database", "SNOWFLAKE_DATABASE"
)
values["snowflake_schema"] = get_from_dict_or_env(
values, "snowflake_schema", "SNOWFLAKE_SCHEMA"
)
values["snowflake_warehouse"] = get_from_dict_or_env(
values, "snowflake_warehouse", "SNOWFLAKE_WAREHOUSE"
)
values["snowflake_role"] = get_from_dict_or_env(
values, "snowflake_role", "SNOWFLAKE_ROLE"
)
connection_params = {
"account": values["snowflake_account"],
"user": values["snowflake_username"],
"password": values["snowflake_password"].get_secret_value(),
"database": values["snowflake_database"],
"schema": values["snowflake_schema"],
"warehouse": values["snowflake_warehouse"],
"role": values["snowflake_role"],
}
try:
values["_sp_session"] = Session.builder.configs(connection_params).create()
except Exception as e:
raise ChatSnowflakeCortexError(f"Failed to create session: {e}")
return values
def __del__(self) -> None:
if getattr(self, "_sp_session", None) is not None:
self._sp_session.close()
@property
def _llm_type(self) -> str:
"""Get the type of language model used by this chat model."""
return f"snowflake-cortex-{self.model}"
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts = [_convert_message_to_dict(m) for m in messages]
message_str = str(message_dicts)
options = {"temperature": self.temperature}
if self.top_p is not None:
options["top_p"] = self.top_p
if self.max_tokens is not None:
options["max_tokens"] = self.max_tokens
options_str = str(options)
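        # Call the configured Cortex function (COMPLETE by default) through Snowpark
        # SQL, passing the model name, the message list and the options as literals.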
sql_stmt = f"""
select snowflake.cortex.{self.cortex_function}(
'{self.model}'
,{message_str},{options_str}) as llm_response;"""
try:
l_rows = self._sp_session.sql(sql_stmt).collect()
except Exception as e:
raise ChatSnowflakeCortexError(
f"Error while making request to Snowflake Cortex via Snowpark: {e}"
)
response = json.loads(l_rows[0]["LLM_RESPONSE"])
ai_message_content = response["choices"][0]["messages"]
content = _truncate_at_stop_tokens(ai_message_content, stop)
message = AIMessage(
content=content,
response_metadata=response["usage"],
)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/litellm.py | """Wrapper around LiteLLM's model I/O library."""
from __future__ import annotations
import json
import logging
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.language_models.llms import create_base_retry_decorator
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
FunctionMessage,
FunctionMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
ToolCall,
ToolCallChunk,
ToolMessage,
)
from langchain_core.outputs import (
ChatGeneration,
ChatGenerationChunk,
ChatResult,
)
from langchain_core.runnables import Runnable
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env, pre_init
from langchain_core.utils.function_calling import convert_to_openai_tool
from pydantic import BaseModel, Field
logger = logging.getLogger(__name__)
class ChatLiteLLMException(Exception):
"""Error with the `LiteLLM I/O` library"""
def _create_retry_decorator(
llm: ChatLiteLLM,
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
"""Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions"""
import litellm
errors = [
litellm.Timeout,
litellm.APIError,
litellm.APIConnectionError,
litellm.RateLimitError,
]
return create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
)
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
# Fix for azure
# Also OpenAI returns None for tool invocations
content = _dict.get("content", "") or ""
additional_kwargs = {}
if _dict.get("function_call"):
additional_kwargs["function_call"] = dict(_dict["function_call"])
if _dict.get("tool_calls"):
additional_kwargs["tool_calls"] = _dict["tool_calls"]
return AIMessage(content=content, additional_kwargs=additional_kwargs)
elif role == "system":
return SystemMessage(content=_dict["content"])
elif role == "function":
return FunctionMessage(content=_dict["content"], name=_dict["name"])
else:
return ChatMessage(content=_dict["content"], role=role)
async def acompletion_with_retry(
llm: ChatLiteLLM,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
return await _completion_with_retry(**kwargs)
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
role = _dict.get("role")
content = _dict.get("content") or ""
if _dict.get("function_call"):
additional_kwargs = {"function_call": dict(_dict["function_call"])}
else:
additional_kwargs = {}
tool_call_chunks = []
if raw_tool_calls := _dict.get("tool_calls"):
additional_kwargs["tool_calls"] = raw_tool_calls
try:
tool_call_chunks = [
ToolCallChunk(
name=rtc["function"].get("name"),
args=rtc["function"].get("arguments"),
id=rtc.get("id"),
index=rtc["index"],
)
for rtc in raw_tool_calls
]
except KeyError:
pass
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(
content=content,
additional_kwargs=additional_kwargs,
tool_call_chunks=tool_call_chunks,
)
elif role == "system" or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role == "function" or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=content, name=_dict["name"])
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role) # type: ignore[arg-type]
else:
return default_class(content=content) # type: ignore[call-arg]
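
# Illustrative note (added; not in the original source): a streaming delta such
# as {"role": "assistant", "content": "Hel"} is converted above into an
# AIMessageChunk("Hel"); the chat-model base class later concatenates these
# chunks, so only the incremental content and any partial tool-call data are
# carried per chunk.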
def _lc_tool_call_to_openai_tool_call(tool_call: ToolCall) -> dict:
return {
"type": "function",
"id": tool_call["id"],
"function": {
"name": tool_call["name"],
"arguments": json.dumps(tool_call["args"]),
},
}
def _convert_message_to_dict(message: BaseMessage) -> dict:
message_dict: Dict[str, Any] = {"content": message.content}
if isinstance(message, ChatMessage):
message_dict["role"] = message.role
elif isinstance(message, HumanMessage):
message_dict["role"] = "user"
elif isinstance(message, AIMessage):
message_dict["role"] = "assistant"
if "function_call" in message.additional_kwargs:
message_dict["function_call"] = message.additional_kwargs["function_call"]
if message.tool_calls:
message_dict["tool_calls"] = [
_lc_tool_call_to_openai_tool_call(tc) for tc in message.tool_calls
]
elif "tool_calls" in message.additional_kwargs:
message_dict["tool_calls"] = message.additional_kwargs["tool_calls"]
elif isinstance(message, SystemMessage):
message_dict["role"] = "system"
elif isinstance(message, FunctionMessage):
message_dict["role"] = "function"
message_dict["name"] = message.name
elif isinstance(message, ToolMessage):
message_dict["role"] = "tool"
message_dict["tool_call_id"] = message.tool_call_id
else:
raise ValueError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
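
# Illustrative note (added; not in the original source): the mapping above
# follows the OpenAI chat schema. For example, HumanMessage(content="hi")
# becomes {"content": "hi", "role": "user"}, and an AIMessage with tool calls
# also gains a "tool_calls" list built by `_lc_tool_call_to_openai_tool_call`.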
class ChatLiteLLM(BaseChatModel):
"""Chat model that uses the LiteLLM API."""
client: Any = None #: :meta private:
model: str = "gpt-3.5-turbo"
model_name: Optional[str] = None
"""Model name to use."""
openai_api_key: Optional[str] = None
azure_api_key: Optional[str] = None
anthropic_api_key: Optional[str] = None
replicate_api_key: Optional[str] = None
cohere_api_key: Optional[str] = None
openrouter_api_key: Optional[str] = None
streaming: bool = False
api_base: Optional[str] = None
organization: Optional[str] = None
custom_llm_provider: Optional[str] = None
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
    temperature: Optional[float] = 1
    """Run inference with this temperature. Must be in the closed
    interval [0.0, 1.0]."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any additional parameters for the completion call that are not
    explicitly specified as fields on this class."""
top_p: Optional[float] = None
"""Decode using nucleus sampling: consider the smallest set of tokens whose
probability sum is at least top_p. Must be in the closed interval [0.0, 1.0]."""
top_k: Optional[int] = None
"""Decode using top-k sampling: consider the set of top_k most probable tokens.
Must be positive."""
n: int = 1
"""Number of chat completions to generate for each prompt. Note that the API may
not return the full n completions if duplicates are generated."""
max_tokens: Optional[int] = None
max_retries: int = 6
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
set_model_value = self.model
if self.model_name is not None:
set_model_value = self.model_name
return {
"model": set_model_value,
"force_timeout": self.request_timeout,
"max_tokens": self.max_tokens,
"stream": self.streaming,
"n": self.n,
"temperature": self.temperature,
"custom_llm_provider": self.custom_llm_provider,
**self.model_kwargs,
}
@property
def _client_params(self) -> Dict[str, Any]:
"""Get the parameters used for the openai client."""
set_model_value = self.model
if self.model_name is not None:
set_model_value = self.model_name
self.client.api_base = self.api_base
self.client.organization = self.organization
creds: Dict[str, Any] = {
"model": set_model_value,
"force_timeout": self.request_timeout,
"api_base": self.api_base,
}
return {**self._default_params, **creds}
def completion_with_retry(
self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(self, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return self.client.completion(**kwargs)
return _completion_with_retry(**kwargs)
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate api key, python package exists, temperature, top_p, and top_k."""
try:
import litellm
except ImportError:
raise ChatLiteLLMException(
"Could not import litellm python package. "
"Please install it with `pip install litellm`"
)
values["openai_api_key"] = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY", default=""
)
values["azure_api_key"] = get_from_dict_or_env(
values, "azure_api_key", "AZURE_API_KEY", default=""
)
values["anthropic_api_key"] = get_from_dict_or_env(
values, "anthropic_api_key", "ANTHROPIC_API_KEY", default=""
)
values["replicate_api_key"] = get_from_dict_or_env(
values, "replicate_api_key", "REPLICATE_API_KEY", default=""
)
values["openrouter_api_key"] = get_from_dict_or_env(
values, "openrouter_api_key", "OPENROUTER_API_KEY", default=""
)
values["cohere_api_key"] = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY", default=""
)
values["huggingface_api_key"] = get_from_dict_or_env(
values, "huggingface_api_key", "HUGGINGFACE_API_KEY", default=""
)
values["together_ai_api_key"] = get_from_dict_or_env(
values, "together_ai_api_key", "TOGETHERAI_API_KEY", default=""
)
values["client"] = litellm
if values["temperature"] is not None and not 0 <= values["temperature"] <= 1:
raise ValueError("temperature must be in the range [0.0, 1.0]")
if values["top_p"] is not None and not 0 <= values["top_p"] <= 1:
raise ValueError("top_p must be in the range [0.0, 1.0]")
if values["top_k"] is not None and values["top_k"] <= 0:
raise ValueError("top_k must be positive")
return values
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
)
return self._create_chat_result(response)
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
gen = ChatGeneration(
message=message,
generation_info=dict(finish_reason=res.get("finish_reason")),
)
generations.append(gen)
token_usage = response.get("usage", {})
set_model_value = self.model
if self.model_name is not None:
set_model_value = self.model_name
llm_output = {"token_usage": token_usage, "model": set_model_value}
return ChatResult(generations=generations, llm_output=llm_output)
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = self._client_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class = AIMessageChunk
for chunk in self.completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
):
if not isinstance(chunk, dict):
chunk = chunk.model_dump()
if len(chunk["choices"]) == 0:
continue
delta = chunk["choices"][0]["delta"]
chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
default_chunk_class = chunk.__class__
cg_chunk = ChatGenerationChunk(message=chunk)
if run_manager:
run_manager.on_llm_new_token(chunk.content, chunk=cg_chunk)
yield cg_chunk
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class = AIMessageChunk
async for chunk in await acompletion_with_retry(
self, messages=message_dicts, run_manager=run_manager, **params
):
if not isinstance(chunk, dict):
chunk = chunk.model_dump()
if len(chunk["choices"]) == 0:
continue
delta = chunk["choices"][0]["delta"]
chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
default_chunk_class = chunk.__class__
cg_chunk = ChatGenerationChunk(message=chunk)
if run_manager:
await run_manager.on_llm_new_token(chunk.content, chunk=cg_chunk)
yield cg_chunk
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._astream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = await acompletion_with_retry(
self, messages=message_dicts, run_manager=run_manager, **params
)
return self._create_chat_result(response)
def bind_tools(
self,
tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
"""Bind tool-like objects to this chat model.
        LiteLLM expects the ``tools`` argument in OpenAI format.
Args:
tools: A list of tool definitions to bind to this chat model.
Can be a dictionary, pydantic model, callable, or BaseTool. Pydantic
models, callables, and BaseTools will be automatically converted to
their schema dictionary representation.
tool_choice: Which tool to require the model to call.
Must be the name of the single provided function or
"auto" to automatically determine which function to call
(if any), or a dict of the form:
{"type": "function", "function": {"name": <<tool_name>>}}.
**kwargs: Any additional parameters to pass to the
:class:`~langchain.runnable.Runnable` constructor.
"""
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
return super().bind(tools=formatted_tools, **kwargs)
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
set_model_value = self.model
if self.model_name is not None:
set_model_value = self.model_name
return {
"model": set_model_value,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"n": self.n,
}
@property
def _llm_type(self) -> str:
return "litellm-chat"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/mlx.py | """MLX Chat Wrapper."""
from typing import Any, Iterator, List, Optional
from langchain_core.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import (
ChatGeneration,
ChatGenerationChunk,
ChatResult,
LLMResult,
)
from langchain_community.llms.mlx_pipeline import MLXPipeline
DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful, and honest assistant."""
class ChatMLX(BaseChatModel):
"""MLX chat models.
Works with `MLXPipeline` LLM.
To use, you should have the ``mlx-lm`` python package installed.
Example:
.. code-block:: python
            from langchain_community.chat_models import ChatMLX
from langchain_community.llms import MLXPipeline
llm = MLXPipeline.from_model_id(
model_id="mlx-community/quantized-gemma-2b-it",
)
            chat = ChatMLX(llm=llm)
"""
llm: MLXPipeline
system_message: SystemMessage = SystemMessage(content=DEFAULT_SYSTEM_PROMPT)
tokenizer: Any = None
def __init__(self, **kwargs: Any):
super().__init__(**kwargs)
self.tokenizer = self.llm.tokenizer
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
llm_input = self._to_chat_prompt(messages)
llm_result = self.llm._generate(
prompts=[llm_input], stop=stop, run_manager=run_manager, **kwargs
)
return self._to_chat_result(llm_result)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
llm_input = self._to_chat_prompt(messages)
llm_result = await self.llm._agenerate(
prompts=[llm_input], stop=stop, run_manager=run_manager, **kwargs
)
return self._to_chat_result(llm_result)
def _to_chat_prompt(
self,
messages: List[BaseMessage],
tokenize: bool = False,
return_tensors: Optional[str] = None,
) -> str:
"""Convert a list of messages into a prompt format expected by wrapped LLM."""
if not messages:
raise ValueError("At least one HumanMessage must be provided!")
if not isinstance(messages[-1], HumanMessage):
raise ValueError("Last message must be a HumanMessage!")
messages_dicts = [self._to_chatml_format(m) for m in messages]
return self.tokenizer.apply_chat_template(
messages_dicts,
tokenize=tokenize,
add_generation_prompt=True,
return_tensors=return_tensors,
)
def _to_chatml_format(self, message: BaseMessage) -> dict:
"""Convert LangChain message to ChatML format."""
if isinstance(message, SystemMessage):
role = "system"
elif isinstance(message, AIMessage):
role = "assistant"
elif isinstance(message, HumanMessage):
role = "user"
else:
raise ValueError(f"Unknown message type: {type(message)}")
return {"role": role, "content": message.content}
@staticmethod
def _to_chat_result(llm_result: LLMResult) -> ChatResult:
chat_generations = []
for g in llm_result.generations[0]:
chat_generation = ChatGeneration(
message=AIMessage(content=g.text), generation_info=g.generation_info
)
chat_generations.append(chat_generation)
return ChatResult(
generations=chat_generations, llm_output=llm_result.llm_output
)
@property
def _llm_type(self) -> str:
return "mlx-chat-wrapper"
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
try:
import mlx.core as mx
from mlx_lm.utils import generate_step
except ImportError:
raise ImportError(
"Could not import mlx_lm python package. "
"Please install it with `pip install mlx_lm`."
)
model_kwargs = kwargs.get("model_kwargs", self.llm.pipeline_kwargs)
temp: float = model_kwargs.get("temp", 0.0)
max_new_tokens: int = model_kwargs.get("max_tokens", 100)
repetition_penalty: Optional[float] = model_kwargs.get(
"repetition_penalty", None
)
repetition_context_size: Optional[int] = model_kwargs.get(
"repetition_context_size", None
)
llm_input = self._to_chat_prompt(messages, tokenize=True, return_tensors="np")
prompt_tokens = mx.array(llm_input[0])
eos_token_id = self.tokenizer.eos_token_id
for (token, prob), n in zip(
generate_step(
prompt_tokens,
self.llm.model,
temp,
repetition_penalty,
repetition_context_size,
),
range(max_new_tokens),
):
# identify text to yield
text: Optional[str] = None
text = self.tokenizer.decode(token.item())
# yield text, if any
if text:
chunk = ChatGenerationChunk(message=AIMessageChunk(content=text))
if run_manager:
run_manager.on_llm_new_token(text, chunk=chunk)
yield chunk
# break if stop sequence found
if token == eos_token_id or (stop is not None and text in stop):
break
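
# ---------------------------------------------------------------------------
# Minimal streaming sketch (added for illustration; not part of the original
# module). It assumes an Apple-silicon machine with `mlx-lm` installed; the
# model id matches the one used in the class docstring above.
if __name__ == "__main__":
    from langchain_core.messages import HumanMessage

    llm = MLXPipeline.from_model_id(
        model_id="mlx-community/quantized-gemma-2b-it",
    )
    chat = ChatMLX(llm=llm)
    for chunk in chat.stream([HumanMessage(content="Tell me a one-line joke.")]):
        print(chunk.content, end="", flush=True)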
|