id
stringlengths
14
15
text
stringlengths
49
2.47k
source
stringlengths
61
166
1a31b8ea94ee-1
about this alternative if provided. pending : bool, optional If True, uses a PendingDeprecationWarning instead of a DeprecationWarning. Cannot be used together with removal. obj_type : str, optional The object type being deprecated. addendum : str, optional Additional text appended directly to the final message. removal : str, optional The expected removal version. With the default (an empty string), a removal version is automatically computed from since. Set to other Falsy values to not schedule a removal date. Cannot be used together with pending. """ if pending and removal: raise ValueError("A pending deprecation cannot have a scheduled removal") if not pending: if not removal: removal = f"in {removal}" if removal else "within ?? minor releases" raise NotImplementedError( f"Need to determine which default deprecation schedule to use. " f"{removal}" ) else: removal = f"in {removal}" if not message: message = "" if obj_type: message += f"The {obj_type} `{name}`" else: message += f"`{name}`" if pending: message += " will be deprecated in a future version" else: message += f" was deprecated in LangChain {since}" if removal: message += f" and will be removed {removal}" if alternative: message += f". Use {alternative} instead." if addendum: message += f" {addendum}" warning_cls = PendingDeprecationWarning if pending else LangChainDeprecationWarning warning = warning_cls(message)
https://api.python.langchain.com/en/latest/_modules/langchain/_api/deprecation.html
1a31b8ea94ee-2
    warning = warning_cls(message)
    # stacklevel=2 attributes the warning to the caller of warn_deprecated,
    # not to this helper itself.
    warnings.warn(warning, category=LangChainDeprecationWarning, stacklevel=2)


# PUBLIC API

# The decorated object: either a class or a callable.
T = TypeVar("T", Type, Callable)


def deprecated(
    since: str,
    *,
    message: str = "",
    name: str = "",
    alternative: str = "",
    pending: bool = False,
    obj_type: str = "",
    addendum: str = "",
    removal: str = "",
) -> Callable[[T], T]:
    """Decorator to mark a function, a class, or a property as deprecated.

    When deprecating a classmethod, a staticmethod, or a property, the
    ``@deprecated`` decorator should go *under* ``@classmethod`` and
    ``@staticmethod`` (i.e., `deprecated` should directly decorate the
    underlying callable), but *over* ``@property``.

    When deprecating a class ``C`` intended to be used as a base class in a
    multiple inheritance hierarchy, ``C`` *must* define an ``__init__`` method
    (if ``C`` instead inherited its ``__init__`` from its own base class, then
    ``@deprecated`` would mess up ``__init__`` inheritance when installing its
    own (deprecation-emitting) ``C.__init__``).

    Parameters are the same as for `warn_deprecated`, except that *obj_type*
    defaults to 'class' if decorating a class, 'attribute' if decorating a
    property, and 'function' otherwise.

    Arguments:
        since : str
            The release at which this API became deprecated.
        message : str, optional
            Override the default deprecation message. The %(since)s,
            %(name)s, %(alternative)s, %(obj_type)s, %(addendum)s,
            and %(removal)s format specifiers will be replaced by the
            values of the respective arguments passed to this function.
        name : str, optional
            The name of the deprecated object.
        alternative : str, optional
            An alternative API that the user may use in place of the
            deprecated API. The deprecation warning will tell the user
            about this alternative if provided.
        pending : bool, optional
            If True, uses a PendingDeprecationWarning instead of a
            DeprecationWarning. Cannot be used together with removal.
        obj_type : str, optional
            The object type being deprecated.
        addendum : str, optional
            Additional text appended directly to the final message.
        removal : str, optional
            The expected removal version. With the default (an empty
            string), a removal version is automatically computed from
            since. Set to other Falsy values to not schedule a removal
            date. Cannot be used together with pending.

    Examples
    --------

        .. code-block:: python

            @deprecated('1.4.0')
            def the_function_to_deprecate():
                pass
    """

    def deprecate(
        obj: T,
        *,
        _obj_type: str = obj_type,
        _name: str = name,
        _message: str = message,
        _alternative: str = alternative,
        _pending: bool = pending,
        _addendum: str = addendum,
    ) -> T:
        """Implementation of the decorator returned by `deprecated`."""
        if isinstance(obj, type):
            # Deprecating a class: wrap its __init__ so instantiation warns.
            if not _obj_type:
                _obj_type = "class"
            wrapped = obj.__init__  # type: ignore
            _name = _name or obj.__name__
            old_doc = obj.__doc__

            def finalize(wrapper: Callable[..., Any], new_doc: str) -> T:
                """Finalize the deprecation of a class."""
                try:
                    obj.__doc__ = new_doc
                except AttributeError:  # Can't set on some extension objects.
                    pass
                obj.__init__ = functools.wraps(obj.__init__)(  # type: ignore[misc]
                    wrapper
                )
                return obj

        elif isinstance(obj, property):
            # Deprecating a property: replace it with a subclass of the
            # property type that warns on get/set/delete.
            if not _obj_type:
                _obj_type = "attribute"
            wrapped = None
            _name = _name or obj.fget.__name__
            old_doc = obj.__doc__

            class _deprecated_property(type(obj)):  # type: ignore
                """A deprecated property."""

                def __get__(self, instance, owner=None):  # type: ignore
                    if instance is not None or owner is not None:
                        emit_warning()
                    return super().__get__(instance, owner)

                def __set__(self, instance, value):  # type: ignore
                    if instance is not None:
                        emit_warning()
                    return super().__set__(instance, value)

                def __delete__(self, instance):  # type: ignore
                    if instance is not None:
                        emit_warning()
                    return super().__delete__(instance)

                def __set_name__(self, owner, set_name):  # type: ignore
                    # Pick up the attribute name assigned in the class body
                    # when the wrapped fget was an anonymous lambda.
                    nonlocal _name
                    if _name == "<lambda>":
                        _name = set_name

            def finalize(_: Any, new_doc: str) -> Any:  # type: ignore
                """Finalize the property."""
                return _deprecated_property(
                    fget=obj.fget, fset=obj.fset, fdel=obj.fdel, doc=new_doc
                )

        else:
            # Deprecating a plain function or method.
            if not _obj_type:
                _obj_type = "function"
            wrapped = obj
            _name = _name or obj.__name__  # type: ignore
            old_doc = wrapped.__doc__

            def finalize(  # type: ignore
                wrapper: Callable[..., Any], new_doc: str
            ) -> T:
                """Wrap the wrapped function using the wrapper and update
                the docstring.

                Args:
                    wrapper: The wrapper function.
                    new_doc: The new docstring.

                Returns:
                    The wrapped function.
                """
                wrapper = functools.wraps(wrapped)(wrapper)
                wrapper.__doc__ = new_doc
                return wrapper

        def emit_warning() -> None:
            """Emit the warning."""
            _warn_deprecated(
                since,
                message=_message,
                name=_name,
                alternative=_alternative,
                pending=_pending,
                obj_type=_obj_type,
                addendum=_addendum,
                removal=removal,
            )

        def warning_emitting_wrapper(*args: Any, **kwargs: Any) -> Any:
            """Wrapper for the original wrapped callable that emits a warning.

            Args:
                *args: The positional arguments to the function.
                **kwargs: The keyword arguments to the function.

            Returns:
                The return value of the function being wrapped.
            """
            emit_warning()
            return wrapped(*args, **kwargs)

        # Prefix the (cleaned) original docstring with a deprecation marker.
        old_doc = inspect.cleandoc(old_doc or "").strip("\n")

        if not old_doc:
            new_doc = "[*Deprecated*]"
        else:
            new_doc = f"[*Deprecated*] {old_doc}"

        return finalize(warning_emitting_wrapper, new_doc)

    return deprecate


@contextlib.contextmanager
def suppress_langchain_deprecation_warning() -> Generator[None, None, None]:
    """Context manager to suppress LangChainDeprecationWarning."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", LangChainDeprecationWarning)
        yield
https://api.python.langchain.com/en/latest/_modules/langchain/_api/deprecation.html
f2fdcb0656a9-0
"""Graph Index Creator."""
from typing import Optional, Type

from pydantic import BaseModel

from langchain import BasePromptTemplate
from langchain.chains.llm import LLMChain
from langchain.graphs.networkx_graph import NetworkxEntityGraph, parse_triples
from langchain.indexes.prompts.knowledge_triplet_extraction import (
    KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
)
from langchain.schema.language_model import BaseLanguageModel


class GraphIndexCreator(BaseModel):
    """Functionality to create graph index."""

    # Language model used to extract knowledge triples; must be set before use.
    llm: Optional[BaseLanguageModel] = None
    # Concrete graph class to instantiate for each index.
    graph_type: Type[NetworkxEntityGraph] = NetworkxEntityGraph

    def from_text(
        self, text: str, prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT
    ) -> NetworkxEntityGraph:
        """Create graph index from text."""
        if self.llm is None:
            raise ValueError("llm should not be None")
        extraction_chain = LLMChain(llm=self.llm, prompt=prompt)
        raw_completion = extraction_chain.predict(text=text)
        index_graph = self.graph_type()
        # Each parsed triple becomes an edge in the entity graph.
        for triple in parse_triples(raw_completion):
            index_graph.add_triple(triple)
        return index_graph

    async def afrom_text(
        self, text: str, prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT
    ) -> NetworkxEntityGraph:
        """Create graph index from text asynchronously."""
        if self.llm is None:
            raise ValueError("llm should not be None")
        extraction_chain = LLMChain(llm=self.llm, prompt=prompt)
        raw_completion = await extraction_chain.apredict(text=text)
        index_graph = self.graph_type()
        for triple in parse_triples(raw_completion):
            index_graph.add_triple(triple)
        return index_graph
https://api.python.langchain.com/en/latest/_modules/langchain/indexes/graph.html
b2483446cd11-0
from typing import Any, Dict, List, Optional, Type

from pydantic import BaseModel, Extra, Field

from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain
from langchain.chains.retrieval_qa.base import RetrievalQA
from langchain.document_loaders.base import BaseLoader
from langchain.embeddings.base import Embeddings
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms.openai import OpenAI
from langchain.schema import Document
from langchain.schema.language_model import BaseLanguageModel
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.chroma import Chroma


def _get_default_text_splitter() -> TextSplitter:
    """Default splitter: 1000-character chunks, no overlap."""
    return RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)


class VectorStoreIndexWrapper(BaseModel):
    """Wrapper around a vectorstore for easy access."""

    vectorstore: VectorStore

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    def query(
        self,
        question: str,
        llm: Optional[BaseLanguageModel] = None,
        retriever_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs: Any
    ) -> str:
        """Query the vectorstore.

        Falls back to a temperature-0 OpenAI LLM when none is supplied.
        """
        qa_chain = RetrievalQA.from_chain_type(
            llm or OpenAI(temperature=0),
            retriever=self.vectorstore.as_retriever(**(retriever_kwargs or {})),
            **kwargs
        )
        return qa_chain.run(question)

    def query_with_sources(
        self,
        question: str,
        llm: Optional[BaseLanguageModel] = None,
        retriever_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs: Any
    ) -> dict:
        """Query the vectorstore and get back sources."""
        sources_chain = RetrievalQAWithSourcesChain.from_chain_type(
            llm or OpenAI(temperature=0),
            retriever=self.vectorstore.as_retriever(**(retriever_kwargs or {})),
            **kwargs
        )
        return sources_chain({sources_chain.question_key: question})


class VectorstoreIndexCreator(BaseModel):
    """Logic for creating indexes."""

    vectorstore_cls: Type[VectorStore] = Chroma
    embedding: Embeddings = Field(default_factory=OpenAIEmbeddings)
    text_splitter: TextSplitter = Field(default_factory=_get_default_text_splitter)
    vectorstore_kwargs: dict = Field(default_factory=dict)

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    def from_loaders(self, loaders: List[BaseLoader]) -> VectorStoreIndexWrapper:
        """Create a vectorstore index from loaders."""
        loaded_docs: List[Document] = []
        for loader in loaders:
            loaded_docs.extend(loader.load())
        return self.from_documents(loaded_docs)

    def from_documents(self, documents: List[Document]) -> VectorStoreIndexWrapper:
        """Create a vectorstore index from documents."""
        chunks = self.text_splitter.split_documents(documents)
        store = self.vectorstore_cls.from_documents(
            chunks, self.embedding, **self.vectorstore_kwargs
        )
        return VectorStoreIndexWrapper(vectorstore=store)
https://api.python.langchain.com/en/latest/_modules/langchain/indexes/vectorstore.html
cd23f6397dd5-0
import importlib
import json
import os
from typing import Any, Dict, List, Optional

from langchain.load.serializable import Serializable


class Reviver:
    """Reviver for JSON objects."""

    def __init__(
        self,
        secrets_map: Optional[Dict[str, str]] = None,
        valid_namespaces: Optional[List[str]] = None,
    ) -> None:
        self.secrets_map = secrets_map or dict()
        # By default only support langchain, but user can pass in additional namespaces
        self.valid_namespaces = (
            ["langchain", *valid_namespaces] if valid_namespaces else ["langchain"]
        )

    def __call__(self, value: Dict[str, Any]) -> Any:
        """Revive `value` if it is a serialized LangChain node; else return it."""
        lc_version = value.get("lc", None)
        node_type = value.get("type", None)
        node_id = value.get("id", None)

        if lc_version == 1 and node_id is not None:
            if node_type == "secret":
                # Resolve from the explicit map first, then the environment.
                [key] = node_id
                if key in self.secrets_map:
                    return self.secrets_map[key]
                if key in os.environ and os.environ[key]:
                    return os.environ[key]
                raise KeyError(f'Missing key "{key}" in load(secrets_map)')

            if node_type == "not_implemented":
                raise NotImplementedError(
                    "Trying to load an object that doesn't implement "
                    f"serialization: {value}"
                )

            if node_type == "constructor":
                [*namespace, name] = node_id
                if namespace[0] not in self.valid_namespaces:
                    raise ValueError(f"Invalid namespace: {value}")
                # The root namespace "langchain" is not a valid identifier.
                if len(namespace) == 1 and namespace[0] == "langchain":
                    raise ValueError(f"Invalid namespace: {value}")
                mod = importlib.import_module(".".join(namespace))
                cls = getattr(mod, name)
                # The class must be a subclass of Serializable.
                if not issubclass(cls, Serializable):
                    raise ValueError(f"Invalid namespace: {value}")
                # We don't need to recurse on kwargs
                # as json.loads will do that for us.
                return cls(**value.get("kwargs", dict()))

        return value


def loads(
    text: str,
    *,
    secrets_map: Optional[Dict[str, str]] = None,
    valid_namespaces: Optional[List[str]] = None,
) -> Any:
    """Revive a LangChain class from a JSON string.

    Equivalent to `load(json.loads(text))`.

    Args:
        text: The string to load.
        secrets_map: A map of secrets to load.
        valid_namespaces: A list of additional namespaces (modules)
            to allow to be deserialized.

    Returns:
        Revived LangChain objects.
    """
    return json.loads(text, object_hook=Reviver(secrets_map, valid_namespaces))


def load(
    obj: Any,
    *,
    secrets_map: Optional[Dict[str, str]] = None,
    valid_namespaces: Optional[List[str]] = None,
) -> Any:
    """Revive a LangChain class from a JSON object.

    Use this if you already have a parsed JSON object,
    eg. from `json.load` or `orjson.loads`.

    Args:
        obj: The object to load.
        secrets_map: A map of secrets to load.
        valid_namespaces: A list of additional namespaces (modules)
            to allow to be deserialized.

    Returns:
        Revived LangChain objects.
    """
    reviver = Reviver(secrets_map, valid_namespaces)

    def _load(obj: Any) -> Any:
        if isinstance(obj, dict):
            # Need to revive leaf nodes before reviving this node
            return reviver({k: _load(v) for k, v in obj.items()})
        if isinstance(obj, list):
            return [_load(o) for o in obj]
        return obj

    return _load(obj)
https://api.python.langchain.com/en/latest/_modules/langchain/load/load.html
8862b45a94fd-0
from abc import ABC
from typing import Any, Dict, List, Literal, TypedDict, Union, cast

from pydantic import BaseModel, PrivateAttr


class BaseSerialized(TypedDict):
    """Base class for serialized objects."""

    lc: int
    id: List[str]


class SerializedConstructor(BaseSerialized):
    """Serialized constructor."""

    type: Literal["constructor"]
    kwargs: Dict[str, Any]


class SerializedSecret(BaseSerialized):
    """Serialized secret."""

    type: Literal["secret"]


class SerializedNotImplemented(BaseSerialized):
    """Serialized not implemented."""

    type: Literal["not_implemented"]


class Serializable(BaseModel, ABC):
    """Serializable base class."""

    @property
    def lc_serializable(self) -> bool:
        """
        Return whether or not the class is serializable.
        """
        return False

    @property
    def lc_namespace(self) -> List[str]:
        """
        Return the namespace of the langchain object.
        eg. ["langchain", "llms", "openai"]
        """
        return self.__class__.__module__.split(".")

    @property
    def lc_secrets(self) -> Dict[str, str]:
        """
        Return a map of constructor argument names to secret ids.
        eg. {"openai_api_key": "OPENAI_API_KEY"}
        """
        return dict()

    @property
    def lc_attributes(self) -> Dict:
        """
        Return a list of attribute names that should be included in the
        serialized kwargs. These attributes must be accepted by the
        constructor.
        """
        return {}

    class Config:
        extra = "ignore"

    # Constructor kwargs captured at __init__ time so they can be replayed
    # during serialization.
    _lc_kwargs = PrivateAttr(default_factory=dict)

    def __init__(self, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self._lc_kwargs = kwargs

    def to_json(self) -> Union[SerializedConstructor, SerializedNotImplemented]:
        """Serialize this object as a constructor node (or a
        "not implemented" node when the class opts out via lc_serializable)."""
        if not self.lc_serializable:
            return self.to_json_not_implemented()

        secrets = dict()
        # Get latest values for kwargs if there is an attribute with same name
        lc_kwargs = {
            k: getattr(self, k, v)
            for k, v in self._lc_kwargs.items()
            if not (self.__exclude_fields__ or {}).get(k, False)  # type: ignore
        }

        # Merge the lc_secrets and lc_attributes from every class in the MRO
        for cls in [None, *self.__class__.mro()]:
            # Once we get to Serializable, we're done
            if cls is Serializable:
                break

            # Get a reference to self bound to each class in the MRO
            # (`super(cls, self)` makes the property lookups resolve against
            # each ancestor class in turn, so every override contributes).
            this = cast(Serializable, self if cls is None else super(cls, self))

            secrets.update(this.lc_secrets)
            lc_kwargs.update(this.lc_attributes)

        # include all secrets, even if not specified in kwargs
        # as these secrets may be passed as an environment variable instead
        for key in secrets.keys():
            secret_value = getattr(self, key, None) or lc_kwargs.get(key)
            if secret_value is not None:
                lc_kwargs.update({key: secret_value})

        return {
            "lc": 1,
            "type": "constructor",
            "id": [*self.lc_namespace, self.__class__.__name__],
            "kwargs": lc_kwargs
            if not secrets
            else _replace_secrets(lc_kwargs, secrets),
        }

    def to_json_not_implemented(self) -> SerializedNotImplemented:
        return to_json_not_implemented(self)


def _replace_secrets(
    root: Dict[Any, Any], secrets_map: Dict[str, str]
) -> Dict[Any, Any]:
    # Return a copy of `root` where each dotted path in `secrets_map` is
    # replaced by a serialized secret node. Traversed sub-dicts are copied
    # so the caller's original structure is never mutated.
    result = root.copy()
    for path, secret_id in secrets_map.items():
        [*parts, last] = path.split(".")
        current = result
        for part in parts:
            if part not in current:
                # Path does not exist in this kwargs tree; skip silently.
                break
            current[part] = current[part].copy()
            current = current[part]
        if last in current:
            current[last] = {
                "lc": 1,
                "type": "secret",
                "id": [secret_id],
            }
    return result


def to_json_not_implemented(obj: object) -> SerializedNotImplemented:
    """Serialize a "not implemented" object.

    Args:
        obj: object to serialize

    Returns:
        SerializedNotImplemented
    """
    _id: List[str] = []
    try:
        # Functions/classes carry __name__; instances fall back to their class.
        if hasattr(obj, "__name__"):
            _id = [*obj.__module__.split("."), obj.__name__]
        elif hasattr(obj, "__class__"):
            _id = [*obj.__class__.__module__.split("."), obj.__class__.__name__]
    except Exception:
        # Best-effort: identification failure leaves an empty id.
        pass
    return {
        "lc": 1,
        "type": "not_implemented",
        "id": _id,
    }
https://api.python.langchain.com/en/latest/_modules/langchain/load/serializable.html
269bbfcb3398-0
import json
from typing import Any, Dict

from langchain.load.serializable import Serializable, to_json_not_implemented


def default(obj: Any) -> Any:
    """Return a default value for a Serializable object or
    a SerializedNotImplemented object."""
    if isinstance(obj, Serializable):
        return obj.to_json()
    return to_json_not_implemented(obj)


def dumps(obj: Any, *, pretty: bool = False) -> str:
    """Return a json string representation of an object."""
    # indent=None is json.dumps' compact default; 2 gives pretty output.
    return json.dumps(obj, default=default, indent=2 if pretty else None)


def dumpd(obj: Any) -> Dict[str, Any]:
    """Return a json dict representation of an object."""
    return json.loads(dumps(obj))
https://api.python.langchain.com/en/latest/_modules/langchain/load/dump.html
dd521dbffa75-0
Source code for langchain.output_parsers.list from __future__ import annotations from abc import abstractmethod from typing import List from langchain.schema import BaseOutputParser [docs]class ListOutputParser(BaseOutputParser[List[str]]): """Parse the output of an LLM call to a list.""" @property def _type(self) -> str: return "list" [docs] @abstractmethod def parse(self, text: str) -> List[str]: """Parse the output of an LLM call.""" [docs]class CommaSeparatedListOutputParser(ListOutputParser): """Parse the output of an LLM call to a comma-separated list.""" @property def lc_serializable(self) -> bool: return True [docs] def get_format_instructions(self) -> str: return ( "Your response should be a list of comma separated values, " "eg: `foo, bar, baz`" ) [docs] def parse(self, text: str) -> List[str]: """Parse the output of an LLM call.""" return text.strip().split(", ")
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/list.html
ec2c22c2f0fa-0
from langchain.schema import BaseOutputParser


class BooleanOutputParser(BaseOutputParser[bool]):
    """Parse the output of an LLM call to a boolean."""

    true_val: str = "YES"
    """The string value that should be parsed as True."""
    false_val: str = "NO"
    """The string value that should be parsed as False."""

    def parse(self, text: str) -> bool:
        """Parse the output of an LLM call to a boolean.

        Args:
            text: output of a language model

        Returns:
            boolean
        """
        candidate = text.strip()
        # Comparison is case-insensitive against the configured tokens.
        if candidate.upper() == self.true_val.upper():
            return True
        if candidate.upper() == self.false_val.upper():
            return False
        raise ValueError(
            f"BooleanOutputParser expected output value to either be "
            f"{self.true_val} or {self.false_val}. Received {candidate}."
        )

    @property
    def _type(self) -> str:
        """Snake-case string identifier for an output parser type."""
        return "boolean_output_parser"
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/boolean.html
3ee931da1ebe-0
Source code for langchain.output_parsers.structured from __future__ import annotations from typing import Any, List from pydantic import BaseModel from langchain.output_parsers.format_instructions import ( STRUCTURED_FORMAT_INSTRUCTIONS, STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS, ) from langchain.output_parsers.json import parse_and_check_json_markdown from langchain.schema import BaseOutputParser line_template = '\t"{name}": {type} // {description}' [docs]class ResponseSchema(BaseModel): """A schema for a response from a structured output parser.""" name: str """The name of the schema.""" description: str """The description of the schema.""" type: str = "string" """The type of the response.""" def _get_sub_string(schema: ResponseSchema) -> str: return line_template.format( name=schema.name, description=schema.description, type=schema.type ) [docs]class StructuredOutputParser(BaseOutputParser): """Parse the output of an LLM call to a structured output.""" response_schemas: List[ResponseSchema] """The schemas for the response.""" [docs] @classmethod def from_response_schemas( cls, response_schemas: List[ResponseSchema] ) -> StructuredOutputParser: return cls(response_schemas=response_schemas) [docs] def get_format_instructions(self, only_json: bool = False) -> str: """Get format instructions for the output parser. example: ```python from langchain.output_parsers.structured import ( StructuredOutputParser, ResponseSchema ) response_schemas = [ ResponseSchema( name="foo",
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/structured.html
3ee931da1ebe-1
response_schemas = [ ResponseSchema( name="foo", description="a list of strings", type="List[string]" ), ResponseSchema( name="bar", description="a string", type="string" ), ] parser = StructuredOutputParser.from_response_schemas(response_schemas) print(parser.get_format_instructions()) output: # The output should be a Markdown code snippet formatted in the following # schema, including the leading and trailing "```json" and "```": # # ```json # { # "foo": List[string] // a list of strings # "bar": string // a string # } Args: only_json (bool): If True, only the json in the Markdown code snippet will be returned, without the introducing text. Defaults to False. """ schema_str = "\n".join( [_get_sub_string(schema) for schema in self.response_schemas] ) if only_json: return STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS.format(format=schema_str) else: return STRUCTURED_FORMAT_INSTRUCTIONS.format(format=schema_str) [docs] def parse(self, text: str) -> Any: expected_keys = [rs.name for rs in self.response_schemas] return parse_and_check_json_markdown(text, expected_keys) @property def _type(self) -> str: return "structured"
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/structured.html
d525f4c7d129-0
from __future__ import annotations

from typing import TypeVar

from langchain.chains.llm import LLMChain
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import (
    BaseOutputParser,
    BasePromptTemplate,
    OutputParserException,
    PromptValue,
)
from langchain.schema.language_model import BaseLanguageModel

# Retry prompt that shows the model the failed completion only.
NAIVE_COMPLETION_RETRY = """Prompt:
{prompt}
Completion:
{completion}

Above, the Completion did not satisfy the constraints given in the Prompt.
Please try again:"""

# Retry prompt that additionally shows the parse error details.
NAIVE_COMPLETION_RETRY_WITH_ERROR = """Prompt:
{prompt}
Completion:
{completion}

Above, the Completion did not satisfy the constraints given in the Prompt.
Details: {error}
Please try again:"""

NAIVE_RETRY_PROMPT = PromptTemplate.from_template(NAIVE_COMPLETION_RETRY)
NAIVE_RETRY_WITH_ERROR_PROMPT = PromptTemplate.from_template(
    NAIVE_COMPLETION_RETRY_WITH_ERROR
)

# Type produced by the wrapped parser.
T = TypeVar("T")


class RetryOutputParser(BaseOutputParser[T]):
    """Wraps a parser and tries to fix parsing errors.

    Does this by passing the original prompt and the completion to another
    LLM, and telling it the completion did not satisfy criteria in the prompt.
    """

    parser: BaseOutputParser[T]
    """The parser to use to parse the output."""
    retry_chain: LLMChain
    """The LLMChain to use to retry the completion."""

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        parser: BaseOutputParser[T],
        prompt: BasePromptTemplate = NAIVE_RETRY_PROMPT,
    ) -> RetryOutputParser[T]:
        """Create a RetryOutputParser from an LLM.

        Args:
            llm: The LLM to use to retry the completion.
            parser: The parser to use to parse the output.
            prompt: The prompt to use to retry the completion.

        Returns:
            A RetryOutputParser.
        """
        chain = LLMChain(llm=llm, prompt=prompt)
        return cls(parser=parser, retry_chain=chain)

    def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T:
        """Parse the output of an LLM call using a wrapped parser.

        Args:
            completion: The chain completion to parse.
            prompt_value: The prompt to use to parse the completion.

        Returns:
            The parsed completion.
        """
        try:
            parsed_completion = self.parser.parse(completion)
        except OutputParserException:
            # One retry only: re-query the LLM, then re-parse (may raise).
            new_completion = self.retry_chain.run(
                prompt=prompt_value.to_string(), completion=completion
            )
            parsed_completion = self.parser.parse(new_completion)

        return parsed_completion

    async def aparse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T:
        """Parse the output of an LLM call using a wrapped parser.

        Args:
            completion: The chain completion to parse.
            prompt_value: The prompt to use to parse the completion.

        Returns:
            The parsed completion.
        """
        try:
            parsed_completion = self.parser.parse(completion)
        except OutputParserException:
            new_completion = await self.retry_chain.arun(
                prompt=prompt_value.to_string(), completion=completion
            )
            parsed_completion = self.parser.parse(new_completion)

        return parsed_completion

    def parse(self, completion: str) -> T:
        # Retrying requires the original prompt, so plain parse is unsupported.
        raise NotImplementedError(
            "This OutputParser can only be called by the `parse_with_prompt` method."
        )

    def get_format_instructions(self) -> str:
        """Delegate format instructions to the wrapped parser."""
        return self.parser.get_format_instructions()

    @property
    def _type(self) -> str:
        return "retry"


class RetryWithErrorOutputParser(BaseOutputParser[T]):
    """Wraps a parser and tries to fix parsing errors.

    Does this by passing the original prompt, the completion, AND the error
    that was raised to another language model and telling it that the completion
    did not work, and raised the given error. Differs from RetryOutputParser
    in that this implementation provides the error that was raised back to the
    LLM, which in theory should give it more information on how to fix it.
    """

    parser: BaseOutputParser[T]
    """The parser to use to parse the output."""
    retry_chain: LLMChain
    """The LLMChain to use to retry the completion."""

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        parser: BaseOutputParser[T],
        prompt: BasePromptTemplate = NAIVE_RETRY_WITH_ERROR_PROMPT,
    ) -> RetryWithErrorOutputParser[T]:
        """Create a RetryWithErrorOutputParser from an LLM.

        Args:
            llm: The LLM to use to retry the completion.
            parser: The parser to use to parse the output.
            prompt: The prompt to use to retry the completion.

        Returns:
            A RetryWithErrorOutputParser.
        """
        chain = LLMChain(llm=llm, prompt=prompt)
        return cls(parser=parser, retry_chain=chain)

    def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T:
        """Parse the completion, retrying once with the error details on failure."""
        try:
            parsed_completion = self.parser.parse(completion)
        except OutputParserException as e:
            # Feed repr(e) back to the model so it can see what went wrong.
            new_completion = self.retry_chain.run(
                prompt=prompt_value.to_string(), completion=completion, error=repr(e)
            )
            parsed_completion = self.parser.parse(new_completion)

        return parsed_completion

    async def aparse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T:
        """Async variant of `parse_with_prompt`."""
        try:
            parsed_completion = self.parser.parse(completion)
        except OutputParserException as e:
            new_completion = await self.retry_chain.arun(
                prompt=prompt_value.to_string(), completion=completion, error=repr(e)
            )
            parsed_completion = self.parser.parse(new_completion)

        return parsed_completion

    def parse(self, completion: str) -> T:
        # Retrying requires the original prompt, so plain parse is unsupported.
        raise NotImplementedError(
            "This OutputParser can only be called by the `parse_with_prompt` method."
        )

    def get_format_instructions(self) -> str:
        """Delegate format instructions to the wrapped parser."""
        return self.parser.get_format_instructions()

    @property
    def _type(self) -> str:
        return "retry_with_error"
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/retry.html
e327bd2b3830-0
# Source code for langchain.output_parsers.openai_functions
import copy
import json
from typing import Any, Dict, List, Type, Union

from pydantic import BaseModel, root_validator

from langchain.schema import (
    ChatGeneration,
    Generation,
    OutputParserException,
)
from langchain.schema.output_parser import BaseGenerationOutputParser


class OutputFunctionsParser(BaseGenerationOutputParser[Any]):
    """Parse an output that is one of sets of values."""

    args_only: bool = True
    """Whether to only return the arguments to the function call."""

    def parse_result(self, result: List[Generation]) -> Any:
        """Extract the `function_call` payload from the first generation.

        Raises:
            OutputParserException: if the generation is not a chat generation
                or carries no `function_call` in its additional kwargs.
        """
        generation = result[0]
        if not isinstance(generation, ChatGeneration):
            raise OutputParserException(
                "This output parser can only be used with a chat generation."
            )
        message = generation.message
        try:
            # Deep-copy so callers may mutate the returned dict without
            # touching the original message.
            func_call = copy.deepcopy(message.additional_kwargs["function_call"])
        except KeyError as exc:
            raise OutputParserException(f"Could not parse function call: {exc}")

        if self.args_only:
            return func_call["arguments"]
        return func_call


class JsonOutputFunctionsParser(OutputFunctionsParser):
    """Parse an output as the Json object."""

    def parse_result(self, result: List[Generation]) -> Any:
        """Decode the function-call payload as JSON.

        Raises:
            OutputParserException: if the arguments are not valid JSON.
        """
        function_call_info = super().parse_result(result)
        if self.args_only:
            try:
                return json.loads(function_call_info)
            except (json.JSONDecodeError, TypeError) as exc:
                raise OutputParserException(
                    f"Could not parse function call data: {exc}"
                )
        # FIX: previously this branch leaked a raw json.JSONDecodeError on
        # malformed "arguments", while the args_only branch wrapped it in
        # OutputParserException. Both branches now raise the same exception
        # type, keeping the module's error contract consistent.
        try:
            function_call_info["arguments"] = json.loads(
                function_call_info["arguments"]
            )
        except (json.JSONDecodeError, TypeError) as exc:
            raise OutputParserException(
                f"Could not parse function call data: {exc}"
            )
        return function_call_info
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/openai_functions.html
e327bd2b3830-1
return function_call_info [docs]class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser): """Parse an output as the element of the Json object.""" key_name: str """The name of the key to return.""" [docs] def parse_result(self, result: List[Generation]) -> Any: res = super().parse_result(result) return res[self.key_name] [docs]class PydanticOutputFunctionsParser(OutputFunctionsParser): """Parse an output as a pydantic object.""" pydantic_schema: Union[Type[BaseModel], Dict[str, Type[BaseModel]]] """The pydantic schema to parse the output with.""" @root_validator(pre=True) def validate_schema(cls, values: Dict) -> Dict: schema = values["pydantic_schema"] if "args_only" not in values: values["args_only"] = isinstance(schema, type) and issubclass( schema, BaseModel ) elif values["args_only"] and isinstance(schema, Dict): raise ValueError( "If multiple pydantic schemas are provided then args_only should be" " False." ) return values [docs] def parse_result(self, result: List[Generation]) -> Any: _result = super().parse_result(result) if self.args_only: pydantic_args = self.pydantic_schema.parse_raw(_result) # type: ignore else: fn_name = _result["name"] _args = _result["arguments"] pydantic_args = self.pydantic_schema[fn_name].parse_raw(_args) # type: ignore # noqa: E501 return pydantic_args
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/openai_functions.html
e327bd2b3830-2
return pydantic_args [docs]class PydanticAttrOutputFunctionsParser(PydanticOutputFunctionsParser): """Parse an output as an attribute of a pydantic object.""" attr_name: str """The name of the attribute to return.""" [docs] def parse_result(self, result: List[Generation]) -> Any: result = super().parse_result(result) return getattr(result, self.attr_name)
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/openai_functions.html
01284bacca4e-0
Source code for langchain.output_parsers.combining from __future__ import annotations from typing import Any, Dict, List from pydantic import root_validator from langchain.schema import BaseOutputParser [docs]class CombiningOutputParser(BaseOutputParser): """Combine multiple output parsers into one.""" @property def lc_serializable(self) -> bool: return True parsers: List[BaseOutputParser] @root_validator() def validate_parsers(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Validate the parsers.""" parsers = values["parsers"] if len(parsers) < 2: raise ValueError("Must have at least two parsers") for parser in parsers: if parser._type == "combining": raise ValueError("Cannot nest combining parsers") if parser._type == "list": raise ValueError("Cannot comine list parsers") return values @property def _type(self) -> str: """Return the type key.""" return "combining" [docs] def get_format_instructions(self) -> str: """Instructions on how the LLM output should be formatted.""" initial = f"For your first output: {self.parsers[0].get_format_instructions()}" subsequent = "\n".join( f"Complete that output fully. Then produce another output, separated by two newline characters: {p.get_format_instructions()}" # noqa: E501 for p in self.parsers[1:] ) return f"{initial}\n{subsequent}" [docs] def parse(self, text: str) -> Dict[str, Any]: """Parse the output of an LLM call.""" texts = text.split("\n\n")
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/combining.html
01284bacca4e-1
texts = text.split("\n\n") output = dict() for txt, parser in zip(texts, self.parsers): output.update(parser.parse(txt.strip())) return output
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/combining.html
6a7da42a96a0-0
# Source code for langchain.output_parsers.loading
from langchain.output_parsers.regex import RegexParser


def load_output_parser(config: dict) -> dict:
    """Load an output parser.

    Args:
        config: config dict

    Returns:
        config dict with output parser loaded
    """
    # Only rewrite the entry when an output-parser config is present.
    if "output_parsers" in config and config["output_parsers"] is not None:
        parser_config = config["output_parsers"]
        output_parser_type = parser_config["_type"]
        if output_parser_type != "regex_parser":
            raise ValueError(f"Unsupported output parser {output_parser_type}")
        config["output_parsers"] = RegexParser(**parser_config)
    return config
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/loading.html
a5c1a980f393-0
# Source code for langchain.output_parsers.pydantic
import json
import re
from typing import Type, TypeVar

from pydantic import BaseModel, ValidationError

from langchain.output_parsers.format_instructions import PYDANTIC_FORMAT_INSTRUCTIONS
from langchain.schema import BaseOutputParser, OutputParserException

T = TypeVar("T", bound=BaseModel)


class PydanticOutputParser(BaseOutputParser[T]):
    """Parse an output using a pydantic model."""

    pydantic_object: Type[T]
    """The pydantic model to parse."""

    def parse(self, text: str) -> T:
        """Extract the first JSON object in `text` and validate it.

        Raises:
            OutputParserException: if no valid JSON is found or validation
                against the pydantic model fails.
        """
        try:
            # Greedy search for 1st json candidate.
            candidate = re.search(
                r"\{.*\}", text.strip(), re.MULTILINE | re.IGNORECASE | re.DOTALL
            )
            json_str = candidate.group() if candidate else ""
            json_object = json.loads(json_str, strict=False)
            return self.pydantic_object.parse_obj(json_object)
        except (json.JSONDecodeError, ValidationError) as e:
            name = self.pydantic_object.__name__
            msg = f"Failed to parse {name} from completion {text}. Got: {e}"
            raise OutputParserException(msg, llm_output=text)

    def get_format_instructions(self) -> str:
        schema = self.pydantic_object.schema()

        # Remove extraneous fields.
        reduced_schema = schema
        if "title" in reduced_schema:
            del reduced_schema["title"]
        if "type" in reduced_schema:
            del reduced_schema["type"]
        # Ensure json in context is well-formed with double quotes.
        schema_str = json.dumps(reduced_schema)
        # (chunk truncated here; this method continues in the next chunk)
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/pydantic.html
a5c1a980f393-1
schema_str = json.dumps(reduced_schema) return PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str) @property def _type(self) -> str: return "pydantic"
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/pydantic.html
d694bdc8a240-0
Source code for langchain.output_parsers.fix from __future__ import annotations from typing import TypeVar from langchain.chains.llm import LLMChain from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT from langchain.schema import BaseOutputParser, BasePromptTemplate, OutputParserException from langchain.schema.language_model import BaseLanguageModel T = TypeVar("T") [docs]class OutputFixingParser(BaseOutputParser[T]): """Wraps a parser and tries to fix parsing errors.""" @property def lc_serializable(self) -> bool: return True parser: BaseOutputParser[T] retry_chain: LLMChain [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, parser: BaseOutputParser[T], prompt: BasePromptTemplate = NAIVE_FIX_PROMPT, ) -> OutputFixingParser[T]: """Create an OutputFixingParser from a language model and a parser. Args: llm: llm to use for fixing parser: parser to use for parsing prompt: prompt to use for fixing Returns: OutputFixingParser """ chain = LLMChain(llm=llm, prompt=prompt) return cls(parser=parser, retry_chain=chain) [docs] def parse(self, completion: str) -> T: try: parsed_completion = self.parser.parse(completion) except OutputParserException as e: new_completion = self.retry_chain.run( instructions=self.parser.get_format_instructions(), completion=completion, error=repr(e), ) parsed_completion = self.parser.parse(new_completion) return parsed_completion
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/fix.html
d694bdc8a240-1
) parsed_completion = self.parser.parse(new_completion) return parsed_completion [docs] async def aparse(self, completion: str) -> T: try: parsed_completion = self.parser.parse(completion) except OutputParserException as e: new_completion = await self.retry_chain.arun( instructions=self.parser.get_format_instructions(), completion=completion, error=repr(e), ) parsed_completion = self.parser.parse(new_completion) return parsed_completion [docs] def get_format_instructions(self) -> str: return self.parser.get_format_instructions() @property def _type(self) -> str: return "output_fixing"
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/fix.html
f94b7aa441dd-0
Source code for langchain.output_parsers.json from __future__ import annotations import json import re from json import JSONDecodeError from typing import Any, List from langchain.schema import BaseOutputParser, OutputParserException def _replace_new_line(match: re.Match[str]) -> str: value = match.group(2) value = re.sub(r"\n", r"\\n", value) value = re.sub(r"\r", r"\\r", value) value = re.sub(r"\t", r"\\t", value) value = re.sub('"', r"\"", value) return match.group(1) + value + match.group(3) def _custom_parser(multiline_string: str) -> str: """ The LLM response for `action_input` may be a multiline string containing unescaped newlines, tabs or quotes. This function replaces those characters with their escaped counterparts. (newlines in JSON must be double-escaped: `\\n`) """ if isinstance(multiline_string, (bytes, bytearray)): multiline_string = multiline_string.decode() multiline_string = re.sub( r'("action_input"\:\s*")(.*)(")', _replace_new_line, multiline_string, flags=re.DOTALL, ) return multiline_string [docs]def parse_json_markdown(json_string: str) -> dict: """ Parse a JSON string from a Markdown string. Args: json_string: The Markdown string. Returns: The parsed JSON object as a Python dictionary. """ # Try to find JSON string within triple backticks
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/json.html
f94b7aa441dd-1
""" # Try to find JSON string within triple backticks match = re.search(r"```(json)?(.*)```", json_string, re.DOTALL) # If no match found, assume the entire string is a JSON string if match is None: json_str = json_string else: # If match found, use the content within the backticks json_str = match.group(2) # Strip whitespace and newlines from the start and end json_str = json_str.strip() # handle newlines and other special characters inside the returned value json_str = _custom_parser(json_str) # Parse the JSON string into a Python dictionary parsed = json.loads(json_str) return parsed [docs]def parse_and_check_json_markdown(text: str, expected_keys: List[str]) -> dict: """ Parse a JSON string from a Markdown string and check that it contains the expected keys. Args: text: The Markdown string. expected_keys: The expected keys in the JSON string. Returns: The parsed JSON object as a Python dictionary. """ try: json_obj = parse_json_markdown(text) except json.JSONDecodeError as e: raise OutputParserException(f"Got invalid JSON object. Error: {e}") for key in expected_keys: if key not in json_obj: raise OutputParserException( f"Got invalid return object. Expected key `{key}` " f"to be present, but got {json_obj}" ) return json_obj [docs]class SimpleJsonOutputParser(BaseOutputParser[Any]): """Parse the output of an LLM call to a JSON object."""
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/json.html
f94b7aa441dd-2
"""Parse the output of an LLM call to a JSON object.""" [docs] def parse(self, text: str) -> Any: text = text.strip() try: return json.loads(text) except JSONDecodeError as e: raise OutputParserException(f"Invalid json output: {text}") from e @property def _type(self) -> str: return "simple_json_output_parser"
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/json.html
9d90557bce25-0
Source code for langchain.output_parsers.datetime import random from datetime import datetime, timedelta from typing import List from langchain.schema import BaseOutputParser, OutputParserException from langchain.utils import comma_list def _generate_random_datetime_strings( pattern: str, n: int = 3, start_date: datetime = datetime(1, 1, 1), end_date: datetime = datetime.now() + timedelta(days=3650), ) -> List[str]: """Generates n random datetime strings conforming to the given pattern within the specified date range. Pattern should be a string containing the desired format codes. start_date and end_date should be datetime objects representing the start and end of the date range. """ examples = [] delta = end_date - start_date for i in range(n): random_delta = random.uniform(0, delta.total_seconds()) dt = start_date + timedelta(seconds=random_delta) date_string = dt.strftime(pattern) examples.append(date_string) return examples [docs]class DatetimeOutputParser(BaseOutputParser[datetime]): """Parse the output of an LLM call to a datetime.""" format: str = "%Y-%m-%dT%H:%M:%S.%fZ" """The string value that used as the datetime format.""" [docs] def get_format_instructions(self) -> str: examples = comma_list(_generate_random_datetime_strings(self.format)) return f"""Write a datetime string that matches the following pattern: "{self.format}". Examples: {examples}""" [docs] def parse(self, response: str) -> datetime: try: return datetime.strptime(response.strip(), self.format) except ValueError as e:
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/datetime.html
9d90557bce25-1
return datetime.strptime(response.strip(), self.format) except ValueError as e: raise OutputParserException( f"Could not parse datetime string: {response}" ) from e @property def _type(self) -> str: return "datetime"
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/datetime.html
a2c8f3968f66-0
Source code for langchain.output_parsers.regex_dict from __future__ import annotations import re from typing import Dict, Optional from langchain.schema import BaseOutputParser [docs]class RegexDictParser(BaseOutputParser): """Parse the output of an LLM call into a Dictionary using a regex.""" regex_pattern: str = r"{}:\s?([^.'\n']*)\.?" # : :meta private: """The regex pattern to use to parse the output.""" output_key_to_format: Dict[str, str] """The keys to use for the output.""" no_update_value: Optional[str] = None """The default key to use for the output.""" @property def _type(self) -> str: """Return the type key.""" return "regex_dict_parser" [docs] def parse(self, text: str) -> Dict[str, str]: """Parse the output of an LLM call.""" result = {} for output_key, expected_format in self.output_key_to_format.items(): specific_regex = self.regex_pattern.format(re.escape(expected_format)) matches = re.findall(specific_regex, text) if not matches: raise ValueError( f"No match found for output key: {output_key} with expected format \ {expected_format} on text {text}" ) elif len(matches) > 1: raise ValueError( f"Multiple matches found for output key: {output_key} with \ expected format {expected_format} on text {text}" ) elif ( self.no_update_value is not None and matches[0] == self.no_update_value ): continue else: result[output_key] = matches[0]
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/regex_dict.html
a2c8f3968f66-1
continue else: result[output_key] = matches[0] return result
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/regex_dict.html
c2dd755b6d1c-0
Source code for langchain.output_parsers.rail_parser from __future__ import annotations from typing import Any, Callable, Dict, Optional from langchain.schema import BaseOutputParser [docs]class GuardrailsOutputParser(BaseOutputParser): """Parse the output of an LLM call using Guardrails.""" guard: Any """The Guardrails object.""" api: Optional[Callable] """The API to use for the Guardrails object.""" args: Any """The arguments to pass to the API.""" kwargs: Any """The keyword arguments to pass to the API.""" @property def _type(self) -> str: return "guardrails" [docs] @classmethod def from_rail( cls, rail_file: str, num_reasks: int = 1, api: Optional[Callable] = None, *args: Any, **kwargs: Any, ) -> GuardrailsOutputParser: """Create a GuardrailsOutputParser from a rail file. Args: rail_file: a rail file. num_reasks: number of times to re-ask the question. api: the API to use for the Guardrails object. *args: The arguments to pass to the API **kwargs: The keyword arguments to pass to the API. Returns: GuardrailsOutputParser """ try: from guardrails import Guard except ImportError: raise ImportError( "guardrails-ai package not installed. " "Install it by running `pip install guardrails-ai`." ) return cls( guard=Guard.from_rail(rail_file, num_reasks=num_reasks),
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/rail_parser.html
c2dd755b6d1c-1
guard=Guard.from_rail(rail_file, num_reasks=num_reasks), api=api, args=args, kwargs=kwargs, ) [docs] @classmethod def from_rail_string( cls, rail_str: str, num_reasks: int = 1, api: Optional[Callable] = None, *args: Any, **kwargs: Any, ) -> GuardrailsOutputParser: try: from guardrails import Guard except ImportError: raise ImportError( "guardrails-ai package not installed. " "Install it by running `pip install guardrails-ai`." ) return cls( guard=Guard.from_rail_string(rail_str, num_reasks=num_reasks), api=api, args=args, kwargs=kwargs, ) [docs] @classmethod def from_pydantic( cls, output_class: Any, num_reasks: int = 1, api: Optional[Callable] = None, *args: Any, **kwargs: Any, ) -> GuardrailsOutputParser: try: from guardrails import Guard except ImportError: raise ImportError( "guardrails-ai package not installed. " "Install it by running `pip install guardrails-ai`." ) return cls( guard=Guard.from_pydantic(output_class, "", num_reasks=num_reasks), api=api, args=args, kwargs=kwargs, ) [docs] def get_format_instructions(self) -> str: return self.guard.raw_prompt.format_instructions
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/rail_parser.html
c2dd755b6d1c-2
return self.guard.raw_prompt.format_instructions [docs] def parse(self, text: str) -> Dict: return self.guard.parse(text, llm_api=self.api, *self.args, **self.kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/rail_parser.html
8ff5045d66f4-0
Source code for langchain.output_parsers.regex from __future__ import annotations import re from typing import Dict, List, Optional from langchain.schema import BaseOutputParser [docs]class RegexParser(BaseOutputParser): """Parse the output of an LLM call using a regex.""" @property def lc_serializable(self) -> bool: return True regex: str """The regex to use to parse the output.""" output_keys: List[str] """The keys to use for the output.""" default_output_key: Optional[str] = None """The default key to use for the output.""" @property def _type(self) -> str: """Return the type key.""" return "regex_parser" [docs] def parse(self, text: str) -> Dict[str, str]: """Parse the output of an LLM call.""" match = re.search(self.regex, text) if match: return {key: match.group(i + 1) for i, key in enumerate(self.output_keys)} else: if self.default_output_key is None: raise ValueError(f"Could not parse output: {text}") else: return { key: text if key == self.default_output_key else "" for key in self.output_keys }
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/regex.html
91e9b0f67c1d-0
# Source code for langchain.output_parsers.enum
from enum import Enum
from typing import Any, Dict, List, Type

from pydantic import root_validator

from langchain.schema import BaseOutputParser, OutputParserException


class EnumOutputParser(BaseOutputParser):
    """Parse an output that is one of a set of values."""

    enum: Type[Enum]
    """The enum to parse. Its values must be strings."""

    @root_validator()
    def validate_enum_values(cls, values: Dict) -> Dict:
        # FIX: renamed from `raise_deprecation` — a copy-paste misnomer;
        # this validator enforces that every enum value is a string, and
        # has nothing to do with deprecation.
        enum = values["enum"]
        if not all(isinstance(e.value, str) for e in enum):
            raise ValueError("Enum values must be strings")
        return values

    @property
    def _valid_values(self) -> List[str]:
        # All acceptable response strings, in declaration order.
        return [e.value for e in self.enum]

    def parse(self, response: str) -> Any:
        """Return the enum member whose value equals `response` (stripped).

        Raises:
            OutputParserException: if `response` matches no enum value.
        """
        try:
            return self.enum(response.strip())
        except ValueError:
            raise OutputParserException(
                f"Response '{response}' is not one of the "
                f"expected values: {self._valid_values}"
            )

    def get_format_instructions(self) -> str:
        return f"Select one of the following options: {', '.join(self._valid_values)}"
https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/enum.html
2afd113f9fba-0
Source code for langchain.smith.evaluation.string_run_evaluator """Run evaluator wrapper for string evaluators.""" from __future__ import annotations from abc import abstractmethod from typing import Any, Dict, List, Optional from langsmith import EvaluationResult, RunEvaluator from langsmith.schemas import DataType, Example, Run from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain.chains.base import Chain from langchain.evaluation.schema import StringEvaluator from langchain.load.dump import dumpd from langchain.load.load import load from langchain.load.serializable import Serializable from langchain.schema import RUN_KEY, messages_from_dict from langchain.schema.messages import BaseMessage, get_buffer_string def _get_messages_from_run_dict(messages: List[dict]) -> List[BaseMessage]: if not messages: return [] first_message = messages[0] if "lc" in first_message: return [load(dumpd(message)) for message in messages] else: return messages_from_dict(messages) [docs]class StringRunMapper(Serializable): """Extract items to evaluate from the run object.""" @property def output_keys(self) -> List[str]: """The keys to extract from the run.""" return ["prediction", "input"] [docs] @abstractmethod def map(self, run: Run) -> Dict[str, str]: """Maps the Run to a dictionary.""" [docs] def __call__(self, run: Run) -> Dict[str, str]: """Maps the Run to a dictionary.""" if not run.outputs: raise ValueError(f"Run {run.id} has no outputs to evaluate.") return self.map(run)
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/string_run_evaluator.html
2afd113f9fba-1
return self.map(run) [docs]class LLMStringRunMapper(StringRunMapper): """Extract items to evaluate from the run object.""" [docs] def serialize_chat_messages(self, messages: List[Dict]) -> str: """Extract the input messages from the run.""" if isinstance(messages, list) and messages: if isinstance(messages[0], dict): chat_messages = _get_messages_from_run_dict(messages) elif isinstance(messages[0], list): # Runs from Tracer have messages as a list of lists of dicts chat_messages = _get_messages_from_run_dict(messages[0]) else: raise ValueError(f"Could not extract messages to evaluate {messages}") return get_buffer_string(chat_messages) raise ValueError(f"Could not extract messages to evaluate {messages}") [docs] def serialize_inputs(self, inputs: Dict) -> str: if "prompts" in inputs: # Should we even accept this? input_ = "\n\n".join(inputs["prompts"]) elif "prompt" in inputs: input_ = inputs["prompt"] elif "messages" in inputs: input_ = self.serialize_chat_messages(inputs["messages"]) else: raise ValueError("LLM Run must have either messages or prompts as inputs.") return input_ [docs] def serialize_outputs(self, outputs: Dict) -> str: if not outputs.get("generations"): raise ValueError("Cannot evaluate LLM Run without generations.") generations: List[Dict] = outputs["generations"] if not generations: raise ValueError("Cannot evaluate LLM run with empty generations.") first_generation: Dict = generations[0] if isinstance(first_generation, list):
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/string_run_evaluator.html
2afd113f9fba-2
first_generation: Dict = generations[0] if isinstance(first_generation, list): # Runs from Tracer have generations as a list of lists of dicts # Whereas Runs from the API have a list of dicts first_generation = first_generation[0] if "message" in first_generation: output_ = self.serialize_chat_messages([first_generation["message"]]) else: output_ = first_generation["text"] return output_ [docs] def map(self, run: Run) -> Dict[str, str]: """Maps the Run to a dictionary.""" if run.run_type != "llm": raise ValueError("LLM RunMapper only supports LLM runs.") elif not run.outputs: if run.error: raise ValueError( f"Cannot evaluate errored LLM run {run.id}: {run.error}" ) else: raise ValueError( f"Run {run.id} has no outputs. Cannot evaluate this run." ) else: try: inputs = self.serialize_inputs(run.inputs) except Exception as e: raise ValueError( f"Could not parse LM input from run inputs {run.inputs}" ) from e try: output_ = self.serialize_outputs(run.outputs) except Exception as e: raise ValueError( f"Could not parse LM prediction from run outputs {run.outputs}" ) from e return {"input": inputs, "prediction": output_} [docs]class ChainStringRunMapper(StringRunMapper): """Extract items to evaluate from the run object from a chain.""" input_key: Optional[str] = None """The key from the model Run's inputs to use as the eval input.
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/string_run_evaluator.html
2afd113f9fba-3
"""The key from the model Run's inputs to use as the eval input. If not provided, will use the only input key or raise an error if there are multiple.""" prediction_key: Optional[str] = None """The key from the model Run's outputs to use as the eval prediction. If not provided, will use the only output key or raise an error if there are multiple.""" def _get_key(self, source: Dict, key: Optional[str], which: str) -> str: if key is not None: return source[key] elif len(source) == 1: return next(iter(source.values())) else: raise ValueError( f"Could not map run {which} with multiple keys: " f"{source}\nPlease manually specify a {which}_key" ) [docs] def map(self, run: Run) -> Dict[str, str]: """Maps the Run to a dictionary.""" if not run.outputs: raise ValueError(f"Run {run.id} has no outputs to evaluate.") if self.input_key is not None and self.input_key not in run.inputs: raise ValueError(f"Run {run.id} does not have input key {self.input_key}.") elif self.prediction_key is not None and self.prediction_key not in run.outputs: raise ValueError( f"Run {run.id} does not have prediction key {self.prediction_key}." ) else: input_ = self._get_key(run.inputs, self.input_key, "input") prediction = self._get_key(run.outputs, self.prediction_key, "prediction") return { "input": input_, "prediction": prediction, }
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/string_run_evaluator.html
2afd113f9fba-4
"input": input_, "prediction": prediction, } [docs]class ToolStringRunMapper(StringRunMapper): """Map an input to the tool.""" [docs] def map(self, run: Run) -> Dict[str, str]: if not run.outputs: raise ValueError(f"Run {run.id} has no outputs to evaluate.") return {"input": run.inputs["input"], "prediction": run.outputs["output"]} [docs]class StringExampleMapper(Serializable): """Map an example, or row in the dataset, to the inputs of an evaluation.""" reference_key: Optional[str] = None @property def output_keys(self) -> List[str]: """The keys to extract from the run.""" return ["reference"] [docs] def serialize_chat_messages(self, messages: List[Dict]) -> str: """Extract the input messages from the run.""" chat_messages = _get_messages_from_run_dict(messages) return get_buffer_string(chat_messages) [docs] def map(self, example: Example) -> Dict[str, str]: """Maps the Example, or dataset row to a dictionary.""" if not example.outputs: raise ValueError( f"Example {example.id} has no outputs to use as a reference." ) if self.reference_key is None: if len(example.outputs) > 1: raise ValueError( f"Example {example.id} has multiple outputs, so you must" " specify a reference_key." ) else: output = list(example.outputs.values())[0] elif self.reference_key not in example.outputs: raise ValueError( f"Example {example.id} does not have reference key"
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/string_run_evaluator.html
2afd113f9fba-5
raise ValueError( f"Example {example.id} does not have reference key" f" {self.reference_key}." ) else: output = example.outputs[self.reference_key] return { "reference": self.serialize_chat_messages([output]) if isinstance(output, dict) and output.get("type") and output.get("data") else str(output) } [docs] def __call__(self, example: Example) -> Dict[str, str]: """Maps the Run and Example to a dictionary.""" if not example.outputs: raise ValueError( f"Example {example.id} has no outputs to use as areference label." ) return self.map(example) [docs]class StringRunEvaluatorChain(Chain, RunEvaluator): """Evaluate Run and optional examples.""" run_mapper: StringRunMapper """Maps the Run to a dictionary with 'input' and 'prediction' strings.""" example_mapper: Optional[StringExampleMapper] = None """Maps the Example (dataset row) to a dictionary with a 'reference' string.""" name: str """The name of the evaluation metric.""" string_evaluator: StringEvaluator """The evaluation chain.""" @property def input_keys(self) -> List[str]: return ["run", "example"] @property def output_keys(self) -> List[str]: return ["feedback"] def _prepare_input(self, inputs: Dict[str, Any]) -> Dict[str, str]: run: Run = inputs["run"] example: Optional[Example] = inputs.get("example") evaluate_strings_inputs = self.run_mapper(run) if not self.string_evaluator.requires_input:
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/string_run_evaluator.html
2afd113f9fba-6
if not self.string_evaluator.requires_input: # Hide warning about unused input evaluate_strings_inputs.pop("input", None) if example and self.example_mapper and self.string_evaluator.requires_reference: evaluate_strings_inputs.update(self.example_mapper(example)) elif self.string_evaluator.requires_reference: raise ValueError( f"Evaluator {self.name} requires an reference" " example from the dataset," f" but none was provided for run {run.id}." ) return evaluate_strings_inputs def _prepare_output(self, output: Dict[str, Any]) -> Dict[str, Any]: evaluation_result = EvaluationResult( key=self.name, comment=output.get("reasoning"), **output ) if RUN_KEY in output: # TODO: Not currently surfaced. Update evaluation_result.evaluator_info[RUN_KEY] = output[RUN_KEY] return {"feedback": evaluation_result} def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Call the evaluation chain.""" evaluate_strings_inputs = self._prepare_input(inputs) _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() chain_output = self.string_evaluator.evaluate_strings( **evaluate_strings_inputs, callbacks=callbacks, include_run_info=True, ) return self._prepare_output(chain_output) async def _acall( self, inputs: Dict[str, str], run_manager: AsyncCallbackManagerForChainRun | None = None, ) -> Dict[str, Any]: """Call the evaluation chain."""
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/string_run_evaluator.html
2afd113f9fba-7
) -> Dict[str, Any]: """Call the evaluation chain.""" evaluate_strings_inputs = self._prepare_input(inputs) _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() chain_output = await self.string_evaluator.aevaluate_strings( **evaluate_strings_inputs, callbacks=callbacks, include_run_info=True, ) return self._prepare_output(chain_output) def _prepare_evaluator_output(self, output: Dict[str, Any]) -> EvaluationResult: feedback: EvaluationResult = output["feedback"] if RUN_KEY not in feedback.evaluator_info: feedback.evaluator_info[RUN_KEY] = output[RUN_KEY] return feedback [docs] def evaluate_run( self, run: Run, example: Optional[Example] = None ) -> EvaluationResult: """Evaluate an example.""" result = self({"run": run, "example": example}, include_run_info=True) return self._prepare_evaluator_output(result) [docs] async def aevaluate_run( self, run: Run, example: Optional[Example] = None ) -> EvaluationResult: """Evaluate an example.""" result = await self.acall( {"run": run, "example": example}, include_run_info=True ) return self._prepare_evaluator_output(result) [docs] @classmethod def from_run_and_data_type( cls, evaluator: StringEvaluator, run_type: str, data_type: DataType, input_key: Optional[str] = None, prediction_key: Optional[str] = None, reference_key: Optional[str] = None,
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/string_run_evaluator.html
2afd113f9fba-8
reference_key: Optional[str] = None, tags: Optional[List[str]] = None, ) -> StringRunEvaluatorChain: """ Create a StringRunEvaluatorChain from an evaluator and the run and dataset types. This method provides an easy way to instantiate a StringRunEvaluatorChain, by taking an evaluator and information about the type of run and the data. The method supports LLM and chain runs. Args: evaluator (StringEvaluator): The string evaluator to use. run_type (str): The type of run being evaluated. Supported types are LLM and Chain. data_type (DataType): The type of dataset used in the run. input_key (str, optional): The key used to map the input from the run. prediction_key (str, optional): The key used to map the prediction from the run. reference_key (str, optional): The key used to map the reference from the dataset. tags (List[str], optional): List of tags to attach to the evaluation chain. Returns: StringRunEvaluatorChain: The instantiated evaluation chain. Raises: ValueError: If the run type is not supported, or if the evaluator requires a reference from the dataset but the reference key is not provided. """ # noqa: E501 # Configure how run inputs/predictions are passed to the evaluator if run_type == "llm": run_mapper: StringRunMapper = LLMStringRunMapper() elif run_type == "chain": run_mapper = ChainStringRunMapper( input_key=input_key, prediction_key=prediction_key ) else: raise ValueError( f"Unsupported run type {run_type}. Expected one of 'llm' or 'chain'."
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/string_run_evaluator.html
2afd113f9fba-9
) # Configure how example rows are fed as a reference string to the evaluator if reference_key is not None or data_type in (DataType.llm, DataType.chat): example_mapper = StringExampleMapper(reference_key=reference_key) elif evaluator.requires_reference: raise ValueError( f"Evaluator {evaluator.evaluation_name} requires a reference" " example from the dataset. Please specify the reference key from" " amongst the dataset outputs keys." ) else: example_mapper = None return cls( name=evaluator.evaluation_name, run_mapper=run_mapper, example_mapper=example_mapper, string_evaluator=evaluator, tags=tags, )
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/string_run_evaluator.html
8d277d257744-0
Source code for langchain.smith.evaluation.config """Configuration for run evaluators.""" from typing import Any, Dict, List, Optional, Union from langsmith import RunEvaluator from pydantic import BaseModel, Field from langchain.embeddings.base import Embeddings from langchain.evaluation.criteria.eval_chain import CRITERIA_TYPE from langchain.evaluation.embedding_distance.base import ( EmbeddingDistance as EmbeddingDistanceEnum, ) from langchain.evaluation.schema import EvaluatorType, StringEvaluator from langchain.evaluation.string_distance.base import ( StringDistance as StringDistanceEnum, ) from langchain.schema.language_model import BaseLanguageModel from langchain.schema.prompt_template import BasePromptTemplate [docs]class EvalConfig(BaseModel): """Configuration for a given run evaluator. Parameters ---------- evaluator_type : EvaluatorType The type of evaluator to use. Methods ------- get_kwargs() Get the keyword arguments for the evaluator configuration. """ evaluator_type: EvaluatorType [docs] def get_kwargs(self) -> Dict[str, Any]: """Get the keyword arguments for the load_evaluator call. Returns ------- Dict[str, Any] The keyword arguments for the load_evaluator call. """ kwargs = {} for field, val in self: if field == "evaluator_type": continue kwargs[field] = val return kwargs [docs]class RunEvalConfig(BaseModel): """Configuration for a run evaluation. Parameters ---------- evaluators : List[Union[EvaluatorType, EvalConfig]] Configurations for which evaluators to apply to the dataset run.
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/config.html
8d277d257744-1
Configurations for which evaluators to apply to the dataset run. Each can be the string of an :class:`EvaluatorType <langchain.evaluation.schema.EvaluatorType>`, such as EvaluatorType.QA, the evaluator type string ("qa"), or a configuration for a given evaluator (e.g., :class:`RunEvalConfig.QA <langchain.smith.evaluation.config.RunEvalConfig.QA>`). custom_evaluators : Optional[List[Union[RunEvaluator, StringEvaluator]]] Custom evaluators to apply to the dataset run. reference_key : Optional[str] The key in the dataset run to use as the reference string. If not provided, it will be inferred automatically. prediction_key : Optional[str] The key from the traced run's outputs dictionary to use to represent the prediction. If not provided, it will be inferred automatically. input_key : Optional[str] The key from the traced run's inputs dictionary to use to represent the input. If not provided, it will be inferred automatically. eval_llm : Optional[BaseLanguageModel] The language model to pass to any evaluators that use a language model. """ # noqa: E501 evaluators: List[Union[EvaluatorType, EvalConfig]] = Field(default_factory=list) """Configurations for which evaluators to apply to the dataset run. Each can be the string of an :class:`EvaluatorType <langchain.evaluation.schema.EvaluatorType>`, such as `EvaluatorType.QA`, the evaluator type string ("qa"), or a configuration for a given evaluator (e.g.,
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/config.html
8d277d257744-2
given evaluator (e.g., :class:`RunEvalConfig.QA <langchain.smith.evaluation.config.RunEvalConfig.QA>`).""" # noqa: E501 custom_evaluators: Optional[List[Union[RunEvaluator, StringEvaluator]]] = None """Custom evaluators to apply to the dataset run.""" reference_key: Optional[str] = None """The key in the dataset run to use as the reference string. If not provided, we will attempt to infer automatically.""" prediction_key: Optional[str] = None """The key from the traced run's outputs dictionary to use to represent the prediction. If not provided, it will be inferred automatically.""" input_key: Optional[str] = None """The key from the traced run's inputs dictionary to use to represent the input. If not provided, it will be inferred automatically.""" eval_llm: Optional[BaseLanguageModel] = None """The language model to pass to any evaluators that require one.""" class Config: arbitrary_types_allowed = True [docs] class Criteria(EvalConfig): """Configuration for a reference-free criteria evaluator. Parameters ---------- criteria : Optional[CRITERIA_TYPE] The criteria to evaluate. llm : Optional[BaseLanguageModel] The language model to use for the evaluation chain. """ criteria: Optional[CRITERIA_TYPE] = None llm: Optional[BaseLanguageModel] = None evaluator_type: EvaluatorType = EvaluatorType.CRITERIA def __init__( self, criteria: Optional[CRITERIA_TYPE] = None, **kwargs: Any ) -> None: super().__init__(criteria=criteria, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/config.html
8d277d257744-3
) -> None: super().__init__(criteria=criteria, **kwargs) [docs] class LabeledCriteria(EvalConfig): """Configuration for a labeled (with references) criteria evaluator. Parameters ---------- criteria : Optional[CRITERIA_TYPE] The criteria to evaluate. llm : Optional[BaseLanguageModel] The language model to use for the evaluation chain. """ criteria: Optional[CRITERIA_TYPE] = None llm: Optional[BaseLanguageModel] = None evaluator_type: EvaluatorType = EvaluatorType.LABELED_CRITERIA def __init__( self, criteria: Optional[CRITERIA_TYPE] = None, **kwargs: Any ) -> None: super().__init__(criteria=criteria, **kwargs) [docs] class EmbeddingDistance(EvalConfig): """Configuration for an embedding distance evaluator. Parameters ---------- embeddings : Optional[Embeddings] The embeddings to use for computing the distance. distance_metric : Optional[EmbeddingDistanceEnum] The distance metric to use for computing the distance. """ evaluator_type: EvaluatorType = EvaluatorType.EMBEDDING_DISTANCE embeddings: Optional[Embeddings] = None distance_metric: Optional[EmbeddingDistanceEnum] = None class Config: arbitrary_types_allowed = True [docs] class StringDistance(EvalConfig): """Configuration for a string distance evaluator. Parameters ---------- distance : Optional[StringDistanceEnum] The string distance metric to use. """ evaluator_type: EvaluatorType = EvaluatorType.STRING_DISTANCE distance: Optional[StringDistanceEnum] = None
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/config.html
8d277d257744-4
distance: Optional[StringDistanceEnum] = None """The string distance metric to use. damerau_levenshtein: The Damerau-Levenshtein distance. levenshtein: The Levenshtein distance. jaro: The Jaro distance. jaro_winkler: The Jaro-Winkler distance. """ normalize_score: bool = True """Whether to normalize the distance to between 0 and 1. Applies only to the Levenshtein and Damerau-Levenshtein distances.""" [docs] class QA(EvalConfig): """Configuration for a QA evaluator. Parameters ---------- prompt : Optional[BasePromptTemplate] The prompt template to use for generating the question. llm : Optional[BaseLanguageModel] The language model to use for the evaluation chain. """ evaluator_type: EvaluatorType = EvaluatorType.QA llm: Optional[BaseLanguageModel] = None prompt: Optional[BasePromptTemplate] = None [docs] class ContextQA(EvalConfig): """Configuration for a context-based QA evaluator. Parameters ---------- prompt : Optional[BasePromptTemplate] The prompt template to use for generating the question. llm : Optional[BaseLanguageModel] The language model to use for the evaluation chain. """ evaluator_type: EvaluatorType = EvaluatorType.CONTEXT_QA llm: Optional[BaseLanguageModel] = None prompt: Optional[BasePromptTemplate] = None [docs] class CoTQA(EvalConfig): """Configuration for a context-based QA evaluator. Parameters ---------- prompt : Optional[BasePromptTemplate]
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/config.html
8d277d257744-5
Parameters ---------- prompt : Optional[BasePromptTemplate] The prompt template to use for generating the question. llm : Optional[BaseLanguageModel] The language model to use for the evaluation chain. """ evaluator_type: EvaluatorType = EvaluatorType.CONTEXT_QA llm: Optional[BaseLanguageModel] = None prompt: Optional[BasePromptTemplate] = None # TODO: Trajectory
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/config.html
baafdf8a2eb9-0
Source code for langchain.smith.evaluation.runner_utils """Utilities for running language models or Chains over datasets.""" from __future__ import annotations import asyncio import functools import inspect import itertools import logging import uuid from enum import Enum from typing import ( Any, Callable, Coroutine, Dict, Iterator, List, Optional, Sequence, Tuple, Union, cast, ) from urllib.parse import urlparse, urlunparse from langsmith import Client, RunEvaluator from langsmith.schemas import Dataset, DataType, Example from langchain.callbacks.base import BaseCallbackHandler from langchain.callbacks.manager import Callbacks from langchain.callbacks.tracers.base import BaseTracer from langchain.callbacks.tracers.evaluation import EvaluatorCallbackHandler from langchain.callbacks.tracers.langchain import LangChainTracer from langchain.chains.base import Chain from langchain.chat_models.openai import ChatOpenAI from langchain.evaluation.loading import load_evaluator from langchain.evaluation.schema import EvaluatorType, StringEvaluator from langchain.schema import ChatResult, LLMResult from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import BaseMessage, messages_from_dict from langchain.schema.runnable import Runnable, RunnableConfig, RunnableLambda from langchain.smith.evaluation.config import EvalConfig, RunEvalConfig from langchain.smith.evaluation.string_run_evaluator import StringRunEvaluatorChain logger = logging.getLogger(__name__) MODEL_OR_CHAIN_FACTORY = Union[ Callable[[], Union[Chain, Runnable]], BaseLanguageModel, Callable[[dict], Any], Runnable, Chain, ] MCF = Union[Callable[[], Union[Chain, Runnable]], BaseLanguageModel]
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/runner_utils.html
baafdf8a2eb9-1
MCF = Union[Callable[[], Union[Chain, Runnable]], BaseLanguageModel] [docs]class InputFormatError(Exception): """Raised when the input format is invalid.""" ## Shared Utilities def _get_eval_project_url(api_url: str, project_id: str) -> str: """Get the project url from the api url.""" parsed = urlparse(api_url) hostname = parsed.hostname or "" if "api." in hostname: hostname = hostname.replace("api.", "", 1) if "localhost" in hostname: # Remove the port hostname = "localhost" url = urlunparse(parsed._replace(netloc=hostname)) return f"{url}/projects/p/{project_id}?eval=true" def _wrap_in_chain_factory( llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, dataset_name: str = "<my_dataset>", ) -> MCF: """Forgive the user if they pass in a chain without memory instead of a chain factory. It's a common mistake. Raise a more helpful error message as well.""" if isinstance(llm_or_chain_factory, Chain): chain = llm_or_chain_factory chain_class = chain.__class__.__name__ if llm_or_chain_factory.memory is not None: memory_class = chain.memory.__class__.__name__ raise ValueError( "Cannot directly evaluate a chain with stateful memory." " To evaluate this chain, pass in a chain constructor" " that initializes fresh memory each time it is called." " This will safegaurd against information" " leakage between dataset examples." "\nFor example:\n\n" "def chain_constructor():\n"
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/runner_utils.html
baafdf8a2eb9-2
"\nFor example:\n\n" "def chain_constructor():\n" f" new_memory = {memory_class}(...)\n" f" return {chain_class}" "(memory=new_memory, ...)\n\n" f'run_on_dataset("{dataset_name}", chain_constructor, ...)' ) logger.warning( "Directly passing in a chain is not recommended as chains may have state." " This can lead to unexpected behavior as the " "same chain instance could be used across multiple datasets. Instead," " please pass a chain constructor that creates a new " "chain with fresh memory each time it is called. This will safeguard" " against information leakage between dataset examples. " "\nFor example:\n\n" "def chain_constructor():\n" f" return {chain_class}(memory=new_memory, ...)\n\n" f'run_on_dataset("{dataset_name}", chain_constructor, ...)' ) return lambda: chain elif isinstance(llm_or_chain_factory, BaseLanguageModel): return llm_or_chain_factory elif isinstance(llm_or_chain_factory, Runnable): # Memory may exist here, but it's not elegant to check all those cases. lcf = llm_or_chain_factory return lambda: lcf elif callable(llm_or_chain_factory): try: _model = llm_or_chain_factory() # type: ignore[call-arg] except TypeError: # It's an arbitrary function, wrap it in a RunnableLambda user_func = cast(Callable, llm_or_chain_factory) sig = inspect.signature(user_func) logger.info(f"Wrapping function {sig} as RunnableLambda.")
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/runner_utils.html
baafdf8a2eb9-3
logger.info(f"Wrapping function {sig} as RunnableLambda.") wrapped = RunnableLambda(user_func) return lambda: wrapped constructor = cast(Callable, llm_or_chain_factory) if isinstance(_model, BaseLanguageModel): # It's not uncommon to do an LLM constructor instead of raw LLM, # so we'll unpack it for the user. return _model elif not isinstance(_model, Runnable): # This is unlikely to happen - a constructor for a model function return lambda: RunnableLambda(constructor) else: # Typical correct case return constructor # noqa return llm_or_chain_factory def _first_example(examples: Iterator[Example]) -> Tuple[Example, Iterator[Example]]: """Get the first example while chaining it back and preserving the iterator.""" try: example: Example = next(examples) except StopIteration: raise ValueError("No examples provided.") return example, itertools.chain([example], examples) def _get_prompt(inputs: Dict[str, Any]) -> str: """Get prompt from inputs. Args: inputs: The input dictionary. Returns: A string prompt. Raises: InputFormatError: If the input format is invalid. """ if not inputs: raise InputFormatError("Inputs should not be empty.") prompts = [] if "prompt" in inputs: if not isinstance(inputs["prompt"], str): raise InputFormatError( "Expected string for 'prompt', got" f" {type(inputs['prompt']).__name__}" ) prompts = [inputs["prompt"]] elif "prompts" in inputs:
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/runner_utils.html
baafdf8a2eb9-4
prompts = [inputs["prompt"]] elif "prompts" in inputs: if not isinstance(inputs["prompts"], list) or not all( isinstance(i, str) for i in inputs["prompts"] ): raise InputFormatError( "Expected list of strings for 'prompts'," f" got {type(inputs['prompts']).__name__}" ) prompts = inputs["prompts"] elif len(inputs) == 1: prompt_ = next(iter(inputs.values())) if isinstance(prompt_, str): prompts = [prompt_] elif isinstance(prompt_, list) and all(isinstance(i, str) for i in prompt_): prompts = prompt_ else: raise InputFormatError(f"LLM Run expects string prompt input. Got {inputs}") else: raise InputFormatError( f"LLM Run expects 'prompt' or 'prompts' in inputs. Got {inputs}" ) if len(prompts) == 1: return prompts[0] else: raise InputFormatError( f"LLM Run expects single prompt input. Got {len(prompts)} prompts." ) def _get_messages(inputs: Dict[str, Any]) -> List[BaseMessage]: """Get Chat Messages from inputs. Args: inputs: The input dictionary. Returns: A list of chat messages. Raises: InputFormatError: If the input format is invalid. """ if not inputs: raise InputFormatError("Inputs should not be empty.") if "messages" in inputs: single_input = inputs["messages"] elif len(inputs) == 1:
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/runner_utils.html
baafdf8a2eb9-5
single_input = inputs["messages"] elif len(inputs) == 1: single_input = next(iter(inputs.values())) else: raise InputFormatError( f"Chat Run expects 'messages' in inputs when example has multiple" f" input keys. Got {inputs}" ) if isinstance(single_input, list) and all( isinstance(i, dict) for i in single_input ): raw_messages = [single_input] elif isinstance(single_input, list) and all( isinstance(i, list) for i in single_input ): raw_messages = single_input else: raise InputFormatError( f"Chat Run expects List[dict] or List[List[dict]] values for" f" 'messages' key input. Got {inputs}" ) if len(raw_messages) == 1: return messages_from_dict(raw_messages[0]) else: raise InputFormatError( f"Chat Run expects single List[dict] or List[List[dict]] 'messages'" f" input. Got {len(raw_messages)} messages from inputs {inputs}" ) def _get_project_name( project_name: Optional[str], llm_or_chain_factory: MCF, ) -> str: """ Get the project name. Args: project_name: The project name if manually specified. llm_or_chain_factory: The Chain or language model constructor. Returns: The project name. """ if project_name is not None: return project_name if isinstance(llm_or_chain_factory, BaseLanguageModel): model_name = llm_or_chain_factory.__class__.__name__ else:
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/runner_utils.html
baafdf8a2eb9-6
model_name = llm_or_chain_factory.__class__.__name__ else: model_name = llm_or_chain_factory().__class__.__name__ hex = uuid.uuid4().hex return f"{hex}-{model_name}" ## Shared Validation Utilities def _validate_example_inputs_for_language_model( first_example: Example, input_mapper: Optional[Callable[[Dict], Any]], ) -> None: if input_mapper: prompt_input = input_mapper(first_example.inputs) if not isinstance(prompt_input, str) and not ( isinstance(prompt_input, list) and all(isinstance(msg, BaseMessage) for msg in prompt_input) ): raise InputFormatError( "When using an input_mapper to prepare dataset example inputs" " for an LLM or chat model, the output must a single string or" " a list of chat messages." f"\nGot: {prompt_input} of type {type(prompt_input)}." ) else: try: _get_prompt(first_example.inputs) except InputFormatError: try: _get_messages(first_example.inputs) except InputFormatError: raise InputFormatError( "Example inputs do not match language model input format. " "Expected a dictionary with messages or a single prompt." f" Got: {first_example.inputs}" " Please update your dataset OR provide an input_mapper" " to convert the example.inputs to a compatible format" " for the llm or chat model you wish to evaluate." ) def _validate_example_inputs_for_chain( first_example: Example, chain: Chain, input_mapper: Optional[Callable[[Dict], Any]], ) -> None:
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/runner_utils.html
baafdf8a2eb9-7
input_mapper: Optional[Callable[[Dict], Any]], ) -> None: """Validate that the example inputs match the chain input keys.""" if input_mapper: first_inputs = input_mapper(first_example.inputs) missing_keys = set(chain.input_keys).difference(first_inputs) if not isinstance(first_inputs, dict): raise InputFormatError( "When using an input_mapper to prepare dataset example" " inputs for a chain, the mapped value must be a dictionary." f"\nGot: {first_inputs} of type {type(first_inputs)}." ) if missing_keys: raise InputFormatError( "Missing keys after loading example using input_mapper." f"\nExpected: {chain.input_keys}. Got: {first_inputs.keys()}" ) else: first_inputs = first_example.inputs missing_keys = set(chain.input_keys).difference(first_inputs) if len(first_inputs) == 1 and len(chain.input_keys) == 1: # We can pass this through the run method. # Refrain from calling to validate. pass elif missing_keys: raise InputFormatError( "Example inputs missing expected chain input keys." " Please provide an input_mapper to convert the example.inputs" " to a compatible format for the chain you wish to evaluate." f"Expected: {chain.input_keys}. " f"Got: {first_inputs.keys()}" ) def _validate_example_inputs( examples: Iterator[Example], llm_or_chain_factory: MCF, input_mapper: Optional[Callable[[Dict], Any]], ) -> Iterator[Example]: """Validate that the example inputs are valid for the model."""
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/runner_utils.html
baafdf8a2eb9-8
"""Validate that the example inputs are valid for the model.""" first_example, examples = _first_example(examples) if isinstance(llm_or_chain_factory, BaseLanguageModel): _validate_example_inputs_for_language_model(first_example, input_mapper) else: chain = llm_or_chain_factory() if isinstance(chain, Chain): # Otherwise it's a runnable _validate_example_inputs_for_chain(first_example, chain, input_mapper) elif isinstance(chain, Runnable): logger.debug(f"Skipping input validation for {chain}") return examples ## Shared Evaluator Setup Utilities def _setup_evaluation( llm_or_chain_factory: MCF, examples: Iterator[Example], evaluation: Optional[RunEvalConfig], data_type: DataType, ) -> Tuple[Optional[List[RunEvaluator]], Iterator[Example]]: """Configure the evaluators to run on the results of the chain.""" if evaluation: first_example, examples = _first_example(examples) if isinstance(llm_or_chain_factory, BaseLanguageModel): run_inputs, run_outputs = None, None run_type = "llm" else: run_type = "chain" if data_type in (DataType.chat, DataType.llm): val = data_type.value if isinstance(data_type, Enum) else data_type raise ValueError( "Cannot evaluate a chain on dataset with " f"data_type={val}. " "Please specify a dataset with the default 'kv' data type." ) chain = llm_or_chain_factory() run_inputs = chain.input_keys if isinstance(chain, Chain) else None run_outputs = chain.output_keys if isinstance(chain, Chain) else None
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/runner_utils.html
baafdf8a2eb9-9
run_outputs = chain.output_keys if isinstance(chain, Chain) else None run_evaluators = _load_run_evaluators( evaluation, run_type, data_type, list(first_example.outputs) if first_example.outputs else None, run_inputs, run_outputs, ) else: # TODO: Create a default helpfulness evaluator run_evaluators = None return run_evaluators, examples def _determine_input_key( config: RunEvalConfig, run_inputs: Optional[List[str]], ) -> Optional[str]: input_key = None if config.input_key: input_key = config.input_key if run_inputs and input_key not in run_inputs: raise ValueError(f"Input key {input_key} not in run inputs {run_inputs}") elif run_inputs and len(run_inputs) == 1: input_key = run_inputs[0] elif run_inputs is not None and len(run_inputs) > 1: raise ValueError( f"Must specify input key for model with multiple inputs: {run_inputs}" ) return input_key def _determine_prediction_key( config: RunEvalConfig, run_outputs: Optional[List[str]], ) -> Optional[str]: prediction_key = None if config.prediction_key: prediction_key = config.prediction_key if run_outputs and prediction_key not in run_outputs: raise ValueError( f"Prediction key {prediction_key} not in run outputs {run_outputs}" ) elif run_outputs and len(run_outputs) == 1: prediction_key = run_outputs[0] elif run_outputs is not None and len(run_outputs) > 1: raise ValueError(
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/runner_utils.html
baafdf8a2eb9-10
raise ValueError( f"Must specify prediction key for model" f" with multiple outputs: {run_outputs}" ) return prediction_key def _determine_reference_key( config: RunEvalConfig, example_outputs: Optional[List[str]], ) -> Optional[str]: if config.reference_key: reference_key = config.reference_key if example_outputs and reference_key not in example_outputs: raise ValueError( f"Reference key {reference_key} not in Dataset" f" example outputs: {example_outputs}" ) elif example_outputs and len(example_outputs) == 1: reference_key = list(example_outputs)[0] else: reference_key = None return reference_key def _construct_run_evaluator( eval_config: Union[EvaluatorType, EvalConfig], eval_llm: BaseLanguageModel, run_type: str, data_type: DataType, example_outputs: Optional[List[str]], reference_key: Optional[str], input_key: Optional[str], prediction_key: Optional[str], ) -> RunEvaluator: if isinstance(eval_config, EvaluatorType): evaluator_ = load_evaluator(eval_config, llm=eval_llm) eval_type_tag = eval_config.value else: kwargs = {"llm": eval_llm, **eval_config.get_kwargs()} evaluator_ = load_evaluator(eval_config.evaluator_type, **kwargs) eval_type_tag = eval_config.evaluator_type.value if isinstance(evaluator_, StringEvaluator): if evaluator_.requires_reference and reference_key is None: raise ValueError( f"Must specify reference_key in RunEvalConfig to use" f" evaluator of type {eval_type_tag} with"
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/runner_utils.html
baafdf8a2eb9-11
f" evaluator of type {eval_type_tag} with" f" dataset with multiple output keys: {example_outputs}." ) run_evaluator = StringRunEvaluatorChain.from_run_and_data_type( evaluator_, run_type, data_type, input_key=input_key, prediction_key=prediction_key, reference_key=reference_key, tags=[eval_type_tag], ) else: raise NotImplementedError( f"Run evaluator for {eval_type_tag} is not implemented" ) return run_evaluator def _load_run_evaluators( config: RunEvalConfig, run_type: str, data_type: DataType, example_outputs: Optional[List[str]], run_inputs: Optional[List[str]], run_outputs: Optional[List[str]], ) -> List[RunEvaluator]: """ Load run evaluators from a configuration. Args: config: Configuration for the run evaluators. Returns: A list of run evaluators. """ eval_llm = config.eval_llm or ChatOpenAI(model="gpt-4", temperature=0.0) run_evaluators = [] input_key = _determine_input_key(config, run_inputs) prediction_key = _determine_prediction_key(config, run_outputs) reference_key = _determine_reference_key(config, example_outputs) for eval_config in config.evaluators: run_evaluator = _construct_run_evaluator( eval_config, eval_llm, run_type, data_type, example_outputs, reference_key, input_key, prediction_key, ) run_evaluators.append(run_evaluator)
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/runner_utils.html
baafdf8a2eb9-12
prediction_key, ) run_evaluators.append(run_evaluator) custom_evaluators = config.custom_evaluators or [] for custom_evaluator in custom_evaluators: if isinstance(custom_evaluator, RunEvaluator): run_evaluators.append(custom_evaluator) elif isinstance(custom_evaluator, StringEvaluator): run_evaluators.append( StringRunEvaluatorChain.from_run_and_data_type( custom_evaluator, run_type, data_type, input_key=input_key, prediction_key=prediction_key, reference_key=reference_key, ) ) else: raise ValueError( f"Unsupported custom evaluator: {custom_evaluator}." f" Expected RunEvaluator or StringEvaluator." ) return run_evaluators ### Async Helpers async def _arun_llm( llm: BaseLanguageModel, inputs: Dict[str, Any], *, tags: Optional[List[str]] = None, callbacks: Callbacks = None, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Union[str, BaseMessage]: """Asynchronously run the language model. Args: llm: The language model to run. inputs: The input dictionary. tags: Optional tags to add to the run. callbacks: Optional callbacks to use during the run. input_mapper: Optional function to map inputs to the expected format. Returns: The LLMResult or ChatResult. Raises: ValueError: If the LLM type is unsupported. InputFormatError: If the input format is invalid. """ if input_mapper is not None:
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/runner_utils.html
baafdf8a2eb9-13
""" if input_mapper is not None: prompt_or_messages = input_mapper(inputs) if isinstance(prompt_or_messages, str): return await llm.apredict( prompt_or_messages, callbacks=callbacks, tags=tags ) elif isinstance(prompt_or_messages, list) and all( isinstance(msg, BaseMessage) for msg in prompt_or_messages ): return await llm.apredict_messages( prompt_or_messages, callbacks=callbacks, tags=tags ) else: raise InputFormatError( "Input mapper returned invalid format" f" {prompt_or_messages}" "\nExpected a single string or list of chat messages." ) else: try: prompt = _get_prompt(inputs) llm_output: Union[str, BaseMessage] = await llm.apredict( prompt, callbacks=callbacks, tags=tags ) except InputFormatError: messages = _get_messages(inputs) llm_output = await llm.apredict_messages( messages, callbacks=callbacks, tags=tags ) return llm_output async def _arun_chain( chain: Union[Chain, Runnable], inputs: Dict[str, Any], callbacks: Callbacks, *, tags: Optional[List[str]] = None, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Union[dict, str]: """Run a chain asynchronously on inputs.""" inputs_ = inputs if input_mapper is None else input_mapper(inputs) if isinstance(chain, Chain): if isinstance(inputs_, dict) and len(inputs_) == 1: val = next(iter(inputs_.values()))
async def _arun_chain(
    chain: Union[Chain, Runnable],
    inputs: Dict[str, Any],
    callbacks: Callbacks,
    *,
    tags: Optional[List[str]] = None,
    input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[dict, str]:
    """Asynchronously run a chain (or generic runnable) on one example's inputs."""
    prepared = input_mapper(inputs) if input_mapper is not None else inputs
    if not isinstance(chain, Chain):
        # Generic runnable: tags/callbacks travel via a RunnableConfig.
        config = RunnableConfig(tags=tags or [], callbacks=callbacks)
        return await chain.ainvoke(prepared, config=config)
    if isinstance(prepared, dict) and len(prepared) == 1:
        # Unwrap single-key dicts so single-input chains accept them directly.
        sole_value = next(iter(prepared.values()))
        return await chain.acall(sole_value, callbacks=callbacks, tags=tags)
    return await chain.acall(prepared, callbacks=callbacks, tags=tags)
async def _arun_llm_or_chain(
    example: Example,
    llm_or_chain_factory: MCF,
    n_repetitions: int,
    *,
    tags: Optional[List[str]] = None,
    callbacks: Optional[List[BaseCallbackHandler]] = None,
    input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[List[dict], List[str], List[LLMResult], List[ChatResult]]:
    """Asynchronously run the Chain or language model on an example.

    Args:
        example: The example to run.
        llm_or_chain_factory: The Chain or language model constructor to run.
        n_repetitions: The number of times to run the model on each example.
        tags: Optional tags to add to the run.
        callbacks: Optional callbacks to use during the run.
        input_mapper: Optional function to map the input to the expected format.

    Returns:
        A list of outputs, one per repetition. A failed repetition is
        recorded as an ``{"Error": <message>}`` dict instead of raising.
    """
    if callbacks:
        # Point any example-aware tracers at this example, remembering their
        # previous example ids so they can be restored afterwards.
        previous_example_ids = [
            getattr(tracer, "example_id", None) for tracer in callbacks
        ]
        for tracer in callbacks:
            if hasattr(tracer, "example_id"):
                tracer.example_id = example.id
    else:
        previous_example_ids = None
    outputs = []
    chain_or_llm = (
        "LLM" if isinstance(llm_or_chain_factory, BaseLanguageModel) else "Chain"
    )
    for _ in range(n_repetitions):
        try:
            if isinstance(llm_or_chain_factory, BaseLanguageModel):
                output: Any = await _arun_llm(
                    llm_or_chain_factory,
                    example.inputs,
                    tags=tags,
                    callbacks=callbacks,
                    input_mapper=input_mapper,
                )
            else:
                # A fresh chain per repetition keeps stateful chains (e.g.
                # with memory) from leaking state across runs.
                chain = llm_or_chain_factory()
                output = await _arun_chain(
                    chain,
                    example.inputs,
                    tags=tags,
                    callbacks=callbacks,
                    input_mapper=input_mapper,
                )
            outputs.append(output)
        except Exception as e:
            # Include the inputs in the warning, matching the sync
            # _run_llm_or_chain implementation (previously omitted here).
            logger.warning(
                f"{chain_or_llm} failed for example {example.id} with inputs:"
                f" {example.inputs}.\nError: {e}",
            )
            outputs.append({"Error": str(e)})
    if callbacks and previous_example_ids:
        # Restore the example ids the tracers carried before this call.
        for example_id, tracer in zip(previous_example_ids, callbacks):
            if hasattr(tracer, "example_id"):
                tracer.example_id = example_id
    return outputs
async def _gather_with_concurrency(
    n: int,
    initializer: Callable[[], Coroutine[Any, Any, Any]],
    *async_funcs: Callable[
        [Sequence[BaseCallbackHandler], Dict], Coroutine[Any, Any, Any]
    ],
) -> List[Any]:
    """Run coroutines with a concurrency limit.

    Args:
        n: The maximum number of concurrent tasks.
        initializer: A coroutine that initializes shared resources (a set of
            callbacks) for the tasks; awaited ``n`` times, once per slot.
        async_funcs: The async_funcs to be run concurrently. Each receives a
            callback set borrowed from the pool plus a shared job-state dict.

    Returns:
        A list of results from the coroutines, in submission order.
    """
    semaphore = asyncio.Semaphore(n)
    job_state = {"num_processed": 0}
    # Pool of callback sets; each task borrows one and returns it when done.
    callback_queue: asyncio.Queue[Sequence[BaseCallbackHandler]] = asyncio.Queue()
    for _ in range(n):
        callback_queue.put_nowait(await initializer())

    async def run_coroutine_with_semaphore(
        async_func: Callable[
            [Sequence[BaseCallbackHandler], Dict], Coroutine[Any, Any, Any]
        ]
    ) -> Any:
        async with semaphore:
            callbacks = await callback_queue.get()
            try:
                result = await async_func(callbacks, job_state)
            finally:
                # Always return the callback set to the pool, even on error.
                callback_queue.put_nowait(callbacks)
            return result

    results = await asyncio.gather(
        *(run_coroutine_with_semaphore(function) for function in async_funcs)
    )
    # Drain the pool and let tracing/evaluation callbacks flush their pending
    # work before returning. NOTE: the previous `while callback_queue:` loop
    # relied on the queue always being truthy (asyncio.Queue defines no
    # __bool__) and only exited via the QueueEmpty break; make that explicit.
    while True:
        try:
            callbacks = callback_queue.get_nowait()
        except asyncio.QueueEmpty:
            break
        for callback in callbacks:
            if isinstance(callback, (LangChainTracer, EvaluatorCallbackHandler)):
                callback.wait_for_futures()
    return results
async def _callbacks_initializer(
    project_name: Optional[str],
    client: Client,
    run_evaluators: Sequence[RunEvaluator],
    evaluation_handler_collector: List[EvaluatorCallbackHandler],
) -> List[BaseTracer]:
    """Build the tracing/evaluation callbacks shared across one task slot.

    Args:
        project_name: The project name for the tracer; if falsy, no tracer
            is created.
        client: The client to use for the tracer.
        run_evaluators: The evaluators to run.
        evaluation_handler_collector: A list the created evaluation handler is
            appended to, so the caller can later wait for it to finish.

    Returns:
        The callbacks for this task slot.
    """
    handlers: List[BaseTracer] = []
    if project_name:
        tracer = LangChainTracer(
            project_name=project_name, client=client, use_threading=False
        )
        handlers.append(tracer)
    if run_evaluators:
        evaluation_handler = EvaluatorCallbackHandler(
            client=client,
            evaluators=run_evaluators,
            # We already have concurrency, don't want to overload the machine
            max_workers=1,
        )
        handlers.append(evaluation_handler)
        evaluation_handler_collector.append(evaluation_handler)
    return handlers
async def _arun_on_examples(
    client: Client,
    examples: Iterator[Example],
    llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
    *,
    evaluation: Optional[RunEvalConfig] = None,
    concurrency_level: int = 5,
    num_repetitions: int = 1,
    project_name: Optional[str] = None,
    verbose: bool = False,
    tags: Optional[List[str]] = None,
    input_mapper: Optional[Callable[[Dict], Any]] = None,
    data_type: DataType = DataType.kv,
) -> Dict[str, Any]:
    """Asynchronously run the chain on examples, tracing to ``project_name``.

    Args:
        client: LangSmith client to use to log feedback and runs.
        examples: Examples to run the model or chain over.
        llm_or_chain_factory: Language model or Chain constructor to run over
            the dataset. The Chain constructor is used to permit independent
            calls on each example without carrying over state.
        evaluation: Optional evaluation configuration to use when evaluating.
        concurrency_level: The number of async tasks to run concurrently.
        num_repetitions: Number of times to run the model on each example.
            Useful when testing success rates or generating confidence
            intervals.
        project_name: Project name to use when tracing runs.
        verbose: Whether to print progress.
        tags: Tags to add to each run in the project.
        input_mapper: Function to map an Example's inputs dictionary to the
            format expected by the model being evaluated.
        data_type: The dataset's data type, used to determine how to
            deserialize the reference data and check model compatibility.

    Returns:
        A dictionary mapping example ids to the model outputs.
    """
    wrapped_model = _wrap_in_chain_factory(llm_or_chain_factory)
    project_name = _get_project_name(project_name, wrapped_model)
    run_evaluators, examples = _setup_evaluation(
        wrapped_model, examples, evaluation, data_type
    )
    examples = _validate_example_inputs(examples, wrapped_model, input_mapper)
    results: Dict[str, List[Any]] = {}

    async def process_example(
        example: Example, callbacks: List[BaseCallbackHandler], job_state: dict
    ) -> None:
        """Run one example and record its outputs and progress."""
        outputs = await _arun_llm_or_chain(
            example,
            wrapped_model,
            num_repetitions,
            tags=tags,
            callbacks=callbacks,
            input_mapper=input_mapper,
        )
        results[str(example.id)] = outputs
        job_state["num_processed"] += 1
        if verbose:
            print(
                f"Processed examples: {job_state['num_processed']}",
                end="\r",
                flush=True,
            )

    evaluation_handlers: List[EvaluatorCallbackHandler] = []
    await _gather_with_concurrency(
        concurrency_level,
        functools.partial(
            _callbacks_initializer,
            project_name=project_name,
            client=client,
            evaluation_handler_collector=evaluation_handlers,
            run_evaluators=run_evaluators or [],
        ),
        *(functools.partial(process_example, e) for e in examples),
    )
    # Let every evaluator finish posting feedback before returning.
    for handler in evaluation_handlers:
        handler.wait_for_futures()
    return results
def _run_llm(
    llm: BaseLanguageModel,
    inputs: Dict[str, Any],
    callbacks: Callbacks,
    *,
    tags: Optional[List[str]] = None,
    input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[str, BaseMessage]:
    """
    Run the language model on the example.

    Args:
        llm: The language model to run.
        inputs: The input dictionary.
        callbacks: The callbacks to use during the run.
        tags: Optional tags to add to the run.
        input_mapper: function to map to the inputs dictionary from an Example

    Returns:
        The model completion (a string or a chat message).

    Raises:
        InputFormatError: If the input format is invalid.
    """
    if input_mapper is not None:
        prompt_or_messages = input_mapper(inputs)
        if isinstance(prompt_or_messages, str):
            llm_output: Union[str, BaseMessage] = llm.predict(
                prompt_or_messages, callbacks=callbacks, tags=tags
            )
        elif isinstance(prompt_or_messages, list) and all(
            isinstance(msg, BaseMessage) for msg in prompt_or_messages
        ):
            llm_output = llm.predict_messages(
                prompt_or_messages, callbacks=callbacks, tags=tags
            )
        else:
            raise InputFormatError(
                "Input mapper returned invalid format: "
                f" {prompt_or_messages}"
                "\nExpected a single string or list of chat messages."
            )
    else:
        try:
            llm_prompts = _get_prompt(inputs)
            llm_output = llm.predict(llm_prompts, callbacks=callbacks, tags=tags)
        except InputFormatError:
            llm_messages = _get_messages(inputs)
            # FIX: pass tags here too, for parity with the prompt branch and
            # with the async _arun_llm (tags were previously dropped).
            llm_output = llm.predict_messages(
                llm_messages, callbacks=callbacks, tags=tags
            )
    return llm_output
def _run_chain(
    chain: Union[Chain, Runnable],
    inputs: Dict[str, Any],
    callbacks: Callbacks,
    *,
    tags: Optional[List[str]] = None,
    input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[Dict, str]:
    """Synchronously run a chain (or generic runnable) on one example's inputs."""
    prepared = input_mapper(inputs) if input_mapper is not None else inputs
    if not isinstance(chain, Chain):
        # Generic runnable: tags/callbacks travel via a RunnableConfig.
        config = RunnableConfig(tags=tags or [], callbacks=callbacks)
        return chain.invoke(prepared, config=config)
    if isinstance(prepared, dict) and len(prepared) == 1:
        # Unwrap single-key dicts so single-input chains accept them directly.
        sole_value = next(iter(prepared.values()))
        return chain(sole_value, callbacks=callbacks, tags=tags)
    return chain(prepared, callbacks=callbacks, tags=tags)
def _run_llm_or_chain(
    example: Example,
    llm_or_chain_factory: MCF,
    n_repetitions: int,
    *,
    tags: Optional[List[str]] = None,
    callbacks: Optional[List[BaseCallbackHandler]] = None,
    input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[List[dict], List[str], List[LLMResult], List[ChatResult]]:
    """
    Run the Chain or language model synchronously.

    Args:
        example: The example to run.
        llm_or_chain_factory: The Chain or language model constructor to run.
        n_repetitions: The number of times to run the model on each example.
        tags: Optional tags to add to the run.
        callbacks: Optional callbacks to use during the run.

    Returns:
        A list of outputs, one per repetition; a failed repetition is
        recorded as an ``{"Error": <message>}`` dict instead of raising.
    """
    previous_example_ids: Optional[List[Any]]
    if callbacks:
        # Point example-aware tracers at this example; remember their prior
        # example ids so they can be restored afterwards.
        previous_example_ids = [
            getattr(tracer, "example_id", None) for tracer in callbacks
        ]
        for tracer in callbacks:
            if hasattr(tracer, "example_id"):
                tracer.example_id = example.id
    else:
        previous_example_ids = None
    is_llm = isinstance(llm_or_chain_factory, BaseLanguageModel)
    chain_or_llm = "LLM" if is_llm else "Chain"
    outputs = []
    for _ in range(n_repetitions):
        try:
            if is_llm:
                result: Any = _run_llm(
                    llm_or_chain_factory,
                    example.inputs,
                    callbacks,
                    tags=tags,
                    input_mapper=input_mapper,
                )
            else:
                # Fresh chain per repetition so stateful chains don't leak
                # state across runs.
                chain = llm_or_chain_factory()
                result = _run_chain(
                    chain,
                    example.inputs,
                    callbacks,
                    tags=tags,
                    input_mapper=input_mapper,
                )
            outputs.append(result)
        except Exception as e:
            logger.warning(
                f"{chain_or_llm} failed for example {example.id} with inputs:"
                f" {example.inputs}.\nError: {e}",
            )
            outputs.append({"Error": str(e)})
    if callbacks and previous_example_ids:
        # Restore the example ids the tracers carried before this call.
        for example_id, tracer in zip(previous_example_ids, callbacks):
            if hasattr(tracer, "example_id"):
                tracer.example_id = example_id
    return outputs
def _run_on_examples(
    client: Client,
    examples: Iterator[Example],
    llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
    *,
    evaluation: Optional[RunEvalConfig] = None,
    num_repetitions: int = 1,
    project_name: Optional[str] = None,
    verbose: bool = False,
    tags: Optional[List[str]] = None,
    input_mapper: Optional[Callable[[Dict], Any]] = None,
    data_type: DataType = DataType.kv,
) -> Dict[str, Any]:
    """Run the Chain or language model on examples, tracing to ``project_name``.

    Args:
        client: LangSmith client to use to log feedback and runs.
        examples: Examples to run the model or chain over.
        llm_or_chain_factory: Language model or Chain constructor to run over
            the dataset. The Chain constructor is used to permit independent
            calls on each example without carrying over state.
        evaluation: Optional evaluation configuration to use when evaluating.
        num_repetitions: Number of times to run the model on each example.
            Useful when testing success rates or generating confidence
            intervals.
        project_name: Name of the project to store the traces in.
        verbose: Whether to print progress.
        tags: Tags to add to each run in the project.
        input_mapper: Function to map an Example's inputs dictionary to the
            format expected by the model being evaluated.
        data_type: The dataset's data type, used to determine how to
            deserialize the reference data and check model compatibility.

    Returns:
        A dictionary mapping example ids to the model outputs.
    """
    results: Dict[str, Any] = {}
    wrapped_model = _wrap_in_chain_factory(llm_or_chain_factory)
    project_name = _get_project_name(project_name, wrapped_model)
    tracer = LangChainTracer(
        project_name=project_name, client=client, use_threading=False
    )
    run_evaluators, examples = _setup_evaluation(
        wrapped_model, examples, evaluation, data_type
    )
    examples = _validate_example_inputs(examples, wrapped_model, input_mapper)
    evaluation_handler = EvaluatorCallbackHandler(
        evaluators=run_evaluators or [],
        client=client,
    )
    callbacks: List[BaseCallbackHandler] = [tracer, evaluation_handler]
    for i, example in enumerate(examples):
        results[str(example.id)] = _run_llm_or_chain(
            example,
            wrapped_model,
            num_repetitions,
            tags=tags,
            callbacks=callbacks,
            input_mapper=input_mapper,
        )
        if verbose:
            print(f"{i+1} processed", flush=True, end="\r")
    # Flush pending trace uploads and evaluator feedback before returning.
    tracer.wait_for_futures()
    evaluation_handler.wait_for_futures()
    return results
def _prepare_eval_run(
    client: Client,
    dataset_name: str,
    llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
    project_name: Optional[str],
) -> Tuple[MCF, str, Dataset, Iterator[Example]]:
    """Create the tracing project and load the dataset for an eval run.

    Args:
        client: LangSmith client used to create the project and read the
            dataset.
        dataset_name: Name of the dataset to evaluate over.
        llm_or_chain_factory: Model or chain constructor; wrapped into a
            chain factory.
        project_name: Requested project name, or None to derive one.

    Returns:
        The wrapped model factory, resolved project name, the dataset, and an
        iterator over the dataset's examples.

    Raises:
        ValueError: If a project with the resolved name already exists.
    """
    llm_or_chain_factory = _wrap_in_chain_factory(llm_or_chain_factory, dataset_name)
    project_name = _get_project_name(project_name, llm_or_chain_factory)
    try:
        project = client.create_project(project_name)
    except ValueError as e:
        if "already exists " not in str(e):
            # Unrelated failure: re-raise with the original traceback intact
            # (bare `raise`, not `raise e`).
            raise
        # Chain the original error so the underlying cause stays visible.
        raise ValueError(
            f"Project {project_name} already exists. Please use a different name."
        ) from e
    project_url = _get_eval_project_url(client.api_url, project.id)
    print(
        f"View the evaluation results for project '{project_name}' at:\n{project_url}"
    )
    dataset = client.read_dataset(dataset_name=dataset_name)
    examples = client.list_examples(dataset_id=str(dataset.id))
    return llm_or_chain_factory, project_name, dataset, examples
async def arun_on_dataset(
    client: Client,
    dataset_name: str,
    llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
    *,
    evaluation: Optional[RunEvalConfig] = None,
    concurrency_level: int = 5,
    num_repetitions: int = 1,
    project_name: Optional[str] = None,
    verbose: bool = False,
    tags: Optional[List[str]] = None,
    input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Dict[str, Any]:
    """
    Asynchronously run the Chain or language model on a dataset
    and store traces to the specified project name.

    Args:
        client: LangSmith client to use to read the dataset, and to
            log feedback and run traces.
        dataset_name: Name of the dataset to run the chain on.
        llm_or_chain_factory: Language model or Chain constructor to run
            over the dataset. The Chain constructor is used to permit
            independent calls on each example without carrying over state.
        evaluation: Optional evaluation configuration to use when evaluating.
        concurrency_level: The number of async tasks to run concurrently.
        num_repetitions: Number of times to run the model on each example.
            This is useful when testing success rates or generating
            confidence intervals.
        project_name: Name of the project to store the traces in.
            Defaults to {dataset_name}-{chain class name}-{datetime}.
        verbose: Whether to print progress.
        tags: Tags to add to each run in the project.
        input_mapper: A function to map to the inputs dictionary from an
            Example to the format expected by the model to be evaluated.
            This is useful if your model needs to deserialize more complex
            schema or if your dataset has inputs with keys that differ from
            what is expected by your chain or agent.

    Returns:
        A dictionary containing the run's project name and the
        resulting model outputs.

    For the synchronous version, see :func:`run_on_dataset`, which also
    documents usage examples (including custom evaluators created by
    subclassing :class:`StringEvaluator
    <langchain.evaluation.schema.StringEvaluator>` or LangSmith's
    `RunEvaluator` classes).
    """
    wrapped_model, project_name, dataset, examples = _prepare_eval_run(
        client, dataset_name, llm_or_chain_factory, project_name
    )
    results = await _arun_on_examples(
        client,
        examples,
        wrapped_model,
        concurrency_level=concurrency_level,
        num_repetitions=num_repetitions,
        project_name=project_name,
        verbose=verbose,
        tags=tags,
        evaluation=evaluation,
        input_mapper=input_mapper,
        data_type=dataset.data_type,
    )
    return {
        "project_name": project_name,
        "results": results,
    }
def _handle_coroutine(coro: Coroutine) -> Any:
    """
    Handles a coroutine from a sync context.

    If no event loop is running, the coroutine is executed to completion with
    ``asyncio.run`` and its result is returned. If a loop is already running
    (e.g. inside Jupyter), the coroutine is scheduled on that loop and the
    resulting ``asyncio.Task`` is returned instead — the caller must await it.

    Args:
        coro (asyncio.coroutine): The coroutine to be handled.

    Returns:
        any: The result of the executed coroutine, or a Task when called
        from within a running event loop.
    """
    try:
        # get_running_loop() only succeeds inside a running loop; the
        # previously used get_event_loop() is deprecated outside one and
        # could create a loop as a side effect.
        loop = asyncio.get_running_loop()
    except RuntimeError:  # No running event loop
        return asyncio.run(coro)
    return loop.create_task(coro)
def run_on_dataset(
    client: Client,
    dataset_name: str,
    llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
    *,
    evaluation: Optional[RunEvalConfig] = None,
    num_repetitions: int = 1,
    concurrency_level: int = 5,
    project_name: Optional[str] = None,
    verbose: bool = False,
    tags: Optional[List[str]] = None,
    input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Dict[str, Any]:
    """
    Run the Chain or language model on a dataset and store traces
    to the specified project name.

    Args:
        client: LangSmith client to use to access the dataset and to
            log feedback and run traces.
        dataset_name: Name of the dataset to run the chain on.
        llm_or_chain_factory: Language model or Chain constructor to run
            over the dataset. The Chain constructor is used to permit
            independent calls on each example without carrying over state.
        evaluation: Configuration for evaluators to run on the
            results of the chain.
        concurrency_level: The number of async tasks to run concurrently;
            0 or 1 forces the fully synchronous code path.
        num_repetitions: Number of times to run the model on each example.
            This is useful when testing success rates or generating
            confidence intervals.
        project_name: Name of the project to store the traces in.
            Defaults to {dataset_name}-{chain class name}-{datetime}.
        verbose: Whether to print progress.
        tags: Tags to add to each run in the project.
        input_mapper: A function to map to the inputs dictionary from an
            Example to the format expected by the model to be evaluated.
            This is useful if your model needs to deserialize more complex
            schema or if your dataset has inputs with keys that differ from
            what is expected by your chain or agent.

    Returns:
        A dictionary containing the run's project name and the resulting
        model outputs.

    For the (usually faster) async version of this function, see
    :func:`arun_on_dataset`.

    Examples
    --------
    .. code-block:: python

        from langsmith import Client
        from langchain.chat_models import ChatOpenAI
        from langchain.chains import LLMChain
        from langchain.smith import RunEvalConfig, run_on_dataset

        # Chains may have memory. Passing in a constructor function lets the
        # evaluation framework avoid cross-contamination between runs.
        def construct_chain():
            llm = ChatOpenAI(temperature=0)
            chain = LLMChain.from_string(
                llm,
                "What's the answer to {your_input_key}"
            )
            return chain

        # Load off-the-shelf evaluators via config or the EvaluatorType
        # (string or enum)
        evaluation_config = RunEvalConfig(
            evaluators=[
                "qa",  # "Correctness" against a reference answer
                "embedding_distance",
                RunEvalConfig.Criteria("helpfulness"),
            ]
        )

        client = Client()
        run_on_dataset(
            client,
            "<my_dataset_name>",
            construct_chain,
            evaluation=evaluation_config,
        )

    You can also create custom evaluators by subclassing the
    :class:`StringEvaluator <langchain.evaluation.schema.StringEvaluator>`
    or LangSmith's `RunEvaluator` classes, and passing them via
    ``RunEvalConfig(custom_evaluators=[...])``.
    """  # noqa: E501
    wrapped_model, project_name, dataset, examples = _prepare_eval_run(
        client, dataset_name, llm_or_chain_factory, project_name
    )
    # Keyword arguments shared by both the sync and async code paths.
    shared_kwargs: Dict[str, Any] = {
        "num_repetitions": num_repetitions,
        "project_name": project_name,
        "verbose": verbose,
        "tags": tags,
        "evaluation": evaluation,
        "input_mapper": input_mapper,
        "data_type": dataset.data_type,
    }
    if concurrency_level in (0, 1):
        results = _run_on_examples(client, examples, wrapped_model, **shared_kwargs)
    else:
        # TODO: Use runnables and the batch method
        coro = _arun_on_examples(
            client,
            examples,
            wrapped_model,
            concurrency_level=concurrency_level,
            **shared_kwargs,
        )
        results = _handle_coroutine(coro)
    return {
        "project_name": project_name,
        "results": results,
    }
https://api.python.langchain.com/en/latest/_modules/langchain/smith/evaluation/runner_utils.html
83425ba005ea-0
def import_rellm() -> rellm:
    """Lazily import rellm.

    Returns:
        The imported ``rellm`` module.

    Raises:
        ImportError: If the ``rellm`` package is not installed.
    """
    try:
        import rellm
    except ImportError as e:
        # Chain the original error (PEP 3134) so the real import failure
        # remains visible in the traceback.
        raise ImportError(
            "Could not import rellm python package. "
            "Please install it with `pip install rellm`."
        ) from e
    return rellm
class RELLM(HuggingFacePipeline):
    """RELLM wrapped LLM using HuggingFace Pipeline API."""

    # Pattern the generated text must conform to.
    regex: RegexPattern = Field(..., description="The structured format to complete.")
    # Generation budget for each completion.
    max_new_tokens: int = Field(
        default=200, description="Maximum number of new tokens to generate."
    )

    @root_validator
    def check_rellm_installation(cls, values: dict) -> dict:
        # Fail fast at construction time if rellm is not installed.
        import_rellm()
        return values

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        rellm = import_rellm()
        from transformers import Text2TextGenerationPipeline

        hf_pipeline = cast(Text2TextGenerationPipeline, self.pipeline)
        completion = rellm.complete_re(
            prompt,
            self.regex,
            tokenizer=hf_pipeline.tokenizer,
            model=hf_pipeline.model,
            max_new_tokens=self.max_new_tokens,
        )
        if stop is not None:
            # Best-effort truncation: the underlying pipeline offers no
            # native way to enforce stop tokens, so trim after generation.
            completion = enforce_stop_tokens(completion, stop)
        return completion
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/llms/rellm_decoder.html
e535e2ad4e9f-0
def import_jsonformer() -> jsonformer:
    """Lazily import jsonformer.

    Returns:
        The imported ``jsonformer`` module.

    Raises:
        ImportError: If the ``jsonformer`` package is not installed.
    """
    try:
        import jsonformer
    except ImportError as e:
        # Chain the original error (PEP 3134) so the real import failure
        # remains visible in the traceback.
        raise ImportError(
            "Could not import jsonformer python package. "
            "Please install it with `pip install jsonformer`."
        ) from e
    return jsonformer
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/llms/jsonformer_decoder.html