id stringlengths 14 15 | text stringlengths 35 2.51k | source stringlengths 61 154 |
|---|---|---|
a947e9d153c6-3 | """Clear session memory from Zep. Note that Zep is long-term storage for memory
and this is not advised unless you have specific data retention requirements.
"""
try:
self.zep_client.delete_memory(self.session_id)
except NotFoundError:
logger.warning(
... | https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/zep.html |
0a03539643f7-0 | Source code for langchain.memory.chat_message_histories.redis
import json
import logging
from typing import List, Optional
from langchain.schema import (
BaseChatMessageHistory,
BaseMessage,
_message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
[docs]class RedisChatMessageHistory(... | https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/redis.html |
0a03539643f7-1 | """Append the message to the record in Redis"""
self.redis_client.lpush(self.key, json.dumps(_message_to_dict(message)))
if self.ttl:
self.redis_client.expire(self.key, self.ttl)
[docs] def clear(self) -> None:
"""Clear session memory from Redis"""
self.redis_client.delete... | https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/redis.html |
e1c467464fa0-0 | Source code for langchain.memory.chat_message_histories.postgres
import json
import logging
from typing import List
from langchain.schema import (
BaseChatMessageHistory,
BaseMessage,
_message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
DEFAULT_CONNECTION_STRING = "postgresql://p... | https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/postgres.html |
e1c467464fa0-1 | items = [record["message"] for record in self.cursor.fetchall()]
messages = messages_from_dict(items)
return messages
[docs] def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in PostgreSQL"""
from psycopg import sql
query = sql.SQL("INSER... | https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/postgres.html |
08cfa072edc1-0 | Source code for langchain.memory.chat_message_histories.cassandra
"""Cassandra-based chat message history, based on cassIO."""
from __future__ import annotations
import json
import typing
from typing import List
if typing.TYPE_CHECKING:
from cassandra.cluster import Session
from langchain.schema import (
BaseCh... | https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/cassandra.html |
08cfa072edc1-1 | @property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve all session messages from DB"""
message_blobs = self.blob_history.retrieve(
self.session_id,
)
items = [json.loads(message_blob) for message_blob in message_blobs]
messages = messages_f... | https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/cassandra.html |
ca8caa9492b5-0 | Source code for langchain.memory.chat_message_histories.mongodb
import json
import logging
from typing import List
from langchain.schema import (
BaseChatMessageHistory,
BaseMessage,
_message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
DEFAULT_DBNAME = "chat_history"
DEFAULT_COLL... | https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/mongodb.html |
ca8caa9492b5-1 | except errors.OperationFailure as error:
logger.error(error)
if cursor:
items = [json.loads(document["History"]) for document in cursor]
else:
items = []
messages = messages_from_dict(items)
return messages
[docs] def add_message(self, message: Base... | https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/mongodb.html |
93efad5c60e0-0 | Source code for langchain.memory.chat_message_histories.momento
from __future__ import annotations
import json
from datetime import timedelta
from typing import TYPE_CHECKING, Any, Optional
from langchain.schema import (
BaseChatMessageHistory,
BaseMessage,
_message_to_dict,
messages_from_dict,
)
from l... | https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/momento.html |
93efad5c60e0-1 | Note: to instantiate the cache client passed to MomentoChatMessageHistory,
you must have a Momento account at https://gomomento.com/.
Args:
session_id (str): The session ID to use for this chat session.
cache_client (CacheClient): The Momento cache client.
cache_name ... | https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/momento.html |
93efad5c60e0-2 | def from_client_params(
cls,
session_id: str,
cache_name: str,
ttl: timedelta,
*,
configuration: Optional[momento.config.Configuration] = None,
auth_token: Optional[str] = None,
**kwargs: Any,
) -> MomentoChatMessageHistory:
"""Construct cache ... | https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/momento.html |
93efad5c60e0-3 | return []
elif isinstance(fetch_response, CacheListFetch.Error):
raise fetch_response.inner_exception
else:
raise Exception(f"Unexpected response: {fetch_response}")
[docs] def add_message(self, message: BaseMessage) -> None:
"""Store a message in the cache.
Ar... | https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/momento.html |
065f46933ba7-0 | Source code for langchain.load.dump
import json
from typing import Any, Dict
from langchain.load.serializable import Serializable, to_json_not_implemented
[docs]def default(obj: Any) -> Any:
"""Return a default value for a Serializable object or
a SerializedNotImplemented object."""
if isinstance(obj, Seria... | https://api.python.langchain.com/en/latest/_modules/langchain/load/dump.html |
0f6c15b73856-0 | Source code for langchain.load.load
import importlib
import json
import os
from typing import Any, Dict, Optional
from langchain.load.serializable import Serializable
class Reviver:
def __init__(self, secrets_map: Optional[Dict[str, str]] = None) -> None:
self.secrets_map = secrets_map or dict()
def __c... | https://api.python.langchain.com/en/latest/_modules/langchain/load/load.html |
0f6c15b73856-1 | # The root namespace "langchain" is not a valid identifier.
if len(namespace) == 1:
raise ValueError(f"Invalid namespace: {value}")
mod = importlib.import_module(".".join(namespace))
cls = getattr(mod, name)
# The class must be a subclass of Serializable.
... | https://api.python.langchain.com/en/latest/_modules/langchain/load/load.html |
753c02d98e98-0 | Source code for langchain.load.serializable
from abc import ABC
from typing import Any, Dict, List, Literal, TypedDict, Union, cast
from pydantic import BaseModel, PrivateAttr
[docs]class BaseSerialized(TypedDict):
"""Base class for serialized objects."""
lc: int
id: List[str]
[docs]class SerializedConstruc... | https://api.python.langchain.com/en/latest/_modules/langchain/load/serializable.html |
753c02d98e98-1 | return {}
[docs] class Config:
extra = "ignore"
_lc_kwargs = PrivateAttr(default_factory=dict)
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
self._lc_kwargs = kwargs
[docs] def to_json(self) -> Union[SerializedConstructor, SerializedNotImplemented]:
... | https://api.python.langchain.com/en/latest/_modules/langchain/load/serializable.html |
753c02d98e98-2 | return {
"lc": 1,
"type": "constructor",
"id": [*self.lc_namespace, self.__class__.__name__],
"kwargs": lc_kwargs
if not secrets
else _replace_secrets(lc_kwargs, secrets),
}
[docs] def to_json_not_implemented(self) -> SerializedNotImplem... | https://api.python.langchain.com/en/latest/_modules/langchain/load/serializable.html |
753c02d98e98-3 | except Exception:
pass
return {
"lc": 1,
"type": "not_implemented",
"id": _id,
} | https://api.python.langchain.com/en/latest/_modules/langchain/load/serializable.html |
3ddd91a7293b-0 | Source code for langchain.client.runner_utils
"""Utilities for running language models or Chains over datasets."""
from __future__ import annotations
import asyncio
import functools
import logging
from datetime import datetime
from typing import (
Any,
Callable,
Coroutine,
Dict,
Iterator,
List,
... | https://api.python.langchain.com/en/latest/_modules/langchain/client/runner_utils.html |
3ddd91a7293b-1 | prompts = []
if "prompt" in inputs:
if not isinstance(inputs["prompt"], str):
raise InputFormatError(
"Expected string for 'prompt', got"
f" {type(inputs['prompt']).__name__}"
)
prompts = [inputs["prompt"]]
elif "prompts" in inputs:
... | https://api.python.langchain.com/en/latest/_modules/langchain/client/runner_utils.html |
3ddd91a7293b-2 | if "messages" in inputs:
single_input = inputs["messages"]
elif len(inputs) == 1:
single_input = next(iter(inputs.values()))
else:
raise InputFormatError(f"Chat Run expects 'messages' in inputs. Got {inputs}")
if isinstance(single_input, list) and all(
isinstance(i, dict) for... | https://api.python.langchain.com/en/latest/_modules/langchain/client/runner_utils.html |
3ddd91a7293b-3 | """
if input_mapper is not None:
if not isinstance(llm, (BaseLLM, BaseChatModel)):
raise ValueError(f"Unsupported LLM type {type(llm).__name__}")
llm_output = await llm.agenerate(
input_mapper(inputs), callbacks=callbacks, tags=tags
)
elif isinstance(llm, BaseLLM)... | https://api.python.langchain.com/en/latest/_modules/langchain/client/runner_utils.html |
3ddd91a7293b-4 | *,
tags: Optional[List[str]] = None,
callbacks: Optional[List[BaseCallbackHandler]] = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[List[dict], List[str], List[LLMResult], List[ChatResult]]:
"""
Asynchronously run the Chain or language model.
Args:
example: The e... | https://api.python.langchain.com/en/latest/_modules/langchain/client/runner_utils.html |
3ddd91a7293b-5 | inputs_ = next(iter(inputs_.values()))
output = await chain.acall(inputs_, callbacks=callbacks, tags=tags)
outputs.append(output)
except Exception as e:
logger.warning(f"Chain failed for example {example.id}. Error: {e}")
outputs.append({"Error": str(e)})
... | https://api.python.langchain.com/en/latest/_modules/langchain/client/runner_utils.html |
3ddd91a7293b-6 | result = await async_func(callbacks, job_state)
finally:
callback_queue.put_nowait(callbacks)
return result
results = await asyncio.gather(
*(run_coroutine_with_semaphore(function) for function in async_funcs)
)
while callback_queue:
try:
c... | https://api.python.langchain.com/en/latest/_modules/langchain/client/runner_utils.html |
3ddd91a7293b-7 | project_name: Optional[str] = None,
verbose: bool = False,
client: Optional[LangChainPlusClient] = None,
tags: Optional[List[str]] = None,
run_evaluators: Optional[Sequence[RunEvaluator]] = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Dict[str, Any]:
"""
Asynchronously ru... | https://api.python.langchain.com/en/latest/_modules/langchain/client/runner_utils.html |
3ddd91a7293b-8 | Returns:
A dictionary mapping example ids to the model outputs.
"""
project_name = _get_project_name(project_name, llm_or_chain_factory, None)
client_ = client or LangChainPlusClient()
client_.create_project(project_name, mode="eval")
results: Dict[str, List[Any]] = {}
evaluation_handler... | https://api.python.langchain.com/en/latest/_modules/langchain/client/runner_utils.html |
3ddd91a7293b-9 | *,
tags: Optional[List[str]] = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[LLMResult, ChatResult]:
"""
Run the language model on the example.
Args:
llm: The language model to run.
inputs: The input dictionary.
callbacks: The callbacks to use during ... | https://api.python.langchain.com/en/latest/_modules/langchain/client/runner_utils.html |
3ddd91a7293b-10 | except InputFormatError:
prompts = _get_prompts(inputs)
converted_messages: List[List[BaseMessage]] = [
[HumanMessage(content=prompt)] for prompt in prompts
]
llm_output = llm.generate(
converted_messages, callbacks=callbacks, tags=tags
... | https://api.python.langchain.com/en/latest/_modules/langchain/client/runner_utils.html |
3ddd91a7293b-11 | outputs = []
for _ in range(n_repetitions):
try:
if isinstance(llm_or_chain_factory, BaseLanguageModel):
output: Any = run_llm(
llm_or_chain_factory,
example.inputs,
callbacks,
tags=tags,
... | https://api.python.langchain.com/en/latest/_modules/langchain/client/runner_utils.html |
3ddd91a7293b-12 | traces to the specified project name.
Args:
examples: Examples to run the model or chain over.
llm_or_chain_factory: Language model or Chain constructor to run
over the dataset. The Chain constructor is used to permit
independent calls on each example without carrying over st... | https://api.python.langchain.com/en/latest/_modules/langchain/client/runner_utils.html |
3ddd91a7293b-13 | evaluators=run_evaluators or [], client=client_
)
callbacks: List[BaseCallbackHandler] = [tracer, evalution_handler]
for i, example in enumerate(examples):
result = run_llm_or_chain(
example,
llm_or_chain_factory,
num_repetitions,
tags=tags,
... | https://api.python.langchain.com/en/latest/_modules/langchain/client/runner_utils.html |
3ddd91a7293b-14 | dataset_name: str,
llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
*,
concurrency_level: int = 5,
num_repetitions: int = 1,
project_name: Optional[str] = None,
verbose: bool = False,
client: Optional[LangChainPlusClient] = None,
tags: Optional[List[str]] = None,
run_evaluators: Optiona... | https://api.python.langchain.com/en/latest/_modules/langchain/client/runner_utils.html |
3ddd91a7293b-15 | to the format expected by the model to be evaluated. This is useful if
your model needs to deserialize more complex schema or if your dataset
has inputs with keys that differ from what is expected by your chain
or agent.
Returns:
A dictionary containing the run's project ... | https://api.python.langchain.com/en/latest/_modules/langchain/client/runner_utils.html |
3ddd91a7293b-16 | """
Run the Chain or language model on a dataset and store traces
to the specified project name.
Args:
dataset_name: Name of the dataset to run the chain on.
llm_or_chain_factory: Language model or Chain constructor to run
over the dataset. The Chain constructor is used to permit... | https://api.python.langchain.com/en/latest/_modules/langchain/client/runner_utils.html |
3ddd91a7293b-17 | results = run_on_examples(
examples,
llm_or_chain_factory,
num_repetitions=num_repetitions,
project_name=project_name,
verbose=verbose,
tags=tags,
run_evaluators=run_evaluators,
client=client_,
input_mapper=input_mapper,
)
return {
... | https://api.python.langchain.com/en/latest/_modules/langchain/client/runner_utils.html |
54f3c06d17de-0 | Source code for langchain.evaluation.schema
"""Interfaces to be implemented by general evaluators."""
from abc import abstractmethod
from typing import Any, Optional, Protocol, runtime_checkable
[docs]@runtime_checkable
class StringEvaluator(Protocol):
"""Protocol for evaluating strings."""
[docs] @abstractmetho... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/schema.html |
54f3c06d17de-1 | """
raise NotImplementedError(
f"{self.__class__.__name__} hasn't implemented an "
"async aevaluate_strings method."
)
[docs]@runtime_checkable
class PairwiseStringEvaluator(Protocol):
"""A protocol for comparing the output of two models."""
[docs] @abstractmethod
def ... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/schema.html |
54f3c06d17de-2 | input (str, optional): The input string. Defaults to None.
**kwargs (Any): Additional keyword arguments, such
as callbacks and optional reference strings.
Returns:
dict: A dictionary containing the preference, scores, and/or
other information.
"""
... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/schema.html |
c845a89b29a6-0 | Source code for langchain.evaluation.loading
from typing import Dict, List
[docs]def load_dataset(uri: str) -> List[Dict]:
from datasets import load_dataset
dataset = load_dataset(f"LangChainDatasets/{uri}")
return [d for d in dataset["train"]] | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/loading.html |
b0de7847cfa3-0 | Source code for langchain.evaluation.criteria.eval_chain
from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional, Sequence, Union
from pydantic import Field
from langchain.base_language import BaseLanguageModel
from langchain.chains.constitutional_ai.models import ConstitutionalPrincipl... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/criteria/eval_chain.html |
b0de7847cfa3-1 | """Parse the output text.
Args:
text (str): The output text to parse.
Returns:
Any: The parsed output.
"""
reasoning, verdict = text.strip().rsplit("\n", maxsplit=1)
score = 1 if verdict.upper() == "Y" else (0 if verdict.upper() == "N" else None)
r... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/criteria/eval_chain.html |
b0de7847cfa3-2 | Additional keyword arguments to pass to the `LLMChain` constructor.
Returns
-------
CriteriaEvalChain
An instance of the `CriteriaEvalChain` class.
Examples
--------
>>> from langchain.chat_models import ChatAnthropic
>>> from langchain.evaluation.criteria import CriteriaEvalChain
... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/criteria/eval_chain.html |
b0de7847cfa3-3 | - a mapping of criterion names to descriptions
- a sequence of criterion names
- a single criterion name present in one of the default criteria
- a sequence of `ConstitutionalPrinciple` instances
- a single `ConstitutionalPrinciple` instance
R... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/criteria/eval_chain.html |
b0de7847cfa3-4 | requires_reference: bool = False,
**kwargs: Any,
) -> CriteriaEvalChain:
"""Create a `CriteriaEvalChain` instance from an llm and criteria.
Parameters
----------
llm : BaseLanguageModel
The language model to use for evaluation.
criteria : CRITERIA_TYPE
... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/criteria/eval_chain.html |
b0de7847cfa3-5 | ),
}
>>> chain = CriteriaEvalChain.from_llm(
llm=llm,
criteria=criteria,
requires_reference=True,
)
"""
if prompt is None:
if requires_reference:
prompt = PROMPT_WITH_REFERENCES
else:
... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/criteria/eval_chain.html |
b0de7847cfa3-6 | The input text used to generate the prediction.
**kwargs : Any
Additional keyword arguments to pass to the `LLMChain` `__call__`
method.
Returns
-------
dict
The evaluation results.
Examples
--------
>>> from langchain.llms impo... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/criteria/eval_chain.html |
b0de7847cfa3-7 | Examples
--------
>>> from langchain.llms import OpenAI
>>> from langchain.evaluation.criteria import CriteriaEvalChain
>>> llm = OpenAI()
>>> criteria = "conciseness"
>>> chain = CriteriaEvalChain.from_llm(llm=llm, criteria=criteria)
>>> await chain.aevaluate_st... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/criteria/eval_chain.html |
a00ac0f83c17-0 | Source code for langchain.evaluation.agents.trajectory_eval_chain
"""A chain for evaluating ReAct style agents.
This chain is used to evaluate ReAct style agents by reasoning about
the sequence of actions taken and their outcomes. It uses a language model
chain (LLMChain) to generate the reasoning and scores.
"""
from ... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/agents/trajectory_eval_chain.html |
a00ac0f83c17-1 | """
if "Score:" not in text:
raise OutputParserException(
f"Could not find score in model eval output: {text}"
)
reasoning, score_str = text.split("Score: ")
reasoning, score_str = reasoning.strip(), score_str.strip()
score_str = next(
... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/agents/trajectory_eval_chain.html |
a00ac0f83c17-2 | return_intermediate_steps=True,
)
question = "How many dwell in the largest minor region in Argentina?"
response = agent(question)
eval_chain = TrajectoryEvalChain.from_llm(
llm=llm, agent_tools=[geography_answers], return_reasoning=True
)
... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/agents/trajectory_eval_chain.html |
a00ac0f83c17-3 | """Get the agent trajectory as a formatted string.
Args:
steps (Union[str, List[Tuple[AgentAction, str]]]): The agent trajectory.
Returns:
str: The formatted agent trajectory.
"""
if isinstance(steps, str):
return steps
return "\n\n".join(
... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/agents/trajectory_eval_chain.html |
a00ac0f83c17-4 | used to parse the chain output into a score.
return_reasoning (bool): Whether to return the
reasoning along with the score.
Returns:
TrajectoryEvalChain: The TrajectoryEvalChain object.
"""
if agent_tools:
prompt = EVAL_CHAT_PROMPT
else... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/agents/trajectory_eval_chain.html |
a00ac0f83c17-5 | inputs: Dictionary of inputs, or single input if chain expects
only one param.
return_only_outputs: boolean for whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new k... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/agents/trajectory_eval_chain.html |
a00ac0f83c17-6 | return {"score": parsed_output.score, "reasoning": parsed_output.reasoning}
return {"score": parsed_output.score}
async def _acall(
self,
inputs: Dict[str, str],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run the chain and gener... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/agents/trajectory_eval_chain.html |
a00ac0f83c17-7 | reference (Optional[str]): The reference answer.
Returns:
dict: The evaluation result.
"""
inputs = {
"question": input,
"agent_trajectory": self.get_agent_trajectory(agent_trajectory),
"answer": prediction,
"reference": self._format_re... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/agents/trajectory_eval_chain.html |
529e2c019bee-0 | Source code for langchain.evaluation.qa.generate_chain
"""LLM Chain specifically for generating examples for question answering."""
from __future__ import annotations
from typing import Any
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.evaluation.qa.gener... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/qa/generate_chain.html |
710c8ec171a8-0 | Source code for langchain.evaluation.qa.eval_chain
"""LLM Chain specifically for evaluating question answering."""
from __future__ import annotations
from typing import Any, List, Optional, Sequence
from langchain import PromptTemplate
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manag... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/qa/eval_chain.html |
710c8ec171a8-1 | Defaults to PROMPT.
**kwargs: additional keyword arguments.
Returns:
QAEvalChain: the loaded QA eval chain.
"""
expected_input_vars = {"query", "answer", "result"}
if expected_input_vars != set(prompt.input_variables):
raise ValueError(
... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/qa/eval_chain.html |
710c8ec171a8-2 | reference (Optional[str], optional): the reference label
to evaluate against.
input (Optional[str], optional): the input to consider during evaluation
callbacks (Callbacks, optional): the callbacks to use for tracing.
**kwargs: additional keyword arguments, including ... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/qa/eval_chain.html |
710c8ec171a8-3 | def from_llm(
cls,
llm: BaseLanguageModel,
prompt: PromptTemplate = CONTEXT_PROMPT,
**kwargs: Any,
) -> ContextQAEvalChain:
"""Load QA Eval Chain from LLM.
Args:
llm (BaseLanguageModel): the base language model to use.
prompt (PromptTemplate): ... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/qa/eval_chain.html |
710c8ec171a8-4 | **kwargs: Any,
) -> dict:
result = self.evaluate(
examples=[{"query": input, "context": reference}],
predictions=[{"result": prediction}],
callbacks=kwargs.get("callbacks"),
)[0]
return _parse_string_eval_output(result["text"])
[docs] async def aevaluat... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/qa/eval_chain.html |
0ce7b92b6045-0 | Source code for langchain.evaluation.comparison.eval_chain
"""Base classes for comparing the output of two models."""
from __future__ import annotations
from typing import Any, Optional
from pydantic import Field
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import Callbacks
fro... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/comparison/eval_chain.html |
0ce7b92b6045-1 | return {
"reasoning": reasoning,
"value": verdict_,
"score": score,
}
[docs]class PairwiseStringEvalChain(LLMChain):
"""A chain for comparing the output of two models.
Example:
>>> from langchain.chat_models import ChatOpenAI
>>> from langchain.evaluation.comp... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/comparison/eval_chain.html |
0ce7b92b6045-2 | require_reference: bool = False,
**kwargs: Any,
) -> PairwiseStringEvalChain:
"""Initialize the PairwiseStringEvalChain from an LLM.
Args:
llm (BaseLanguageModel): The LLM to use.
prompt (PromptTemplate, optional): The prompt to use.
require_reference (boo... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/comparison/eval_chain.html |
0ce7b92b6045-3 | self,
*,
prediction: str,
prediction_b: str,
input: str,
reference: Optional[str] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> dict:
"""Evaluate whether output A is preferred to output B.
Args:
prediction (str): The outp... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/comparison/eval_chain.html |
0ce7b92b6045-4 | Args:
prediction (str): The output string from the first model.
prediction_b (str): The output string from the second model.
input (str): The input or task string.
callbacks (Callbacks, optional): The callbacks to use.
reference (str, optional): The reference ... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/comparison/eval_chain.html |
587d510a7bad-0 | Source code for langchain.evaluation.run_evaluators.implementations
from typing import Any, Dict, List, Mapping, Optional, Sequence, Union
from langchainplus_sdk.evaluation import EvaluationResult
from langchainplus_sdk.schemas import Example, Run, RunTypeEnum
from pydantic import BaseModel, Field
from langchain.base_l... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/run_evaluators/implementations.html |
587d510a7bad-1 | """Map from run inputs to the evaluation inputs."""
answer_map: Optional[Dict[str, str]] = None
"""Map from example outputs to the evaluation inputs."""
[docs] def map(self, run: Run, example: Optional[Example] = None) -> Dict[str, Any]:
"""Maps the Run and Optional[Example] to a dictionary"""
... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/run_evaluators/implementations.html |
587d510a7bad-2 | return EvaluationResult(
key=self.evaluation_name,
score=score,
value=value,
comment=comment,
)
[docs]def get_qa_evaluator(
llm: BaseLanguageModel,
*,
prompt: Union[PromptTemplate, str] = QA_DEFAULT_PROMPT,
input_key: str = "input",
prediction_... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/run_evaluators/implementations.html |
587d510a7bad-3 | """Parse a criteria results into an evaluation result."""
evaluation_name: str
@property
def _type(self) -> str:
return "criteria"
[docs] def parse(self, parsed_output: Union[str, dict]) -> EvaluationResult:
"""Parse the last line of the text and return an evaluation result."""
if... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/run_evaluators/implementations.html |
587d510a7bad-4 | ),
)
tags = kwargs.pop("tags", [])
eval_chain = CriteriaEvalChain.from_llm(
llm=llm, criteria=criteria_, prompt=prompt, **kwargs
)
return RunEvaluatorChain(
eval_chain=eval_chain,
input_mapper=input_mapper,
output_parser=parser,
tags=tags + [evaluation_name],
... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/run_evaluators/implementations.html |
587d510a7bad-5 | )
[docs]class TrajectoryInputMapper(RunEvaluatorInputMapper, BaseModel):
"""Maps the Run and Optional[Example] to a dictionary."""
tool_descriptions: List[str]
"""The descriptions for each of the tools available to the agent."""
agent_input_key: str = "input"
"""The key to load from the agent execut... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/run_evaluators/implementations.html |
587d510a7bad-6 | f"""Step {i}:
Tool used: {run_.name}
Tool input: {run_.inputs.get(self.tool_input_key, run_.inputs)}
Tool output: {tool_output}"""
)
return {
"tool_descriptions": "\n\n".join(self.tool_descriptions),
"question": question,
"agent_trajectory": "\n\n".join(agent_step... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/run_evaluators/implementations.html |
587d510a7bad-7 | ),
)
parser = kwargs.pop(
"output_parser",
TrajectoryEvalOutputParser(evaluation_name=evaluation_name),
)
eval_chain = LLMChain(llm=llm, prompt=prompt, **kwargs)
tags = kwargs.pop("tags", [])
return RunEvaluatorChain(
eval_chain=eval_chain,
input_mapper=input_mapp... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/run_evaluators/implementations.html |
8f5554f4f4c8-0 | Source code for langchain.evaluation.run_evaluators.base
from __future__ import annotations
from abc import abstractmethod
from typing import Any, Dict, List, Optional
from langchainplus_sdk import EvaluationResult, RunEvaluator
from langchainplus_sdk.schemas import Example, Run
from langchain.callbacks.manager import ... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/run_evaluators/base.html |
8f5554f4f4c8-1 | return ["feedback"]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Call the evaluation chain."""
run: Run = inputs["run"]
example: Optional[Example] = inputs.get("example")
chain_i... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/run_evaluators/base.html |
8f5554f4f4c8-2 | return {"feedback": feedback}
[docs] def evaluate_run(
self, run: Run, example: Optional[Example] = None
) -> EvaluationResult:
"""Evaluate an example."""
return self({"run": run, "example": example})["feedback"]
[docs] async def aevaluate_run(
self, run: Run, example: Optional... | https://api.python.langchain.com/en/latest/_modules/langchain/evaluation/run_evaluators/base.html |
0e1c25b57cce-0 | Source code for langchain.callbacks.streaming_aiter
from __future__ import annotations
import asyncio
from typing import Any, AsyncIterator, Dict, List, Literal, Union, cast
from langchain.callbacks.base import AsyncCallbackHandler
from langchain.schema import LLMResult
# TODO If used by two LLM runs in parallel this w... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streaming_aiter.html |
0e1c25b57cce-1 | done, other = await asyncio.wait(
[
# NOTE: If you add other tasks here, update the code below,
# which assumes each set has exactly one task each
asyncio.ensure_future(self.queue.get()),
asyncio.ensure_future(self.done.wait... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streaming_aiter.html |
457b5ae9b8f3-0 | Source code for langchain.callbacks.utils
import hashlib
from pathlib import Path
from typing import Any, Dict, Iterable, Tuple, Union
[docs]def import_spacy() -> Any:
"""Import the spacy python package and raise an error if it is not installed."""
try:
import spacy
except ImportError:
raise... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/utils.html |
457b5ae9b8f3-1 | parent_key (str): The prefix to prepend to the keys of the flattened dict.
sep (str): The separator to use between the parent key and the key of the
flattened dictionary.
Yields:
(str, any): A key-value pair from the flattened dictionary.
"""
for key, value in nested_dict.items()... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/utils.html |
457b5ae9b8f3-2 | """Load json file to a string.
Parameters:
json_path (str): The path to the json file.
Returns:
(str): The string representation of the json file.
"""
with open(json_path, "r") as f:
data = f.read()
return data
class BaseMetadataCallbackHandler:
"""This class handles the ... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/utils.html |
457b5ae9b8f3-3 | tool_ends (int): The number of times the tool end method has been called.
agent_ends (int): The number of times the agent end method has been called.
on_llm_start_records (list): A list of records of the on_llm_start method.
on_llm_token_records (list): A list of records of the on_llm_token meth... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/utils.html |
457b5ae9b8f3-4 | self.on_llm_token_records: list = []
self.on_llm_end_records: list = []
self.on_chain_start_records: list = []
self.on_chain_end_records: list = []
self.on_tool_start_records: list = []
self.on_tool_end_records: list = []
self.on_text_records: list = []
self.on_ag... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/utils.html |
457b5ae9b8f3-5 | def reset_callback_meta(self) -> None:
"""Reset the callback metadata."""
self.step = 0
self.starts = 0
self.ends = 0
self.errors = 0
self.text_ctr = 0
self.ignore_llm_ = False
self.ignore_chain_ = False
self.ignore_agent_ = False
self.alwa... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/utils.html |
ea37bd1368ef-0 | Source code for langchain.callbacks.aim_callback
from copy import deepcopy
from typing import Any, Dict, List, Optional, Union
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult
[docs]def import_aim() -> Any:
"""Import the aim python package and... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/aim_callback.html |
ea37bd1368ef-1 | llm_ends (int): The number of times the llm end method has been called.
llm_streams (int): The number of times the text method has been called.
tool_starts (int): The number of times the tool start method has been called.
tool_ends (int): The number of times the tool end method has been called.
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/aim_callback.html |
ea37bd1368ef-2 | """Whether to ignore agent callbacks."""
return self.ignore_agent_
@property
def ignore_retriever(self) -> bool:
    """Read-only view of the ``ignore_retriever_`` flag.

    True when retriever callbacks should be skipped by this handler.
    """
    flag = self.ignore_retriever_
    return flag
def get_custom_callback_meta(self) -> Dict[str, Any]:
return {
"step": self.... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/aim_callback.html |
ea37bd1368ef-3 | """Callback Handler that logs to Aim.
Parameters:
repo (:obj:`str`, optional): Aim repository path or Repo object to which
Run object is bound. If skipped, default Repo is used.
experiment_name (:obj:`str`, optional): Sets Run's `experiment` property.
'default' if not specifi... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/aim_callback.html |
ea37bd1368ef-4 | self._run_hash = self._run.hash
self.action_records: list = []
[docs] def setup(self, **kwargs: Any) -> None:
aim = import_aim()
if not self._run:
if self._run_hash:
self._run = aim.Run(
self._run_hash,
repo=self.repo,
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/aim_callback.html |
ea37bd1368ef-5 | self.llm_ends += 1
self.ends += 1
resp = {"action": "on_llm_end"}
resp.update(self.get_custom_callback_meta())
response_res = deepcopy(response)
generated = [
aim.Text(generation.text)
for generations in response_res.generations
for generation ... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/aim_callback.html |
ea37bd1368ef-6 | """Run when chain ends running."""
aim = import_aim()
self.step += 1
self.chain_ends += 1
self.ends += 1
resp = {"action": "on_chain_end"}
resp.update(self.get_custom_callback_meta())
outputs_res = deepcopy(outputs)
self._run.track(
aim.Text(ou... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/aim_callback.html |
ea37bd1368ef-7 | [docs] def on_tool_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Run when tool errors."""
self.step += 1
self.errors += 1
[docs] def on_text(self, text: str, **kwargs: Any) -> None:
"""
Run when agent is ending.
"""... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/aim_callback.html |
ea37bd1368ef-8 | action_res.tool_input, action_res.log
)
self._run.track(aim.Text(text), name="on_agent_action", context=resp)
[docs] def flush_tracker(
self,
repo: Optional[str] = None,
experiment_name: Optional[str] = None,
system_tracking_interval: Optional[int] = 10,
log_sy... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/aim_callback.html |
ea37bd1368ef-9 | self._run.close()
self.reset_callback_meta()
if reset:
self.__init__( # type: ignore
repo=repo if repo else self.repo,
experiment_name=experiment_name
if experiment_name
else self.experiment_name,
system_tra... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/aim_callback.html |
c21ace92c567-0 | Source code for langchain.callbacks.clearml_callback
import tempfile
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.utils import (
BaseMetadataCallbackHandler,
flat... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/clearml_callback.html |
c21ace92c567-1 | and adds the response to the list of records for both the {method}_records and
action. It then logs the response to the ClearML console.
"""
def __init__(
self,
task_type: Optional[str] = "inference",
project_name: Optional[str] = "langchain_callback_demo",
tags: Optional[Seq... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/clearml_callback.html |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.