Dataset columns: id (string, 14-15 chars), text (string, 35-2.51k chars), source (string, 61-154 chars).
516ae6354a29-3
    def close(self) -> None:
        self.logger.close()
        diagnostic_logger.info("Closing WhyLabs logger, see you next time!")

    def __enter__(self) -> WhyLabsCallbackHandler:
        return self

    def __exit__(
        self, exception_type: Any, exception_value: Any, traceback: Any
    ) -> None:
        self.close()

    @classmethod
    def from_params(
        cls,
        *,
        api_key: Optional[str] = None,
        org_id: Optional[str] = None,
        dataset_id: Optional[str] = None,
        sentiment: bool = False,
        toxicity: bool = False,
        themes: bool = False,
    ) -> Logger:
        """Instantiate whylogs Logger from params.

        Args:
            api_key (Optional[str]): WhyLabs API key. Optional because the
                preferred way to specify the API key is with environment
                variable WHYLABS_API_KEY.
            org_id (Optional[str]): WhyLabs organization id to write profiles to.
                If not set must be specified in environment variable
                WHYLABS_DEFAULT_ORG_ID.
            dataset_id (Optional[str]): The model or dataset this callback is
                gathering telemetry for. If not set must be specified in
                environment variable WHYLABS_DEFAULT_DATASET_ID.
            sentiment (bool): If True will initialize a model to perform
                sentiment analysis compound score. Defaults to False and will
                not gather this metric.
            toxicity (bool): If True will initialize a model to score toxicity.
                Defaults to False and will not gather this metric.
            themes (bool): If True will initialize a model to calculate distance
                to configured themes. Defaults to False and will not gather this
                metric.
        """
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/whylabs_callback.html
516ae6354a29-4
metric. """ # langkit library will import necessary whylogs libraries import_langkit(sentiment=sentiment, toxicity=toxicity, themes=themes) import whylogs as why from whylogs.api.writer.whylabs import WhyLabsWriter from whylogs.core.schema import DeclarativeSchema from whylogs.experimental.core.metrics.udf_metric import generate_udf_schema api_key = api_key or get_from_env("api_key", "WHYLABS_API_KEY") org_id = org_id or get_from_env("org_id", "WHYLABS_DEFAULT_ORG_ID") dataset_id = dataset_id or get_from_env( "dataset_id", "WHYLABS_DEFAULT_DATASET_ID" ) whylabs_writer = WhyLabsWriter( api_key=api_key, org_id=org_id, dataset_id=dataset_id ) langkit_schema = DeclarativeSchema(generate_udf_schema()) whylabs_logger = why.logger( mode="rolling", interval=5, when="M", schema=langkit_schema ) whylabs_logger.append_writer(writer=whylabs_writer) diagnostic_logger.info( "Started whylogs Logger with WhyLabsWriter and initialized LangKit. 📝" ) return cls(whylabs_logger)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/whylabs_callback.html
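As a usage illustration (not part of the source above): a minimal sketch of wiring this handler into an LLM call, assuming the WHYLABS_API_KEY, WHYLABS_DEFAULT_ORG_ID and WHYLABS_DEFAULT_DATASET_ID environment variables are set and an OpenAI key is available; the prompt is illustrative. The context-manager form relies on the __enter__/__exit__ methods shown above.

from langchain.callbacks import WhyLabsCallbackHandler
from langchain.llms import OpenAI

# Sketch: rolling profiles are written to WhyLabs; close() flushes on exit.
with WhyLabsCallbackHandler.from_params(sentiment=True, toxicity=True) as whylabs:
    llm = OpenAI(temperature=0, callbacks=[whylabs])
    llm.generate(["Hello, how are you today?"])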
f60492ebc90d-0
Source code for langchain.callbacks.argilla_callback

import os
import warnings
from typing import Any, Dict, List, Optional, Union

from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult


class ArgillaCallbackHandler(BaseCallbackHandler):
    """Callback Handler that logs into Argilla.

    Args:
        dataset_name: name of the `FeedbackDataset` in Argilla. Note that it
            must exist in advance. If you need help on how to create a
            `FeedbackDataset` in Argilla, please visit
            https://docs.argilla.io/en/latest/guides/llms/practical_guides/use_argilla_callback_in_langchain.html.
        workspace_name: name of the workspace in Argilla where the specified
            `FeedbackDataset` lives in. Defaults to `None`, which means that the
            default workspace will be used.
        api_url: URL of the Argilla Server that we want to use, and where the
            `FeedbackDataset` lives in. Defaults to `None`, which means that
            either `ARGILLA_API_URL` environment variable or the default
            http://localhost:6900 will be used.
        api_key: API Key to connect to the Argilla Server. Defaults to `None`,
            which means that either `ARGILLA_API_KEY` environment variable or
            the default `argilla.apikey` will be used.

    Raises:
        ImportError: if the `argilla` package is not installed.
        ConnectionError: if the connection to Argilla fails.
        FileNotFoundError: if the `FeedbackDataset` retrieval from Argilla fails.

    Examples:
        >>> from langchain.llms import OpenAI
        >>> from langchain.callbacks import ArgillaCallbackHandler
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/argilla_callback.html
f60492ebc90d-1
        >>> argilla_callback = ArgillaCallbackHandler(
        ...     dataset_name="my-dataset",
        ...     workspace_name="my-workspace",
        ...     api_url="http://localhost:6900",
        ...     api_key="argilla.apikey",
        ... )
        >>> llm = OpenAI(
        ...     temperature=0,
        ...     callbacks=[argilla_callback],
        ...     verbose=True,
        ...     openai_api_key="API_KEY_HERE",
        ... )
        >>> llm.generate([
        ...     "What is the best NLP-annotation tool out there? (no bias at all)",
        ... ])
        "Argilla, no doubt about it."
    """

    def __init__(
        self,
        dataset_name: str,
        workspace_name: Optional[str] = None,
        api_url: Optional[str] = None,
        api_key: Optional[str] = None,
    ) -> None:
        """Initializes the `ArgillaCallbackHandler`.

        Args:
            dataset_name: name of the `FeedbackDataset` in Argilla. Note that it
                must exist in advance. If you need help on how to create a
                `FeedbackDataset` in Argilla, please visit
                https://docs.argilla.io/en/latest/guides/llms/practical_guides/use_argilla_callback_in_langchain.html.
            workspace_name: name of the workspace in Argilla where the specified
                `FeedbackDataset` lives in. Defaults to `None`, which means that
                the default workspace will be used.
            api_url: URL of the Argilla Server that we want to use, and where the
                `FeedbackDataset` lives in. Defaults to `None`, which means that
                either
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/argilla_callback.html
f60492ebc90d-2
                `ARGILLA_API_URL` environment variable or the default
                http://localhost:6900 will be used.
            api_key: API Key to connect to the Argilla Server. Defaults to
                `None`, which means that either `ARGILLA_API_KEY` environment
                variable or the default `argilla.apikey` will be used.

        Raises:
            ImportError: if the `argilla` package is not installed.
            ConnectionError: if the connection to Argilla fails.
            FileNotFoundError: if the `FeedbackDataset` retrieval from Argilla
                fails.
        """
        super().__init__()

        # Import Argilla (not via `import_argilla` to keep hints in IDEs)
        try:
            import argilla as rg  # noqa: F401
        except ImportError:
            raise ImportError(
                "To use the Argilla callback manager you need to have the `argilla` "
                "Python package installed. Please install it with `pip install argilla`"
            )

        # Show a warning message if Argilla will assume the default values will be used
        if api_url is None and os.getenv("ARGILLA_API_URL") is None:
            warnings.warn(
                (
                    "Since `api_url` is None, and the env var `ARGILLA_API_URL` is not"
                    " set, it will default to `http://localhost:6900`."
                ),
            )
        if api_key is None and os.getenv("ARGILLA_API_KEY") is None:
            warnings.warn(
                (
                    "Since `api_key` is None, and the env var `ARGILLA_API_KEY` is not"
                    " set, it will default to `argilla.apikey`."
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/argilla_callback.html
f60492ebc90d-3
" set, it will default to `argilla.apikey`." ), ) # Connect to Argilla with the provided credentials, if applicable try: rg.init( api_key=api_key, api_url=api_url, ) except Exception as e: raise ConnectionError( f"Could not connect to Argilla with exception: '{e}'.\n" "Please check your `api_key` and `api_url`, and make sure that " "the Argilla server is up and running. If the problem persists " "please report it to https://github.com/argilla-io/argilla/issues " "with the label `langchain`." ) from e # Set the Argilla variables self.dataset_name = dataset_name self.workspace_name = workspace_name or rg.get_workspace() # Retrieve the `FeedbackDataset` from Argilla (without existing records) try: self.dataset = rg.FeedbackDataset.from_argilla( name=self.dataset_name, workspace=self.workspace_name, with_records=False, ) except Exception as e: raise FileNotFoundError( "`FeedbackDataset` retrieval from Argilla failed with exception:" f" '{e}'.\nPlease check that the dataset with" f" name={self.dataset_name} in the" f" workspace={self.workspace_name} exists in advance. If you need help" " on how to create a `langchain`-compatible `FeedbackDataset` in" " Argilla, please visit" " https://docs.argilla.io/en/latest/guides/llms/practical_guides/use_argilla_callback_in_langchain.html." # noqa: E501
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/argilla_callback.html
f60492ebc90d-4
" If the problem persists please report it to" " https://github.com/argilla-io/argilla/issues with the label" " `langchain`." ) from e supported_fields = ["prompt", "response"] if supported_fields != [field.name for field in self.dataset.fields]: raise ValueError( f"`FeedbackDataset` with name={self.dataset_name} in the" f" workspace={self.workspace_name} " "had fields that are not supported yet for the `langchain` integration." " Supported fields are: " f"{supported_fields}, and the current `FeedbackDataset` fields are" f" {[field.name for field in self.dataset.fields]}. " "For more information on how to create a `langchain`-compatible" " `FeedbackDataset` in Argilla, please visit" " https://docs.argilla.io/en/latest/guides/llms/practical_guides/use_argilla_callback_in_langchain.html." # noqa: E501 ) self.prompts: Dict[str, List[str]] = {} warnings.warn( ( "The `ArgillaCallbackHandler` is currently in beta and is subject to " "change based on updates to `langchain`. Please report any issues to " "https://github.com/argilla-io/argilla/issues with the tag `langchain`." ), ) [docs] def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: """Save the prompts in memory when an LLM starts.""" self.prompts.update({str(kwargs["parent_run_id"] or kwargs["run_id"]): prompts})
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/argilla_callback.html
f60492ebc90d-5
    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Do nothing when a new token is generated."""
        pass

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Log records to Argilla when an LLM ends."""
        # Do nothing if there's a parent_run_id, since we will log the records
        # when the chain ends
        if kwargs["parent_run_id"]:
            return

        # Creates the records and adds them to the `FeedbackDataset`
        prompts = self.prompts[str(kwargs["run_id"])]
        for prompt, generations in zip(prompts, response.generations):
            self.dataset.add_records(
                records=[
                    {
                        "fields": {
                            "prompt": prompt,
                            "response": generation.text.strip(),
                        },
                    }
                    for generation in generations
                ]
            )

        # Push the records to Argilla
        self.dataset.push_to_argilla()

        # Pop current run from `self.runs`
        self.prompts.pop(str(kwargs["run_id"]))

    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing when LLM outputs an error."""
        pass

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        """If the key `input` is in `inputs`, then save it in `self.prompts` using
        either the `parent_run_id` or the `run_id` as the key. This is done so that
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/argilla_callback.html
f60492ebc90d-6
        we don't log the same input prompt twice, once when the LLM starts and
        once when the chain starts.
        """
        if "input" in inputs:
            self.prompts.update(
                {
                    str(kwargs["parent_run_id"] or kwargs["run_id"]): (
                        inputs["input"]
                        if isinstance(inputs["input"], list)
                        else [inputs["input"]]
                    )
                }
            )

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """If either the `parent_run_id` or the `run_id` is in `self.prompts`, then
        log the outputs to Argilla, and pop the run from `self.prompts`. The
        behavior differs if the output is a list or not.
        """
        if not any(
            key in self.prompts
            for key in [str(kwargs["parent_run_id"]), str(kwargs["run_id"])]
        ):
            return
        prompts = self.prompts.get(str(kwargs["parent_run_id"])) or self.prompts.get(
            str(kwargs["run_id"])
        )
        for chain_output_key, chain_output_val in outputs.items():
            if isinstance(chain_output_val, list):
                # Creates the records and adds them to the `FeedbackDataset`
                self.dataset.add_records(
                    records=[
                        {
                            "fields": {
                                "prompt": prompt,
                                "response": output["text"].strip(),
                            },
                        }
                        for prompt, output in zip(
                            prompts, chain_output_val  # type: ignore
                        )
                    ]
                )
            else:
                # Creates the records and adds them to the `FeedbackDataset`
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/argilla_callback.html
f60492ebc90d-7
                self.dataset.add_records(
                    records=[
                        {
                            "fields": {
                                "prompt": " ".join(prompts),  # type: ignore
                                "response": chain_output_val.strip(),
                            },
                        }
                    ]
                )

        # Push the records to Argilla
        self.dataset.push_to_argilla()

        # Pop current run from `self.runs`
        if str(kwargs["parent_run_id"]) in self.prompts:
            self.prompts.pop(str(kwargs["parent_run_id"]))
        if str(kwargs["run_id"]) in self.prompts:
            self.prompts.pop(str(kwargs["run_id"]))

    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing when LLM chain outputs an error."""
        pass

    def on_tool_start(
        self,
        serialized: Dict[str, Any],
        input_str: str,
        **kwargs: Any,
    ) -> None:
        """Do nothing when tool starts."""
        pass

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Do nothing when agent takes a specific action."""
        pass

    def on_tool_end(
        self,
        output: str,
        observation_prefix: Optional[str] = None,
        llm_prefix: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Do nothing when tool ends."""
        pass

    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing when tool outputs an error."""
        pass
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/argilla_callback.html
f60492ebc90d-8
) -> None: """Do nothing when tool outputs an error.""" pass [docs] def on_text(self, text: str, **kwargs: Any) -> None: """Do nothing""" pass [docs] def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None: """Do nothing""" pass
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/argilla_callback.html
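A hedged sketch of the chain path through on_chain_start/on_chain_end above: the chain's input variable must be named "input" for the handler to capture the prompt. The dataset/workspace names reuse the docstring example; the prompt template is illustrative.

from langchain.callbacks import ArgillaCallbackHandler
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

argilla_callback = ArgillaCallbackHandler(
    dataset_name="my-dataset",
    workspace_name="my-workspace",
)
# "input" is the key on_chain_start looks for when caching the prompt.
prompt = PromptTemplate(input_variables=["input"], template="Summarize: {input}")
chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt, callbacks=[argilla_callback])
chain.run("Argilla is an open-source data curation platform for LLMs.")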
3e71cfcbed2d-0
Source code for langchain.callbacks.wandb_callback

import json
import tempfile
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union

from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.utils import (
    BaseMetadataCallbackHandler,
    flatten_dict,
    hash_string,
    import_pandas,
    import_spacy,
    import_textstat,
)
from langchain.schema import AgentAction, AgentFinish, LLMResult


def import_wandb() -> Any:
    """Import the wandb python package and raise an error if it is not installed."""
    try:
        import wandb  # noqa: F401
    except ImportError:
        raise ImportError(
            "To use the wandb callback manager you need to have the `wandb` python "
            "package installed. Please install it with `pip install wandb`"
        )
    return wandb


def load_json_to_dict(json_path: Union[str, Path]) -> dict:
    """Load json file to a dictionary.

    Parameters:
        json_path (str): The path to the json file.

    Returns:
        (dict): The dictionary representation of the json file.
    """
    with open(json_path, "r") as f:
        data = json.load(f)
    return data


def analyze_text(
    text: str,
    complexity_metrics: bool = True,
    visualize: bool = True,
    nlp: Any = None,
    output_dir: Optional[Union[str, Path]] = None,
) -> dict:
    """Analyze text using textstat and spacy.

    Parameters:
        text (str): The text to analyze.
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/wandb_callback.html
3e71cfcbed2d-1
        complexity_metrics (bool): Whether to compute complexity metrics.
        visualize (bool): Whether to visualize the text.
        nlp (spacy.lang): The spacy language model to use for visualization.
        output_dir (str): The directory to save the visualization files to.

    Returns:
        (dict): A dictionary containing the complexity metrics and
            visualization files serialized in a wandb.Html element.
    """
    resp = {}
    textstat = import_textstat()
    wandb = import_wandb()
    spacy = import_spacy()
    if complexity_metrics:
        text_complexity_metrics = {
            "flesch_reading_ease": textstat.flesch_reading_ease(text),
            "flesch_kincaid_grade": textstat.flesch_kincaid_grade(text),
            "smog_index": textstat.smog_index(text),
            "coleman_liau_index": textstat.coleman_liau_index(text),
            "automated_readability_index": textstat.automated_readability_index(text),
            "dale_chall_readability_score": textstat.dale_chall_readability_score(text),
            "difficult_words": textstat.difficult_words(text),
            "linsear_write_formula": textstat.linsear_write_formula(text),
            "gunning_fog": textstat.gunning_fog(text),
            "text_standard": textstat.text_standard(text),
            "fernandez_huerta": textstat.fernandez_huerta(text),
            "szigriszt_pazos": textstat.szigriszt_pazos(text),
            "gutierrez_polini": textstat.gutierrez_polini(text),
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/wandb_callback.html
3e71cfcbed2d-2
"gutierrez_polini": textstat.gutierrez_polini(text), "crawford": textstat.crawford(text), "gulpease_index": textstat.gulpease_index(text), "osman": textstat.osman(text), } resp.update(text_complexity_metrics) if visualize and nlp and output_dir is not None: doc = nlp(text) dep_out = spacy.displacy.render( # type: ignore doc, style="dep", jupyter=False, page=True ) dep_output_path = Path(output_dir, hash_string(f"dep-{text}") + ".html") dep_output_path.open("w", encoding="utf-8").write(dep_out) ent_out = spacy.displacy.render( # type: ignore doc, style="ent", jupyter=False, page=True ) ent_output_path = Path(output_dir, hash_string(f"ent-{text}") + ".html") ent_output_path.open("w", encoding="utf-8").write(ent_out) text_visualizations = { "dependency_tree": wandb.Html(str(dep_output_path)), "entities": wandb.Html(str(ent_output_path)), } resp.update(text_visualizations) return resp [docs]def construct_html_from_prompt_and_generation(prompt: str, generation: str) -> Any: """Construct an html element from a prompt and a generation. Parameters: prompt (str): The prompt. generation (str): The generation. Returns: (wandb.Html): The html element.""" wandb = import_wandb() formatted_prompt = prompt.replace("\n", "<br>")
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/wandb_callback.html
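A hedged sketch of calling analyze_text directly with metrics only (wandb and spacy must still be importable, since the function imports them unconditionally; the sample sentence is illustrative):

from langchain.callbacks.wandb_callback import analyze_text

metrics = analyze_text(
    "The quick brown fox jumps over the lazy dog.",
    complexity_metrics=True,
    visualize=False,  # skip displacy rendering, so nlp/output_dir can stay unset
)
print(metrics["flesch_reading_ease"], metrics["gunning_fog"])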
3e71cfcbed2d-3
    formatted_generation = generation.replace("\n", "<br>")
    return wandb.Html(
        f"""
    <p style="color:black;">{formatted_prompt}:</p>
    <blockquote>
      <p style="color:green;">
        {formatted_generation}
      </p>
    </blockquote>
    """,
        inject=False,
    )


class WandbCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
    """Callback Handler that logs to Weights and Biases.

    Parameters:
        job_type (str): The type of job.
        project (str): The project to log to.
        entity (str): The entity to log to.
        tags (list): The tags to log.
        group (str): The group to log to.
        name (str): The name of the run.
        notes (str): The notes to log.
        visualize (bool): Whether to visualize the run.
        complexity_metrics (bool): Whether to log complexity metrics.
        stream_logs (bool): Whether to stream callback actions to W&B.

    This handler formats the input of each invoked callback method with
    metadata about the state of the LLM run, appends the response to the
    {method}_records and action record lists, and then logs the response to
    Weights and Biases via run.log().
    """

    def __init__(
        self,
        job_type: Optional[str] = None,
        project: Optional[str] = "langchain_callback_demo",
        entity: Optional[str] = None,
        tags: Optional[Sequence] = None,
        group: Optional[str] = None,
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/wandb_callback.html
3e71cfcbed2d-4
        name: Optional[str] = None,
        notes: Optional[str] = None,
        visualize: bool = False,
        complexity_metrics: bool = False,
        stream_logs: bool = False,
    ) -> None:
        """Initialize callback handler."""
        wandb = import_wandb()
        import_pandas()
        import_textstat()
        spacy = import_spacy()
        super().__init__()

        self.job_type = job_type
        self.project = project
        self.entity = entity
        self.tags = tags
        self.group = group
        self.name = name
        self.notes = notes
        self.visualize = visualize
        self.complexity_metrics = complexity_metrics
        self.stream_logs = stream_logs

        self.temp_dir = tempfile.TemporaryDirectory()
        self.run: wandb.sdk.wandb_run.Run = wandb.init(  # type: ignore
            job_type=self.job_type,
            project=self.project,
            entity=self.entity,
            tags=self.tags,
            group=self.group,
            name=self.name,
            notes=self.notes,
        )
        warning = (
            "DEPRECATION: The `WandbCallbackHandler` will soon be deprecated in favor "
            "of the `WandbTracer`. Please update your code to use the `WandbTracer` "
            "instead."
        )
        wandb.termwarn(
            warning,
            repeat=False,
        )
        self.callback_columns: list = []
        self.action_records: list = []
        self.complexity_metrics = complexity_metrics
        self.visualize = visualize
        self.nlp = spacy.load("en_core_web_sm")
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/wandb_callback.html
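A minimal usage sketch for the constructor above, assuming `wandb login` has been run, an OpenAI key is available, and the en_core_web_sm spacy model is downloaded; the project name and prompt are illustrative.

from langchain.callbacks import WandbCallbackHandler
from langchain.llms import OpenAI

wandb_callback = WandbCallbackHandler(
    project="langchain_callback_demo",
    complexity_metrics=True,
    stream_logs=True,  # log each callback action as it happens
)
llm = OpenAI(temperature=0, callbacks=[wandb_callback])
llm.generate(["Tell me a joke about version control."])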
3e71cfcbed2d-5
    def _init_resp(self) -> Dict:
        return {k: None for k in self.callback_columns}

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Run when LLM starts."""
        self.step += 1
        self.llm_starts += 1
        self.starts += 1

        resp = self._init_resp()
        resp.update({"action": "on_llm_start"})
        resp.update(flatten_dict(serialized))
        resp.update(self.get_custom_callback_meta())

        for prompt in prompts:
            prompt_resp = deepcopy(resp)
            prompt_resp["prompts"] = prompt
            self.on_llm_start_records.append(prompt_resp)
            self.action_records.append(prompt_resp)
            if self.stream_logs:
                self.run.log(prompt_resp)

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Run when LLM generates a new token."""
        self.step += 1
        self.llm_streams += 1

        resp = self._init_resp()
        resp.update({"action": "on_llm_new_token", "token": token})
        resp.update(self.get_custom_callback_meta())

        self.on_llm_token_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.run.log(resp)

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Run when LLM ends running."""
        self.step += 1
        self.llm_ends += 1
        self.ends += 1

        resp = self._init_resp()
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/wandb_callback.html
3e71cfcbed2d-6
        resp.update({"action": "on_llm_end"})
        resp.update(flatten_dict(response.llm_output or {}))
        resp.update(self.get_custom_callback_meta())

        for generations in response.generations:
            for generation in generations:
                generation_resp = deepcopy(resp)
                generation_resp.update(flatten_dict(generation.dict()))
                generation_resp.update(
                    analyze_text(
                        generation.text,
                        complexity_metrics=self.complexity_metrics,
                        visualize=self.visualize,
                        nlp=self.nlp,
                        output_dir=self.temp_dir.name,
                    )
                )
                self.on_llm_end_records.append(generation_resp)
                self.action_records.append(generation_resp)
                if self.stream_logs:
                    self.run.log(generation_resp)

    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when LLM errors."""
        self.step += 1
        self.errors += 1

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        """Run when chain starts running."""
        self.step += 1
        self.chain_starts += 1
        self.starts += 1

        resp = self._init_resp()
        resp.update({"action": "on_chain_start"})
        resp.update(flatten_dict(serialized))
        resp.update(self.get_custom_callback_meta())

        chain_input = inputs["input"]
        if isinstance(chain_input, str):
            input_resp = deepcopy(resp)
            input_resp["input"] = chain_input
            self.on_chain_start_records.append(input_resp)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/wandb_callback.html
3e71cfcbed2d-7
            self.action_records.append(input_resp)
            if self.stream_logs:
                self.run.log(input_resp)
        elif isinstance(chain_input, list):
            for inp in chain_input:
                input_resp = deepcopy(resp)
                input_resp.update(inp)
                self.on_chain_start_records.append(input_resp)
                self.action_records.append(input_resp)
                if self.stream_logs:
                    self.run.log(input_resp)
        else:
            raise ValueError("Unexpected data format provided!")

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Run when chain ends running."""
        self.step += 1
        self.chain_ends += 1
        self.ends += 1

        resp = self._init_resp()
        resp.update({"action": "on_chain_end", "outputs": outputs["output"]})
        resp.update(self.get_custom_callback_meta())

        self.on_chain_end_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.run.log(resp)

    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when chain errors."""
        self.step += 1
        self.errors += 1

    def on_tool_start(
        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
    ) -> None:
        """Run when tool starts running."""
        self.step += 1
        self.tool_starts += 1
        self.starts += 1

        resp = self._init_resp()
        resp.update({"action": "on_tool_start", "input_str": input_str})
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/wandb_callback.html
3e71cfcbed2d-8
resp.update({"action": "on_tool_start", "input_str": input_str}) resp.update(flatten_dict(serialized)) resp.update(self.get_custom_callback_meta()) self.on_tool_start_records.append(resp) self.action_records.append(resp) if self.stream_logs: self.run.log(resp) [docs] def on_tool_end(self, output: str, **kwargs: Any) -> None: """Run when tool ends running.""" self.step += 1 self.tool_ends += 1 self.ends += 1 resp = self._init_resp() resp.update({"action": "on_tool_end", "output": output}) resp.update(self.get_custom_callback_meta()) self.on_tool_end_records.append(resp) self.action_records.append(resp) if self.stream_logs: self.run.log(resp) [docs] def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Run when tool errors.""" self.step += 1 self.errors += 1 [docs] def on_text(self, text: str, **kwargs: Any) -> None: """ Run when agent is ending. """ self.step += 1 self.text_ctr += 1 resp = self._init_resp() resp.update({"action": "on_text", "text": text}) resp.update(self.get_custom_callback_meta()) self.on_text_records.append(resp) self.action_records.append(resp) if self.stream_logs: self.run.log(resp) [docs] def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None: """Run when agent ends running."""
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/wandb_callback.html
3e71cfcbed2d-9
"""Run when agent ends running.""" self.step += 1 self.agent_ends += 1 self.ends += 1 resp = self._init_resp() resp.update( { "action": "on_agent_finish", "output": finish.return_values["output"], "log": finish.log, } ) resp.update(self.get_custom_callback_meta()) self.on_agent_finish_records.append(resp) self.action_records.append(resp) if self.stream_logs: self.run.log(resp) [docs] def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run on agent action.""" self.step += 1 self.tool_starts += 1 self.starts += 1 resp = self._init_resp() resp.update( { "action": "on_agent_action", "tool": action.tool, "tool_input": action.tool_input, "log": action.log, } ) resp.update(self.get_custom_callback_meta()) self.on_agent_action_records.append(resp) self.action_records.append(resp) if self.stream_logs: self.run.log(resp) def _create_session_analysis_df(self) -> Any: """Create a dataframe with all the information from the session.""" pd = import_pandas() on_llm_start_records_df = pd.DataFrame(self.on_llm_start_records) on_llm_end_records_df = pd.DataFrame(self.on_llm_end_records) llm_input_prompts_df = ( on_llm_start_records_df[["step", "prompts", "name"]] .dropna(axis=1)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/wandb_callback.html
3e71cfcbed2d-10
            .rename({"step": "prompt_step"}, axis=1)
        )
        complexity_metrics_columns = []
        visualizations_columns = []

        if self.complexity_metrics:
            complexity_metrics_columns = [
                "flesch_reading_ease",
                "flesch_kincaid_grade",
                "smog_index",
                "coleman_liau_index",
                "automated_readability_index",
                "dale_chall_readability_score",
                "difficult_words",
                "linsear_write_formula",
                "gunning_fog",
                "text_standard",
                "fernandez_huerta",
                "szigriszt_pazos",
                "gutierrez_polini",
                "crawford",
                "gulpease_index",
                "osman",
            ]

        if self.visualize:
            visualizations_columns = ["dependency_tree", "entities"]

        llm_outputs_df = (
            on_llm_end_records_df[
                [
                    "step",
                    "text",
                    "token_usage_total_tokens",
                    "token_usage_prompt_tokens",
                    "token_usage_completion_tokens",
                ]
                + complexity_metrics_columns
                + visualizations_columns
            ]
            .dropna(axis=1)
            .rename({"step": "output_step", "text": "output"}, axis=1)
        )
        session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df], axis=1)
        session_analysis_df["chat_html"] = session_analysis_df[
            ["prompts", "output"]
        ].apply(
            lambda row: construct_html_from_prompt_and_generation(
                row["prompts"], row["output"]
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/wandb_callback.html
3e71cfcbed2d-11
row["prompts"], row["output"] ), axis=1, ) return session_analysis_df [docs] def flush_tracker( self, langchain_asset: Any = None, reset: bool = True, finish: bool = False, job_type: Optional[str] = None, project: Optional[str] = None, entity: Optional[str] = None, tags: Optional[Sequence] = None, group: Optional[str] = None, name: Optional[str] = None, notes: Optional[str] = None, visualize: Optional[bool] = None, complexity_metrics: Optional[bool] = None, ) -> None: """Flush the tracker and reset the session. Args: langchain_asset: The langchain asset to save. reset: Whether to reset the session. finish: Whether to finish the run. job_type: The job type. project: The project. entity: The entity. tags: The tags. group: The group. name: The name. notes: The notes. visualize: Whether to visualize. complexity_metrics: Whether to compute complexity metrics. Returns: None """ pd = import_pandas() wandb = import_wandb() action_records_table = wandb.Table(dataframe=pd.DataFrame(self.action_records)) session_analysis_table = wandb.Table( dataframe=self._create_session_analysis_df() ) self.run.log( { "action_records": action_records_table, "session_analysis": session_analysis_table, } ) if langchain_asset:
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/wandb_callback.html
3e71cfcbed2d-12
            langchain_asset_path = Path(self.temp_dir.name, "model.json")
            model_artifact = wandb.Artifact(name="model", type="model")
            model_artifact.add(action_records_table, name="action_records")
            model_artifact.add(session_analysis_table, name="session_analysis")
            try:
                langchain_asset.save(langchain_asset_path)
                model_artifact.add_file(str(langchain_asset_path))
                model_artifact.metadata = load_json_to_dict(langchain_asset_path)
            except ValueError:
                langchain_asset.save_agent(langchain_asset_path)
                model_artifact.add_file(str(langchain_asset_path))
                model_artifact.metadata = load_json_to_dict(langchain_asset_path)
            except NotImplementedError as e:
                print("Could not save model.")
                print(repr(e))
                pass
            self.run.log_artifact(model_artifact)

        if finish or reset:
            self.run.finish()
            self.temp_dir.cleanup()
            self.reset_callback_meta()
        if reset:
            self.__init__(  # type: ignore
                job_type=job_type if job_type else self.job_type,
                project=project if project else self.project,
                entity=entity if entity else self.entity,
                tags=tags if tags else self.tags,
                group=group if group else self.group,
                name=name if name else self.name,
                notes=notes if notes else self.notes,
                visualize=visualize if visualize else self.visualize,
                complexity_metrics=complexity_metrics
                if complexity_metrics
                else self.complexity_metrics,
            )
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/wandb_callback.html
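Continuing the earlier sketch, a hedged illustration of the reset/finish semantics of flush_tracker: reset=True logs the tables and re-initializes a fresh run, while finish=True closes the run for good. `llm` and `wandb_callback` are as defined in that sketch; run names are illustrative.

wandb_callback.flush_tracker(llm, name="experiment-1", reset=True)
llm.generate(["Another prompt, logged to the second run."])
wandb_callback.flush_tracker(llm, finish=True)  # final flush, close the W&B run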
7905a485ac3e-0
Source code for langchain.callbacks.manager

from __future__ import annotations

import asyncio
import functools
import logging
import os
import warnings
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
    Any,
    AsyncGenerator,
    Dict,
    Generator,
    List,
    Optional,
    Sequence,
    Type,
    TypeVar,
    Union,
    cast,
)
from uuid import UUID, uuid4

import langchain
from langchain.callbacks.base import (
    BaseCallbackHandler,
    BaseCallbackManager,
    ChainManagerMixin,
    LLMManagerMixin,
    RetrieverManagerMixin,
    RunManagerMixin,
    ToolManagerMixin,
)
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.tracers.langchain import LangChainTracer
from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1
from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler
from langchain.callbacks.tracers.wandb import WandbTracer
from langchain.schema import (
    AgentAction,
    AgentFinish,
    BaseMessage,
    Document,
    LLMResult,
    get_buffer_string,
)

logger = logging.getLogger(__name__)

Callbacks = Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]]

openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar(
    "openai_callback", default=None
)
tracing_callback_var: ContextVar[
    Optional[LangChainTracerV1]
] = ContextVar(  # noqa: E501
    "tracing_callback", default=None
)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-1
"tracing_callback", default=None ) wandb_tracing_callback_var: ContextVar[ Optional[WandbTracer] ] = ContextVar( # noqa: E501 "tracing_wandb_callback", default=None ) tracing_v2_callback_var: ContextVar[ Optional[LangChainTracer] ] = ContextVar( # noqa: E501 "tracing_callback_v2", default=None ) def _get_debug() -> bool: return langchain.debug [docs]@contextmanager def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]: """Get the OpenAI callback handler in a context manager. which conveniently exposes token and cost information. Returns: OpenAICallbackHandler: The OpenAI callback handler. Example: >>> with get_openai_callback() as cb: ... # Use the OpenAI callback handler """ cb = OpenAICallbackHandler() openai_callback_var.set(cb) yield cb openai_callback_var.set(None) [docs]@contextmanager def tracing_enabled( session_name: str = "default", ) -> Generator[TracerSessionV1, None, None]: """Get the Deprecated LangChainTracer in a context manager. Args: session_name (str, optional): The name of the session. Defaults to "default". Returns: TracerSessionV1: The LangChainTracer session. Example: >>> with tracing_enabled() as session: ... # Use the LangChainTracer session """ cb = LangChainTracerV1()
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
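A concrete hedged sketch of get_openai_callback (assumes an OpenAI key in the environment; the counter attributes come from OpenAICallbackHandler):

from langchain.callbacks import get_openai_callback
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
with get_openai_callback() as cb:
    llm.predict("Name three prime numbers.")
# Token/cost counters accumulate across all calls made inside the block.
print(cb.total_tokens, cb.prompt_tokens, cb.completion_tokens, cb.total_cost)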
7905a485ac3e-2
""" cb = LangChainTracerV1() session = cast(TracerSessionV1, cb.load_session(session_name)) tracing_callback_var.set(cb) yield session tracing_callback_var.set(None) [docs]@contextmanager def wandb_tracing_enabled( session_name: str = "default", ) -> Generator[None, None, None]: """Get the WandbTracer in a context manager. Args: session_name (str, optional): The name of the session. Defaults to "default". Returns: None Example: >>> with wandb_tracing_enabled() as session: ... # Use the WandbTracer session """ cb = WandbTracer() wandb_tracing_callback_var.set(cb) yield None wandb_tracing_callback_var.set(None) [docs]@contextmanager def tracing_v2_enabled( project_name: Optional[str] = None, *, example_id: Optional[Union[str, UUID]] = None, ) -> Generator[None, None, None]: """Instruct LangChain to log all runs in context to LangSmith. Args: project_name (str, optional): The name of the project. Defaults to "default". example_id (str or UUID, optional): The ID of the example. Defaults to None. Returns: None Example: >>> with tracing_v2_enabled(): ... # LangChain code will automatically be traced """ # Issue a warning that this is experimental warnings.warn( "The tracing v2 API is in development. " "This is not yet stable and may change in the future."
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-3
"This is not yet stable and may change in the future." ) if isinstance(example_id, str): example_id = UUID(example_id) cb = LangChainTracer( example_id=example_id, project_name=project_name, ) tracing_v2_callback_var.set(cb) yield tracing_v2_callback_var.set(None) [docs]@contextmanager def trace_as_chain_group( group_name: str, *, project_name: Optional[str] = None, example_id: Optional[Union[str, UUID]] = None, tags: Optional[List[str]] = None, ) -> Generator[CallbackManager, None, None]: """Get a callback manager for a chain group in a context manager. Useful for grouping different calls together as a single run even if they aren't composed in a single chain. Args: group_name (str): The name of the chain group. project_name (str, optional): The name of the project. Defaults to None. example_id (str or UUID, optional): The ID of the example. Defaults to None. tags (List[str], optional): The inheritable tags to apply to all runs. Defaults to None. Returns: CallbackManager: The callback manager for the chain group. Example: >>> with trace_as_chain_group("group_name") as manager: ... # Use the callback manager for the chain group ... llm.predict("Foo", callbacks=manager) """ cb = LangChainTracer( project_name=project_name, example_id=example_id, ) cm = CallbackManager.configure( inheritable_callbacks=[cb],
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-4
        inheritable_tags=tags,
    )

    run_manager = cm.on_chain_start({"name": group_name}, {})
    yield run_manager.get_child()
    run_manager.on_chain_end({})


@asynccontextmanager
async def atrace_as_chain_group(
    group_name: str,
    *,
    project_name: Optional[str] = None,
    example_id: Optional[Union[str, UUID]] = None,
    tags: Optional[List[str]] = None,
) -> AsyncGenerator[AsyncCallbackManager, None]:
    """Get an async callback manager for a chain group in a context manager.

    Useful for grouping different async calls together as a single run even if
    they aren't composed in a single chain.

    Args:
        group_name (str): The name of the chain group.
        project_name (str, optional): The name of the project.
            Defaults to None.
        example_id (str or UUID, optional): The ID of the example.
            Defaults to None.
        tags (List[str], optional): The inheritable tags to apply to all runs.
            Defaults to None.

    Returns:
        AsyncCallbackManager: The async callback manager for the chain group.

    Example:
        >>> async with atrace_as_chain_group("group_name") as manager:
        ...     # Use the async callback manager for the chain group
        ...     await llm.apredict("Foo", callbacks=manager)
    """
    cb = LangChainTracer(
        project_name=project_name,
        example_id=example_id,
    )
    cm = AsyncCallbackManager.configure(
        inheritable_callbacks=[cb], inheritable_tags=tags
    )
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-5
    run_manager = await cm.on_chain_start({"name": group_name}, {})
    try:
        yield run_manager.get_child()
    finally:
        await run_manager.on_chain_end({})


def _handle_event(
    handlers: List[BaseCallbackHandler],
    event_name: str,
    ignore_condition_name: Optional[str],
    *args: Any,
    **kwargs: Any,
) -> None:
    """Generic event handler for CallbackManager."""
    message_strings: Optional[List[str]] = None
    for handler in handlers:
        try:
            if ignore_condition_name is None or not getattr(
                handler, ignore_condition_name
            ):
                getattr(handler, event_name)(*args, **kwargs)
        except NotImplementedError as e:
            if event_name == "on_chat_model_start":
                if message_strings is None:
                    message_strings = [get_buffer_string(m) for m in args[1]]
                _handle_event(
                    [handler],
                    "on_llm_start",
                    "ignore_llm",
                    args[0],
                    message_strings,
                    *args[2:],
                    **kwargs,
                )
            else:
                logger.warning(
                    f"Error in {handler.__class__.__name__}.{event_name} callback: {e}"
                )
        except Exception as e:
            logger.warning(
                f"Error in {handler.__class__.__name__}.{event_name} callback: {e}"
            )
            if handler.raise_error:
                raise e


async def _ahandle_event_for_handler(
    handler: BaseCallbackHandler,
    event_name: str,
    ignore_condition_name: Optional[str],
    *args: Any,
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
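A hedged sketch of atrace_as_chain_group grouping two async calls under one traced run (assumes LangSmith tracing is configured via environment variables; the group name and prompts are illustrative):

import asyncio

from langchain.callbacks.manager import atrace_as_chain_group
from langchain.llms import OpenAI

async def main() -> None:
    llm = OpenAI(temperature=0)
    async with atrace_as_chain_group("my-eval-group") as manager:
        # Both calls are traced as children of the "my-eval-group" run.
        await llm.apredict("Ping?", callbacks=manager)
        await llm.apredict("Pong?", callbacks=manager)

asyncio.run(main())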
7905a485ac3e-6
    **kwargs: Any,
) -> None:
    try:
        if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
            event = getattr(handler, event_name)
            if asyncio.iscoroutinefunction(event):
                await event(*args, **kwargs)
            else:
                if handler.run_inline:
                    event(*args, **kwargs)
                else:
                    await asyncio.get_event_loop().run_in_executor(
                        None, functools.partial(event, *args, **kwargs)
                    )
    except NotImplementedError as e:
        if event_name == "on_chat_model_start":
            message_strings = [get_buffer_string(m) for m in args[1]]
            await _ahandle_event_for_handler(
                handler,
                "on_llm_start",
                "ignore_llm",
                args[0],
                message_strings,
                *args[2:],
                **kwargs,
            )
        else:
            logger.warning(
                f"Error in {handler.__class__.__name__}.{event_name} callback: {e}"
            )
    except Exception as e:
        logger.warning(
            f"Error in {handler.__class__.__name__}.{event_name} callback: {e}"
        )
        if handler.raise_error:
            raise e


async def _ahandle_event(
    handlers: List[BaseCallbackHandler],
    event_name: str,
    ignore_condition_name: Optional[str],
    *args: Any,
    **kwargs: Any,
) -> None:
    """Generic event handler for AsyncCallbackManager."""
    for handler in [h for h in handlers if h.run_inline]:
        await _ahandle_event_for_handler(
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-7
            handler, event_name, ignore_condition_name, *args, **kwargs
        )
    await asyncio.gather(
        *(
            _ahandle_event_for_handler(
                handler, event_name, ignore_condition_name, *args, **kwargs
            )
            for handler in handlers
            if not handler.run_inline
        )
    )


BRM = TypeVar("BRM", bound="BaseRunManager")


class BaseRunManager(RunManagerMixin):
    """Base class for run manager (a bound callback manager)."""

    def __init__(
        self,
        *,
        run_id: UUID,
        handlers: List[BaseCallbackHandler],
        inheritable_handlers: List[BaseCallbackHandler],
        parent_run_id: Optional[UUID] = None,
        tags: Optional[List[str]] = None,
        inheritable_tags: Optional[List[str]] = None,
    ) -> None:
        """Initialize the run manager.

        Args:
            run_id (UUID): The ID of the run.
            handlers (List[BaseCallbackHandler]): The list of handlers.
            inheritable_handlers (List[BaseCallbackHandler]):
                The list of inheritable handlers.
            parent_run_id (UUID, optional): The ID of the parent run.
                Defaults to None.
            tags (Optional[List[str]]): The list of tags.
            inheritable_tags (Optional[List[str]]): The list of inheritable tags.
        """
        self.run_id = run_id
        self.handlers = handlers
        self.inheritable_handlers = inheritable_handlers
        self.parent_run_id = parent_run_id
        self.tags = tags or []
        self.inheritable_tags = inheritable_tags or []

    @classmethod
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-8
    def get_noop_manager(cls: Type[BRM]) -> BRM:
        """Return a manager that doesn't perform any operations.

        Returns:
            BaseRunManager: The noop manager.
        """
        return cls(
            run_id=uuid4(),
            handlers=[],
            inheritable_handlers=[],
            tags=[],
            inheritable_tags=[],
        )


class RunManager(BaseRunManager):
    """Sync Run Manager."""

    def on_text(
        self,
        text: str,
        **kwargs: Any,
    ) -> Any:
        """Run when text is received.

        Args:
            text (str): The received text.

        Returns:
            Any: The result of the callback.
        """
        _handle_event(
            self.handlers,
            "on_text",
            None,
            text,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )


class AsyncRunManager(BaseRunManager):
    """Async Run Manager."""

    async def on_text(
        self,
        text: str,
        **kwargs: Any,
    ) -> Any:
        """Run when text is received.

        Args:
            text (str): The received text.

        Returns:
            Any: The result of the callback.
        """
        await _ahandle_event(
            self.handlers,
            "on_text",
            None,
            text,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-9
class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):
    """Callback manager for LLM run."""

    def on_llm_new_token(
        self,
        token: str,
        **kwargs: Any,
    ) -> None:
        """Run when LLM generates a new token.

        Args:
            token (str): The new token.
        """
        _handle_event(
            self.handlers,
            "on_llm_new_token",
            "ignore_llm",
            token=token,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Run when LLM ends running.

        Args:
            response (LLMResult): The LLM result.
        """
        _handle_event(
            self.handlers,
            "on_llm_end",
            "ignore_llm",
            response,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    def on_llm_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        **kwargs: Any,
    ) -> None:
        """Run when LLM errors.

        Args:
            error (Exception or KeyboardInterrupt): The error.
        """
        _handle_event(
            self.handlers,
            "on_llm_error",
            "ignore_llm",
            error,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-10
            **kwargs,
        )


class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
    """Async callback manager for LLM run."""

    async def on_llm_new_token(
        self,
        token: str,
        **kwargs: Any,
    ) -> None:
        """Run when LLM generates a new token.

        Args:
            token (str): The new token.
        """
        await _ahandle_event(
            self.handlers,
            "on_llm_new_token",
            "ignore_llm",
            token,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Run when LLM ends running.

        Args:
            response (LLMResult): The LLM result.
        """
        await _ahandle_event(
            self.handlers,
            "on_llm_end",
            "ignore_llm",
            response,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    async def on_llm_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        **kwargs: Any,
    ) -> None:
        """Run when LLM errors.

        Args:
            error (Exception or KeyboardInterrupt): The error.
        """
        await _ahandle_event(
            self.handlers,
            "on_llm_error",
            "ignore_llm",
            error,
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-11
"on_llm_error", "ignore_llm", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) [docs]class CallbackManagerForChainRun(RunManager, ChainManagerMixin): """Callback manager for chain run.""" [docs] def get_child(self, tag: Optional[str] = None) -> CallbackManager: """Get a child callback manager. Args: tag (str, optional): The tag for the child callback manager. Defaults to None. Returns: CallbackManager: The child callback manager. """ manager = CallbackManager(handlers=[], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) manager.add_tags(self.inheritable_tags) if tag is not None: manager.add_tags([tag], False) return manager [docs] def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Run when chain ends running. Args: outputs (Dict[str, Any]): The outputs of the chain. """ _handle_event( self.handlers, "on_chain_end", "ignore_chain", outputs, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) [docs] def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when chain errors. Args: error (Exception or KeyboardInterrupt): The error. """ _handle_event( self.handlers, "on_chain_error",
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
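A hedged sketch of the parent/child pattern get_child supports: a chain implementation hands its run manager's child to a nested LLM call, so the inheritable handlers, tags, and parent_run_id propagate. The helper name is illustrative.

from typing import Any

from langchain.callbacks.manager import CallbackManagerForChainRun

def call_inner_llm(llm: Any, prompt: str, run_manager: CallbackManagerForChainRun) -> str:
    # The child manager is parented to this chain run and carries its
    # inheritable handlers and tags (plus the optional extra tag).
    child = run_manager.get_child(tag="inner-llm")
    return llm.predict(prompt, callbacks=child)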
7905a485ac3e-12
            "ignore_chain",
            error,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Run when agent action is received.

        Args:
            action (AgentAction): The agent action.

        Returns:
            Any: The result of the callback.
        """
        _handle_event(
            self.handlers,
            "on_agent_action",
            "ignore_agent",
            action,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
        """Run when agent finish is received.

        Args:
            finish (AgentFinish): The agent finish.

        Returns:
            Any: The result of the callback.
        """
        _handle_event(
            self.handlers,
            "on_agent_finish",
            "ignore_agent",
            finish,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )


class AsyncCallbackManagerForChainRun(AsyncRunManager, ChainManagerMixin):
    """Async callback manager for chain run."""

    def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager:
        """Get a child callback manager.

        Args:
            tag (str, optional): The tag for the child callback manager.
                Defaults to None.

        Returns:
            AsyncCallbackManager: The child callback manager.
        """
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-13
        manager = AsyncCallbackManager(handlers=[], parent_run_id=self.run_id)
        manager.set_handlers(self.inheritable_handlers)
        manager.add_tags(self.inheritable_tags)
        if tag is not None:
            manager.add_tags([tag], False)
        return manager

    async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Run when chain ends running.

        Args:
            outputs (Dict[str, Any]): The outputs of the chain.
        """
        await _ahandle_event(
            self.handlers,
            "on_chain_end",
            "ignore_chain",
            outputs,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    async def on_chain_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        **kwargs: Any,
    ) -> None:
        """Run when chain errors.

        Args:
            error (Exception or KeyboardInterrupt): The error.
        """
        await _ahandle_event(
            self.handlers,
            "on_chain_error",
            "ignore_chain",
            error,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Run when agent action is received.

        Args:
            action (AgentAction): The agent action.

        Returns:
            Any: The result of the callback.
        """
        await _ahandle_event(
            self.handlers,
            "on_agent_action",
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-14
            "ignore_agent",
            action,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
        """Run when agent finish is received.

        Args:
            finish (AgentFinish): The agent finish.

        Returns:
            Any: The result of the callback.
        """
        await _ahandle_event(
            self.handlers,
            "on_agent_finish",
            "ignore_agent",
            finish,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )


class CallbackManagerForToolRun(RunManager, ToolManagerMixin):
    """Callback manager for tool run."""

    def get_child(self, tag: Optional[str] = None) -> CallbackManager:
        """Get a child callback manager.

        Args:
            tag (str, optional): The tag for the child callback manager.
                Defaults to None.

        Returns:
            CallbackManager: The child callback manager.
        """
        manager = CallbackManager(handlers=[], parent_run_id=self.run_id)
        manager.set_handlers(self.inheritable_handlers)
        manager.add_tags(self.inheritable_tags)
        if tag is not None:
            manager.add_tags([tag], False)
        return manager

    def on_tool_end(
        self,
        output: str,
        **kwargs: Any,
    ) -> None:
        """Run when tool ends running.

        Args:
            output (str): The output of the tool.
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-15
        """
        _handle_event(
            self.handlers,
            "on_tool_end",
            "ignore_agent",
            output,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    def on_tool_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        **kwargs: Any,
    ) -> None:
        """Run when tool errors.

        Args:
            error (Exception or KeyboardInterrupt): The error.
        """
        _handle_event(
            self.handlers,
            "on_tool_error",
            "ignore_agent",
            error,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )


class AsyncCallbackManagerForToolRun(AsyncRunManager, ToolManagerMixin):
    """Async callback manager for tool run."""

    def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager:
        """Get a child callback manager.

        Args:
            tag (str, optional): The tag to add to the child callback manager.
                Defaults to None.

        Returns:
            AsyncCallbackManager: The child callback manager.
        """
        manager = AsyncCallbackManager(handlers=[], parent_run_id=self.run_id)
        manager.set_handlers(self.inheritable_handlers)
        manager.add_tags(self.inheritable_tags)
        if tag is not None:
            manager.add_tags([tag], False)
        return manager

    async def on_tool_end(self, output: str, **kwargs: Any) -> None:
        """Run when tool ends running.

        Args:
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-16
"""Run when tool ends running. Args: output (str): The output of the tool. """ await _ahandle_event( self.handlers, "on_tool_end", "ignore_agent", output, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) [docs] async def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when tool errors. Args: error (Exception or KeyboardInterrupt): The error. """ await _ahandle_event( self.handlers, "on_tool_error", "ignore_agent", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) [docs]class CallbackManagerForRetrieverRun(RunManager, RetrieverManagerMixin): """Callback manager for retriever run.""" [docs] def get_child(self, tag: Optional[str] = None) -> CallbackManager: """Get a child callback manager.""" manager = CallbackManager([], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) manager.add_tags(self.inheritable_tags) if tag is not None: manager.add_tags([tag], False) return manager [docs] def on_retriever_end( self, documents: Sequence[Document], **kwargs: Any, ) -> None: """Run when retriever ends running.""" _handle_event( self.handlers, "on_retriever_end", "ignore_retriever",
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-17
"on_retriever_end", "ignore_retriever", documents, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) [docs] def on_retriever_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when retriever errors.""" _handle_event( self.handlers, "on_retriever_error", "ignore_retriever", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) [docs]class AsyncCallbackManagerForRetrieverRun( AsyncRunManager, RetrieverManagerMixin, ): """Async callback manager for retriever run.""" [docs] def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager: """Get a child callback manager.""" manager = AsyncCallbackManager([], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) manager.add_tags(self.inheritable_tags) if tag is not None: manager.add_tags([tag], False) return manager [docs] async def on_retriever_end( self, documents: Sequence[Document], **kwargs: Any ) -> None: """Run when retriever ends running.""" await _ahandle_event( self.handlers, "on_retriever_end", "ignore_retriever", documents, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) [docs] async def on_retriever_error(
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-18
        self,
        error: Union[Exception, KeyboardInterrupt],
        **kwargs: Any,
    ) -> None:
        """Run when retriever errors."""
        await _ahandle_event(
            self.handlers,
            "on_retriever_error",
            "ignore_retriever",
            error,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )


class CallbackManager(BaseCallbackManager):
    """Callback manager that can be used to handle callbacks from langchain."""

    def on_llm_start(
        self,
        serialized: Dict[str, Any],
        prompts: List[str],
        **kwargs: Any,
    ) -> List[CallbackManagerForLLMRun]:
        """Run when LLM starts running.

        Args:
            serialized (Dict[str, Any]): The serialized LLM.
            prompts (List[str]): The list of prompts.
            run_id (UUID, optional): The ID of the run. Defaults to None.

        Returns:
            List[CallbackManagerForLLMRun]: A callback manager for each
                prompt as an LLM run.
        """
        managers = []
        for prompt in prompts:
            run_id_ = uuid4()
            _handle_event(
                self.handlers,
                "on_llm_start",
                "ignore_llm",
                serialized,
                [prompt],
                run_id=run_id_,
                parent_run_id=self.parent_run_id,
                tags=self.tags,
                **kwargs,
            )
            managers.append(
                CallbackManagerForLLMRun(
                    run_id=run_id_,
                    handlers=self.handlers,
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-19
run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) ) return managers [docs] def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any, ) -> List[CallbackManagerForLLMRun]: """Run when LLM starts running. Args: serialized (Dict[str, Any]): The serialized LLM. messages (List[List[BaseMessage]]): The list of messages. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: List[CallbackManagerForLLMRun]: A callback manager for each list of messages as an LLM run. """ managers = [] for message_list in messages: run_id_ = uuid4() _handle_event( self.handlers, "on_chat_model_start", "ignore_chat_model", serialized, [message_list], run_id=run_id_, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) managers.append( CallbackManagerForLLMRun( run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) ) return managers [docs] def on_chain_start( self,
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-20
return managers [docs] def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForChainRun: """Run when chain starts running. Args: serialized (Dict[str, Any]): The serialized chain. inputs (Dict[str, Any]): The inputs to the chain. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: CallbackManagerForChainRun: The callback manager for the chain run. """ if run_id is None: run_id = uuid4() _handle_event( self.handlers, "on_chain_start", "ignore_chain", serialized, inputs, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) return CallbackManagerForChainRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) [docs] def on_tool_start( self, serialized: Dict[str, Any], input_str: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForToolRun: """Run when tool starts running. Args: serialized (Dict[str, Any]): The serialized tool.
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-21
Args: serialized (Dict[str, Any]): The serialized tool. input_str (str): The input to the tool. run_id (UUID, optional): The ID of the run. Defaults to None. parent_run_id (UUID, optional): The ID of the parent run. Defaults to None. Returns: CallbackManagerForToolRun: The callback manager for the tool run. """ if run_id is None: run_id = uuid4() _handle_event( self.handlers, "on_tool_start", "ignore_agent", serialized, input_str, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) return CallbackManagerForToolRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) [docs] def on_retriever_start( self, query: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForRetrieverRun: """Run when retriever starts running.""" if run_id is None: run_id = uuid4() _handle_event( self.handlers, "on_retriever_start", "ignore_retriever", query, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return CallbackManagerForRetrieverRun(
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-22
**kwargs, ) return CallbackManagerForRetrieverRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) [docs] @classmethod def configure( cls, inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, inheritable_tags: Optional[List[str]] = None, local_tags: Optional[List[str]] = None, ) -> CallbackManager: """Configure the callback manager. Args: inheritable_callbacks (Optional[Callbacks], optional): The inheritable callbacks. Defaults to None. local_callbacks (Optional[Callbacks], optional): The local callbacks. Defaults to None. verbose (bool, optional): Whether to enable verbose mode. Defaults to False. inheritable_tags (Optional[List[str]], optional): The inheritable tags. Defaults to None. local_tags (Optional[List[str]], optional): The local tags. Defaults to None. Returns: CallbackManager: The configured callback manager. """ return _configure( cls, inheritable_callbacks, local_callbacks, verbose, inheritable_tags, local_tags, ) [docs]class AsyncCallbackManager(BaseCallbackManager): """Async callback manager that can be used to handle callbacks from LangChain.""" @property def is_async(self) -> bool: """Return whether the handler is async.""" return True [docs] async def on_llm_start( self,
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-23
return True [docs] async def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any, ) -> List[AsyncCallbackManagerForLLMRun]: """Run when LLM starts running. Args: serialized (Dict[str, Any]): The serialized LLM. prompts (List[str]): The list of prompts. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: List[AsyncCallbackManagerForLLMRun]: The list of async callback managers, one for each LLM Run corresponding to each prompt. """ tasks = [] managers = [] for prompt in prompts: run_id_ = uuid4() tasks.append( _ahandle_event( self.handlers, "on_llm_start", "ignore_llm", serialized, [prompt], run_id=run_id_, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) ) managers.append( AsyncCallbackManagerForLLMRun( run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) ) await asyncio.gather(*tasks) return managers [docs] async def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any, ) -> Any:
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-24
**kwargs: Any, ) -> Any: """Run when LLM starts running. Args: serialized (Dict[str, Any]): The serialized LLM. messages (List[List[BaseMessage]]): The list of messages. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: List[AsyncCallbackManagerForLLMRun]: The list of async callback managers, one for each LLM Run corresponding to each inner message list. """ tasks = [] managers = [] for message_list in messages: run_id_ = uuid4() tasks.append( _ahandle_event( self.handlers, "on_chat_model_start", "ignore_chat_model", serialized, [message_list], run_id=run_id_, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) ) managers.append( AsyncCallbackManagerForLLMRun( run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) ) await asyncio.gather(*tasks) return managers [docs] async def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForChainRun: """Run when chain starts running. Args: serialized (Dict[str, Any]): The serialized chain.
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-25
Args: serialized (Dict[str, Any]): The serialized chain. inputs (Dict[str, Any]): The inputs to the chain. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: AsyncCallbackManagerForChainRun: The async callback manager for the chain run. """ if run_id is None: run_id = uuid4() await _ahandle_event( self.handlers, "on_chain_start", "ignore_chain", serialized, inputs, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) return AsyncCallbackManagerForChainRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) [docs] async def on_tool_start( self, serialized: Dict[str, Any], input_str: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForToolRun: """Run when tool starts running. Args: serialized (Dict[str, Any]): The serialized tool. input_str (str): The input to the tool. run_id (UUID, optional): The ID of the run. Defaults to None. parent_run_id (UUID, optional): The ID of the parent run. Defaults to None. Returns: AsyncCallbackManagerForToolRun: The async callback manager for the tool run.
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-26
AsyncCallbackManagerForToolRun: The async callback manager for the tool run. """ if run_id is None: run_id = uuid4() await _ahandle_event( self.handlers, "on_tool_start", "ignore_agent", serialized, input_str, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) return AsyncCallbackManagerForToolRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) [docs] async def on_retriever_start( self, query: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForRetrieverRun: """Run when retriever starts running.""" if run_id is None: run_id = uuid4() await _ahandle_event( self.handlers, "on_retriever_start", "ignore_retriever", query, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return AsyncCallbackManagerForRetrieverRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) [docs] @classmethod def configure(
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-27
) [docs] @classmethod def configure( cls, inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, inheritable_tags: Optional[List[str]] = None, local_tags: Optional[List[str]] = None, ) -> AsyncCallbackManager: """Configure the async callback manager. Args: inheritable_callbacks (Optional[Callbacks], optional): The inheritable callbacks. Defaults to None. local_callbacks (Optional[Callbacks], optional): The local callbacks. Defaults to None. verbose (bool, optional): Whether to enable verbose mode. Defaults to False. inheritable_tags (Optional[List[str]], optional): The inheritable tags. Defaults to None. local_tags (Optional[List[str]], optional): The local tags. Defaults to None. Returns: AsyncCallbackManager: The configured async callback manager. """ return _configure( cls, inheritable_callbacks, local_callbacks, verbose, inheritable_tags, local_tags, ) T = TypeVar("T", CallbackManager, AsyncCallbackManager) [docs]def env_var_is_set(env_var: str) -> bool: """Check if an environment variable is set. Args: env_var (str): The name of the environment variable. Returns: bool: True if the environment variable is set, False otherwise. """ return env_var in os.environ and os.environ[env_var] not in ( "", "0", "false", "False", ) def _configure( callback_manager_cls: Type[T],
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-28
) def _configure( callback_manager_cls: Type[T], inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, inheritable_tags: Optional[List[str]] = None, local_tags: Optional[List[str]] = None, ) -> T: """Configure the callback manager. Args: callback_manager_cls (Type[T]): The callback manager class. inheritable_callbacks (Optional[Callbacks], optional): The inheritable callbacks. Defaults to None. local_callbacks (Optional[Callbacks], optional): The local callbacks. Defaults to None. verbose (bool, optional): Whether to enable verbose mode. Defaults to False. inheritable_tags (Optional[List[str]], optional): The inheritable tags. Defaults to None. local_tags (Optional[List[str]], optional): The local tags. Defaults to None. Returns: T: The configured callback manager. """ callback_manager = callback_manager_cls(handlers=[]) if inheritable_callbacks or local_callbacks: if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None: inheritable_callbacks_ = inheritable_callbacks or [] callback_manager = callback_manager_cls( handlers=inheritable_callbacks_.copy(), inheritable_handlers=inheritable_callbacks_.copy(), ) else: callback_manager = callback_manager_cls( handlers=inheritable_callbacks.handlers, inheritable_handlers=inheritable_callbacks.inheritable_handlers, parent_run_id=inheritable_callbacks.parent_run_id, tags=inheritable_callbacks.tags, inheritable_tags=inheritable_callbacks.inheritable_tags, ) local_handlers_ = ( local_callbacks
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-29
) local_handlers_ = ( local_callbacks if isinstance(local_callbacks, list) else (local_callbacks.handlers if local_callbacks else []) ) for handler in local_handlers_: callback_manager.add_handler(handler, False) if inheritable_tags or local_tags: callback_manager.add_tags(inheritable_tags or []) callback_manager.add_tags(local_tags or [], False) tracer = tracing_callback_var.get() wandb_tracer = wandb_tracing_callback_var.get() open_ai = openai_callback_var.get() tracing_enabled_ = ( env_var_is_set("LANGCHAIN_TRACING") or tracer is not None or env_var_is_set("LANGCHAIN_HANDLER") ) wandb_tracing_enabled_ = ( env_var_is_set("LANGCHAIN_WANDB_TRACING") or wandb_tracer is not None ) tracer_v2 = tracing_v2_callback_var.get() tracing_v2_enabled_ = ( env_var_is_set("LANGCHAIN_TRACING_V2") or tracer_v2 is not None ) tracer_project = os.environ.get( "LANGCHAIN_PROJECT", os.environ.get("LANGCHAIN_SESSION", "default") ) debug = _get_debug() if ( verbose or debug or tracing_enabled_ or tracing_v2_enabled_ or wandb_tracing_enabled_ or open_ai is not None ): if verbose and not any( isinstance(handler, StdOutCallbackHandler) for handler in callback_manager.handlers ): if debug: pass else: callback_manager.add_handler(StdOutCallbackHandler(), False) if debug and not any(
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
7905a485ac3e-30
if debug and not any( isinstance(handler, ConsoleCallbackHandler) for handler in callback_manager.handlers ): callback_manager.add_handler(ConsoleCallbackHandler(), True) if tracing_enabled_ and not any( isinstance(handler, LangChainTracerV1) for handler in callback_manager.handlers ): if tracer: callback_manager.add_handler(tracer, True) else: handler = LangChainTracerV1() handler.load_session(tracer_project) callback_manager.add_handler(handler, True) if wandb_tracing_enabled_ and not any( isinstance(handler, WandbTracer) for handler in callback_manager.handlers ): if wandb_tracer: callback_manager.add_handler(wandb_tracer, True) else: handler = WandbTracer() callback_manager.add_handler(handler, True) if tracing_v2_enabled_ and not any( isinstance(handler, LangChainTracer) for handler in callback_manager.handlers ): if tracer_v2: callback_manager.add_handler(tracer_v2, True) else: try: handler = LangChainTracer(project_name=tracer_project) callback_manager.add_handler(handler, True) except Exception as e: logger.warning( "Unable to load requested LangChainTracer." " To disable this warning," " unset the LANGCHAIN_TRACING_V2 environment variable.", e, ) if open_ai is not None and not any( isinstance(handler, OpenAICallbackHandler) for handler in callback_manager.handlers ): callback_manager.add_handler(open_ai, True) return callback_manager
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html
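Taken together, `configure` and the `on_*_start` methods above form the pattern every LangChain component follows: configure a manager once, then fan out one run manager per unit of work. A minimal hand-driven sketch, assuming a recent `langchain` install; the `EchoHandler` class and the serialized/prompt payloads are invented for illustration:
```
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.manager import CallbackManager


class EchoHandler(BaseCallbackHandler):
    """Toy handler that prints every LLM start it sees."""

    def on_llm_start(self, serialized, prompts, **kwargs):
        print(f"LLM start: {prompts[0]!r} (run_id={kwargs.get('run_id')})")


# verbose=True would additionally attach a StdOutCallbackHandler (see _configure above).
manager = CallbackManager.configure(
    inheritable_callbacks=[EchoHandler()],
    local_tags=["demo"],
)

# on_llm_start returns one CallbackManagerForLLMRun per prompt, each with a
# fresh run_id, so per-prompt events can be reported independently.
run_managers = manager.on_llm_start({"name": "fake-llm"}, ["prompt A", "prompt B"])
assert len(run_managers) == 2
```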
2ec9424c0c25-0
Source code for langchain.callbacks.streamlit.__init__ from __future__ import annotations from typing import TYPE_CHECKING, Optional from langchain.callbacks.base import BaseCallbackHandler from langchain.callbacks.streamlit.streamlit_callback_handler import ( LLMThoughtLabeler as LLMThoughtLabeler, ) from langchain.callbacks.streamlit.streamlit_callback_handler import ( StreamlitCallbackHandler as _InternalStreamlitCallbackHandler, ) if TYPE_CHECKING: from streamlit.delta_generator import DeltaGenerator [docs]def StreamlitCallbackHandler( parent_container: DeltaGenerator, *, max_thought_containers: int = 4, expand_new_thoughts: bool = True, collapse_completed_thoughts: bool = True, thought_labeler: Optional[LLMThoughtLabeler] = None, ) -> BaseCallbackHandler: """Construct a new StreamlitCallbackHandler. This CallbackHandler is geared towards use with a LangChain Agent; it displays the Agent's LLM and tool-usage "thoughts" inside a series of Streamlit expanders. Parameters ---------- parent_container The `st.container` that will contain all the Streamlit elements that the Handler creates. max_thought_containers The max number of completed LLM thought containers to show at once. When this threshold is reached, a new thought will cause the oldest thoughts to be collapsed into a "History" expander. Defaults to 4. expand_new_thoughts Each LLM "thought" gets its own `st.expander`. This param controls whether that expander is expanded by default. Defaults to True. collapse_completed_thoughts If True, LLM thought expanders will be collapsed when completed.
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/__init__.html
2ec9424c0c25-1
If True, LLM thought expanders will be collapsed when completed. Defaults to True. thought_labeler An optional custom LLMThoughtLabeler instance. If unspecified, the handler will use the default thought labeling logic. Defaults to None. Returns ------- A new StreamlitCallbackHandler instance. Note that this is an "auto-updating" API: if the installed version of Streamlit has a more recent StreamlitCallbackHandler implementation, an instance of that class will be used. """ # If we're using a version of Streamlit that implements StreamlitCallbackHandler, # delegate to it instead of using our built-in handler. The official handler is # guaranteed to support the same set of kwargs. try: from streamlit.external.langchain import ( StreamlitCallbackHandler as OfficialStreamlitCallbackHandler, # type: ignore # noqa: E501 ) return OfficialStreamlitCallbackHandler( parent_container, max_thought_containers=max_thought_containers, expand_new_thoughts=expand_new_thoughts, collapse_completed_thoughts=collapse_completed_thoughts, thought_labeler=thought_labeler, ) except ImportError: return _InternalStreamlitCallbackHandler( parent_container, max_thought_containers=max_thought_containers, expand_new_thoughts=expand_new_thoughts, collapse_completed_thoughts=collapse_completed_thoughts, thought_labeler=thought_labeler, )
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/__init__.html
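Typical wiring of this factory inside a Streamlit app looks like the sketch below; the agent construction is elided, so `agent` and `user_input` are placeholders:
```
import streamlit as st

from langchain.callbacks import StreamlitCallbackHandler

st_callback = StreamlitCallbackHandler(
    st.container(),
    max_thought_containers=3,   # prune to a "History" expander beyond 3 thoughts
    expand_new_thoughts=False,
)
# response = agent.run(user_input, callbacks=[st_callback])  # placeholder agent
```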
294311aef711-0
Source code for langchain.callbacks.streamlit.streamlit_callback_handler """Callback Handler that prints to streamlit.""" from __future__ import annotations from enum import Enum from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from langchain.callbacks.base import BaseCallbackHandler from langchain.callbacks.streamlit.mutable_expander import MutableExpander from langchain.schema import AgentAction, AgentFinish, LLMResult if TYPE_CHECKING: from streamlit.delta_generator import DeltaGenerator def _convert_newlines(text: str) -> str: """Convert newline characters to markdown newline sequences (space, space, newline). """ return text.replace("\n", " \n") CHECKMARK_EMOJI = "✅" THINKING_EMOJI = ":thinking_face:" HISTORY_EMOJI = ":books:" EXCEPTION_EMOJI = "⚠️" [docs]class LLMThoughtState(Enum): # The LLM is thinking about what to do next. We don't know which tool we'll run. THINKING = "THINKING" # The LLM has decided to run a tool. We don't have results from the tool yet. RUNNING_TOOL = "RUNNING_TOOL" # We have results from the tool. COMPLETE = "COMPLETE" [docs]class ToolRecord(NamedTuple): name: str input_str: str class LLMThoughtLabeler: """ Generates markdown labels for LLMThought containers. Pass a custom subclass of this to StreamlitCallbackHandler to override its default labeling logic. """ def get_initial_label(self) -> str: """Return the markdown label for a new LLMThought that doesn't have
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/streamlit_callback_handler.html
294311aef711-1
"""Return the markdown label for a new LLMThought that doesn't have an associated tool yet. """ return f"{THINKING_EMOJI} **Thinking...**" def get_tool_label(self, tool: ToolRecord, is_complete: bool) -> str: """Return the label for an LLMThought that has an associated tool. Parameters ---------- tool The tool's ToolRecord is_complete True if the thought is complete; False if the thought is still receiving input. Returns ------- The markdown label for the thought's container. """ input = tool.input_str name = tool.name emoji = CHECKMARK_EMOJI if is_complete else THINKING_EMOJI if name == "_Exception": emoji = EXCEPTION_EMOJI name = "Parsing error" idx = min([60, len(input)]) input = input[0:idx] if len(tool.input_str) > idx: input = input + "..." input = input.replace("\n", " ") label = f"{emoji} **{name}:** {input}" return label def get_history_label(self) -> str: """Return a markdown label for the special 'history' container that contains overflow thoughts. """ return f"{HISTORY_EMOJI} **History**" def get_final_agent_thought_label(self) -> str: """Return the markdown label for the agent's final thought - the "Now I have the answer" thought, that doesn't involve a tool. """ return f"{CHECKMARK_EMOJI} **Complete!**" class LLMThought: def __init__(
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/streamlit_callback_handler.html
294311aef711-2
class LLMThought: def __init__( self, parent_container: DeltaGenerator, labeler: LLMThoughtLabeler, expanded: bool, collapse_on_complete: bool, ): self._container = MutableExpander( parent_container=parent_container, label=labeler.get_initial_label(), expanded=expanded, ) self._state = LLMThoughtState.THINKING self._llm_token_stream = "" self._llm_token_writer_idx: Optional[int] = None self._last_tool: Optional[ToolRecord] = None self._collapse_on_complete = collapse_on_complete self._labeler = labeler @property def container(self) -> MutableExpander: """The container we're writing into.""" return self._container @property def last_tool(self) -> Optional[ToolRecord]: """The last tool executed by this thought""" return self._last_tool def _reset_llm_token_stream(self) -> None: self._llm_token_stream = "" self._llm_token_writer_idx = None def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str]) -> None: self._reset_llm_token_stream() def on_llm_new_token(self, token: str, **kwargs: Any) -> None: # This is only called when the LLM is initialized with `streaming=True` self._llm_token_stream += _convert_newlines(token) self._llm_token_writer_idx = self._container.markdown( self._llm_token_stream, index=self._llm_token_writer_idx )
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/streamlit_callback_handler.html
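The `index=` round-trip in `on_llm_new_token` above is what makes token streaming render as a single growing Markdown element instead of one new element per token. The same pattern in isolation, as a sketch that must run inside a Streamlit app (`MutableExpander` is an internal helper, shown later in this reference):
```
import streamlit as st

from langchain.callbacks.streamlit.mutable_expander import MutableExpander

container = MutableExpander(st.container(), label="Streaming...", expanded=True)
idx = None
stream = ""
for token in ["The", " answer", " is", " 4."]:
    stream += token
    # The first call appends a child and returns its index; later calls pass
    # that index back so the same element is rewritten in place.
    idx = container.markdown(stream, index=idx)
```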
294311aef711-3
self._llm_token_stream, index=self._llm_token_writer_idx ) def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: # `response` is the concatenation of all the tokens received by the LLM. # If we're receiving streaming tokens from `on_llm_new_token`, this response # data is redundant self._reset_llm_token_stream() def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: self._container.markdown("**LLM encountered an error...**") self._container.exception(error) def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any ) -> None: # Called with the name of the tool we're about to run (in `serialized[name]`), # and its input. We change our container's label to be the tool name. self._state = LLMThoughtState.RUNNING_TOOL tool_name = serialized["name"] self._last_tool = ToolRecord(name=tool_name, input_str=input_str) self._container.update( new_label=self._labeler.get_tool_label(self._last_tool, is_complete=False) ) def on_tool_end( self, output: str, color: Optional[str] = None, observation_prefix: Optional[str] = None, llm_prefix: Optional[str] = None, **kwargs: Any, ) -> None: self._container.markdown(f"**{output}**") def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/streamlit_callback_handler.html
294311aef711-4
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: self._container.markdown("**Tool encountered an error...**") self._container.exception(error) def on_agent_action( self, action: AgentAction, color: Optional[str] = None, **kwargs: Any ) -> Any: # Called when we're about to kick off a new tool. The `action` data # tells us the tool we're about to use, and the input we'll give it. # We don't output anything here, because we'll receive this same data # when `on_tool_start` is called immediately after. pass def complete(self, final_label: Optional[str] = None) -> None: """Finish the thought.""" if final_label is None and self._state == LLMThoughtState.RUNNING_TOOL: assert ( self._last_tool is not None ), "_last_tool should never be null when _state == RUNNING_TOOL" final_label = self._labeler.get_tool_label( self._last_tool, is_complete=True ) self._state = LLMThoughtState.COMPLETE if self._collapse_on_complete: self._container.update(new_label=final_label, new_expanded=False) else: self._container.update(new_label=final_label) def clear(self) -> None: """Remove the thought from the screen. A cleared thought can't be reused.""" self._container.clear() [docs]class StreamlitCallbackHandler(BaseCallbackHandler): def __init__( self, parent_container: DeltaGenerator, *, max_thought_containers: int = 4,
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/streamlit_callback_handler.html
294311aef711-5
*, max_thought_containers: int = 4, expand_new_thoughts: bool = True, collapse_completed_thoughts: bool = True, thought_labeler: Optional[LLMThoughtLabeler] = None, ): """Create a StreamlitCallbackHandler instance. Parameters ---------- parent_container The `st.container` that will contain all the Streamlit elements that the Handler creates. max_thought_containers The max number of completed LLM thought containers to show at once. When this threshold is reached, a new thought will cause the oldest thoughts to be collapsed into a "History" expander. Defaults to 4. expand_new_thoughts Each LLM "thought" gets its own `st.expander`. This param controls whether that expander is expanded by default. Defaults to True. collapse_completed_thoughts If True, LLM thought expanders will be collapsed when completed. Defaults to True. thought_labeler An optional custom LLMThoughtLabeler instance. If unspecified, the handler will use the default thought labeling logic. Defaults to None. """ self._parent_container = parent_container self._history_parent = parent_container.container() self._history_container: Optional[MutableExpander] = None self._current_thought: Optional[LLMThought] = None self._completed_thoughts: List[LLMThought] = [] self._max_thought_containers = max(max_thought_containers, 1) self._expand_new_thoughts = expand_new_thoughts self._collapse_completed_thoughts = collapse_completed_thoughts
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/streamlit_callback_handler.html
294311aef711-6
self._collapse_completed_thoughts = collapse_completed_thoughts self._thought_labeler = thought_labeler or LLMThoughtLabeler() def _require_current_thought(self) -> LLMThought: """Return our current LLMThought. Raise an error if we have no current thought. """ if self._current_thought is None: raise RuntimeError("Current LLMThought is unexpectedly None!") return self._current_thought def _get_last_completed_thought(self) -> Optional[LLMThought]: """Return our most recent completed LLMThought, or None if we don't have one.""" if len(self._completed_thoughts) > 0: return self._completed_thoughts[len(self._completed_thoughts) - 1] return None @property def _num_thought_containers(self) -> int: """The number of 'thought containers' we're currently showing: the number of completed thought containers, the history container (if it exists), and the current thought container (if it exists). """ count = len(self._completed_thoughts) if self._history_container is not None: count += 1 if self._current_thought is not None: count += 1 return count def _complete_current_thought(self, final_label: Optional[str] = None) -> None: """Complete the current thought, optionally assigning it a new label. Add it to our _completed_thoughts list. """ thought = self._require_current_thought() thought.complete(final_label) self._completed_thoughts.append(thought) self._current_thought = None
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/streamlit_callback_handler.html
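Because completion labels are delegated to the labeler, customizing them needs only a small subclass passed as `thought_labeler`. A sketch (the class name and emoji choices are arbitrary):
```
from langchain.callbacks.streamlit.streamlit_callback_handler import (
    LLMThoughtLabeler,
    ToolRecord,
)


class TerseLabeler(LLMThoughtLabeler):
    """Show only the tool name, never its (possibly long) input string."""

    def get_tool_label(self, tool: ToolRecord, is_complete: bool) -> str:
        emoji = "✅" if is_complete else "🤔"
        return f"{emoji} **{tool.name}**"

    def get_final_agent_thought_label(self) -> str:
        return "✅ **Done**"
```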
294311aef711-7
self._current_thought = None def _prune_old_thought_containers(self) -> None: """If we have too many thoughts onscreen, move older thoughts to the 'history container.' """ while ( self._num_thought_containers > self._max_thought_containers and len(self._completed_thoughts) > 0 ): # Create our history container if it doesn't exist, and if # max_thought_containers is > 1. (if max_thought_containers is 1, we don't # have room to show history.) if self._history_container is None and self._max_thought_containers > 1: self._history_container = MutableExpander( self._history_parent, label=self._thought_labeler.get_history_label(), expanded=False, ) oldest_thought = self._completed_thoughts.pop(0) if self._history_container is not None: self._history_container.markdown(oldest_thought.container.label) self._history_container.append_copy(oldest_thought.container) oldest_thought.clear() [docs] def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: if self._current_thought is None: self._current_thought = LLMThought( parent_container=self._parent_container, expanded=self._expand_new_thoughts, collapse_on_complete=self._collapse_completed_thoughts, labeler=self._thought_labeler, ) self._current_thought.on_llm_start(serialized, prompts)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/streamlit_callback_handler.html
294311aef711-8
) self._current_thought.on_llm_start(serialized, prompts) # We don't prune_old_thought_containers here, because our container won't # be visible until it has a child. [docs] def on_llm_new_token(self, token: str, **kwargs: Any) -> None: self._require_current_thought().on_llm_new_token(token, **kwargs) self._prune_old_thought_containers() [docs] def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: self._require_current_thought().on_llm_end(response, **kwargs) self._prune_old_thought_containers() [docs] def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: self._require_current_thought().on_llm_error(error, **kwargs) self._prune_old_thought_containers() [docs] def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any ) -> None: self._require_current_thought().on_tool_start(serialized, input_str, **kwargs) self._prune_old_thought_containers() [docs] def on_tool_end( self, output: str, color: Optional[str] = None, observation_prefix: Optional[str] = None, llm_prefix: Optional[str] = None, **kwargs: Any, ) -> None: self._require_current_thought().on_tool_end( output, color, observation_prefix, llm_prefix, **kwargs ) self._complete_current_thought()
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/streamlit_callback_handler.html
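For intuition, the callback sequence an agent executor emits maps onto thought state roughly as follows. A hand-driven sketch to run inside a Streamlit app; the serialized dicts and the LLMResult payload are stand-ins, and if the installed Streamlit ships its own official handler the rendering may differ slightly:
```
import streamlit as st

from langchain.callbacks import StreamlitCallbackHandler
from langchain.schema import Generation, LLMResult

handler = StreamlitCallbackHandler(st.container())
handler.on_llm_start({"name": "llm"}, ["What is 2 + 2?"])    # state: THINKING
handler.on_llm_new_token("I should use a calculator.")
handler.on_llm_end(LLMResult(generations=[[Generation(text="Use calculator")]]))
handler.on_tool_start({"name": "calculator"}, "2 + 2")       # state: RUNNING_TOOL
handler.on_tool_end("4")                                     # state: COMPLETE
```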
294311aef711-9
) self._complete_current_thought() [docs] def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: self._require_current_thought().on_tool_error(error, **kwargs) self._prune_old_thought_containers() [docs] def on_text( self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any, ) -> None: pass [docs] def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: pass [docs] def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: pass [docs] def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: pass [docs] def on_agent_action( self, action: AgentAction, color: Optional[str] = None, **kwargs: Any ) -> Any: self._require_current_thought().on_agent_action(action, color, **kwargs) self._prune_old_thought_containers() [docs] def on_agent_finish( self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any ) -> None: if self._current_thought is not None: self._current_thought.complete( self._thought_labeler.get_final_agent_thought_label() ) self._current_thought = None
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/streamlit_callback_handler.html
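A custom labeler like the `TerseLabeler` sketched earlier plugs in through the constructor parameter documented above:
```
import streamlit as st

from langchain.callbacks import StreamlitCallbackHandler

handler = StreamlitCallbackHandler(
    st.container(),
    thought_labeler=TerseLabeler(),  # the sketch subclass from earlier
    collapse_completed_thoughts=True,
)
```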
76a12c5b3342-0
Source code for langchain.callbacks.streamlit.mutable_expander from __future__ import annotations from enum import Enum from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional if TYPE_CHECKING: from streamlit.delta_generator import DeltaGenerator from streamlit.type_util import SupportsStr [docs]class ChildType(Enum): MARKDOWN = "MARKDOWN" EXCEPTION = "EXCEPTION" [docs]class ChildRecord(NamedTuple): type: ChildType kwargs: Dict[str, Any] dg: DeltaGenerator class MutableExpander: """A Streamlit expander that can be renamed and dynamically expanded/collapsed.""" def __init__(self, parent_container: DeltaGenerator, label: str, expanded: bool): """Create a new MutableExpander. Parameters ---------- parent_container The `st.container` that the expander will be created inside. The expander transparently deletes and recreates its underlying `st.expander` instance when its label changes, and it uses `parent_container` to ensure it recreates this underlying expander in the same location onscreen. label The expander's initial label. expanded The expander's initial `expanded` value. """ self._label = label self._expanded = expanded self._parent_cursor = parent_container.empty() self._container = self._parent_cursor.expander(label, expanded) self._child_records: List[ChildRecord] = [] @property def label(self) -> str: """The expander's label string.""" return self._label @property def expanded(self) -> bool:
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/mutable_expander.html
76a12c5b3342-1
return self._label @property def expanded(self) -> bool: """True if the expander was created with `expanded=True`.""" return self._expanded def clear(self) -> None: """Remove the container and its contents entirely. A cleared container can't be reused. """ self._container = self._parent_cursor.empty() self._child_records.clear() def append_copy(self, other: MutableExpander) -> None: """Append a copy of another MutableExpander's children to this MutableExpander. """ other_records = other._child_records.copy() for record in other_records: self._create_child(record.type, record.kwargs) def update( self, *, new_label: Optional[str] = None, new_expanded: Optional[bool] = None ) -> None: """Change the expander's label and expanded state""" if new_label is None: new_label = self._label if new_expanded is None: new_expanded = self._expanded if self._label == new_label and self._expanded == new_expanded: # No change! return self._label = new_label self._expanded = new_expanded self._container = self._parent_cursor.expander(new_label, new_expanded) prev_records = self._child_records self._child_records = [] # Replay all children into the new container for record in prev_records: self._create_child(record.type, record.kwargs) def markdown( self, body: SupportsStr, unsafe_allow_html: bool = False, *, help: Optional[str] = None,
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/mutable_expander.html
76a12c5b3342-2
*, help: Optional[str] = None, index: Optional[int] = None, ) -> int: """Add a Markdown element to the container and return its index.""" kwargs = {"body": body, "unsafe_allow_html": unsafe_allow_html, "help": help} new_dg = self._get_dg(index).markdown(**kwargs) # type: ignore[arg-type] record = ChildRecord(ChildType.MARKDOWN, kwargs, new_dg) return self._add_record(record, index) def exception( self, exception: BaseException, *, index: Optional[int] = None ) -> int: """Add an Exception element to the container and return its index.""" kwargs = {"exception": exception} new_dg = self._get_dg(index).exception(**kwargs) record = ChildRecord(ChildType.EXCEPTION, kwargs, new_dg) return self._add_record(record, index) def _create_child(self, type: ChildType, kwargs: Dict[str, Any]) -> None: """Create a new child with the given params""" if type == ChildType.MARKDOWN: self.markdown(**kwargs) elif type == ChildType.EXCEPTION: self.exception(**kwargs) else: raise RuntimeError(f"Unexpected child type {type}") def _add_record(self, record: ChildRecord, index: Optional[int]) -> int: """Add a ChildRecord to self._children. If `index` is specified, replace the existing record at that index. Otherwise, append the record to the end of the list. Return the index of the added record. """ if index is not None: # Replace existing child
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/mutable_expander.html
76a12c5b3342-3
""" if index is not None: # Replace existing child self._child_records[index] = record return index # Append new child self._child_records.append(record) return len(self._child_records) - 1 def _get_dg(self, index: Optional[int]) -> DeltaGenerator: if index is not None: # Existing index: reuse child's DeltaGenerator assert 0 <= index < len(self._child_records), f"Bad index: {index}" return self._child_records[index].dg # No index: use container's DeltaGenerator return self._container
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/mutable_expander.html
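A sketch of the rename-and-replay behavior described in `update` above; this is an internal API, so treat it as illustrative only, and run it inside a Streamlit app:
```
import streamlit as st

from langchain.callbacks.streamlit.mutable_expander import MutableExpander

exp = MutableExpander(st.container(), label="🤔 Thinking...", expanded=True)
exp.markdown("step 1 done")
exp.exception(ValueError("example failure"))
# Renaming recreates the underlying st.expander in place and replays both
# recorded children into the new container.
exp.update(new_label="✅ Complete", new_expanded=False)
```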
59a8dbc5a368-0
Source code for langchain.callbacks.tracers.wandb """A Tracer Implementation that records activity to Weights & Biases.""" from __future__ import annotations from typing import ( TYPE_CHECKING, Any, Dict, List, Optional, Sequence, TypedDict, Union, ) from langchain.callbacks.tracers.base import BaseTracer from langchain.callbacks.tracers.schemas import Run, RunTypeEnum if TYPE_CHECKING: from wandb import Settings as WBSettings from wandb.sdk.data_types import trace_tree from wandb.sdk.lib.paths import StrPath from wandb.wandb_run import Run as WBRun PRINT_WARNINGS = True def _convert_lc_run_to_wb_span(trace_tree: Any, run: Run) -> trace_tree.Span: if run.run_type == RunTypeEnum.llm: return _convert_llm_run_to_wb_span(trace_tree, run) elif run.run_type == RunTypeEnum.chain: return _convert_chain_run_to_wb_span(trace_tree, run) elif run.run_type == RunTypeEnum.tool: return _convert_tool_run_to_wb_span(trace_tree, run) else: return _convert_run_to_wb_span(trace_tree, run) def _convert_llm_run_to_wb_span(trace_tree: Any, run: Run) -> trace_tree.Span: base_span = _convert_run_to_wb_span(trace_tree, run) base_span.results = [ trace_tree.Result( inputs={"prompt": prompt}, outputs={ f"gen_{g_i}": gen["text"] for g_i, gen in enumerate(run.outputs["generations"][ndx]) } if ( run.outputs is not None
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/wandb.html
59a8dbc5a368-1
} if ( run.outputs is not None and len(run.outputs["generations"]) > ndx and len(run.outputs["generations"][ndx]) > 0 ) else None, ) for ndx, prompt in enumerate(run.inputs["prompts"] or []) ] base_span.span_kind = trace_tree.SpanKind.LLM return base_span def _serialize_inputs(run_inputs: dict) -> Union[dict, list]: if "input_documents" in run_inputs: docs = run_inputs["input_documents"] return [doc.json() for doc in docs] else: return run_inputs def _convert_chain_run_to_wb_span(trace_tree: Any, run: Run) -> trace_tree.Span: base_span = _convert_run_to_wb_span(trace_tree, run) base_span.results = [ trace_tree.Result(inputs=_serialize_inputs(run.inputs), outputs=run.outputs) ] base_span.child_spans = [ _convert_lc_run_to_wb_span(trace_tree, child_run) for child_run in run.child_runs ] base_span.span_kind = ( trace_tree.SpanKind.AGENT if "agent" in run.serialized.get("name", "").lower() else trace_tree.SpanKind.CHAIN ) return base_span def _convert_tool_run_to_wb_span(trace_tree: Any, run: Run) -> trace_tree.Span: base_span = _convert_run_to_wb_span(trace_tree, run) base_span.results = [ trace_tree.Result(inputs=_serialize_inputs(run.inputs), outputs=run.outputs) ] base_span.child_spans = [ _convert_lc_run_to_wb_span(trace_tree, child_run)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/wandb.html
59a8dbc5a368-2
_convert_lc_run_to_wb_span(trace_tree, child_run) for child_run in run.child_runs ] base_span.span_kind = trace_tree.SpanKind.TOOL return base_span def _convert_run_to_wb_span(trace_tree: Any, run: Run) -> trace_tree.Span: attributes = {**run.extra} if run.extra else {} attributes["execution_order"] = run.execution_order return trace_tree.Span( span_id=str(run.id) if run.id is not None else None, name=run.serialized.get("name"), start_time_ms=int(run.start_time.timestamp() * 1000), end_time_ms=int(run.end_time.timestamp() * 1000), status_code=trace_tree.StatusCode.SUCCESS if run.error is None else trace_tree.StatusCode.ERROR, status_message=run.error, attributes=attributes, ) def _replace_type_with_kind(data: Any) -> Any: if isinstance(data, dict): # W&B TraceTree expects "_kind" instead of "_type" since `_type` is special # in W&B. if "_type" in data: _type = data.pop("_type") data["_kind"] = _type return {k: _replace_type_with_kind(v) for k, v in data.items()} elif isinstance(data, list): return [_replace_type_with_kind(v) for v in data] elif isinstance(data, tuple): return tuple(_replace_type_with_kind(v) for v in data) elif isinstance(data, set): return {_replace_type_with_kind(v) for v in data} else: return data [docs]class WandbRunArgs(TypedDict):
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/wandb.html
59a8dbc5a368-3
return data [docs]class WandbRunArgs(TypedDict): """Arguments for the WandbTracer.""" job_type: Optional[str] dir: Optional[StrPath] config: Union[Dict, str, None] project: Optional[str] entity: Optional[str] reinit: Optional[bool] tags: Optional[Sequence] group: Optional[str] name: Optional[str] notes: Optional[str] magic: Optional[Union[dict, str, bool]] config_exclude_keys: Optional[List[str]] config_include_keys: Optional[List[str]] anonymous: Optional[str] mode: Optional[str] allow_val_change: Optional[bool] resume: Optional[Union[bool, str]] force: Optional[bool] tensorboard: Optional[bool] sync_tensorboard: Optional[bool] monitor_gym: Optional[bool] save_code: Optional[bool] id: Optional[str] settings: Union[WBSettings, Dict[str, Any], None] [docs]class WandbTracer(BaseTracer): """Callback Handler that logs to Weights and Biases. This handler will log the model architecture and run traces to Weights and Biases. This will ensure that all LangChain activity is logged to W&B. """ _run: Optional[WBRun] = None _run_args: Optional[WandbRunArgs] = None def __init__(self, run_args: Optional[WandbRunArgs] = None, **kwargs: Any) -> None: """Initializes the WandbTracer. Parameters:
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/wandb.html
59a8dbc5a368-4
"""Initializes the WandbTracer. Parameters: run_args: (dict, optional) Arguments to pass to `wandb.init()`. If not provided, `wandb.init()` will be called with no arguments. Please refer to the `wandb.init` for more details. To use W&B to monitor all LangChain activity, add this tracer like any other LangChain callback: ``` from wandb.integration.langchain import WandbTracer tracer = WandbTracer() chain = LLMChain(llm, callbacks=[tracer]) # ...end of notebook / script: tracer.finish() ``` """ super().__init__(**kwargs) try: import wandb from wandb.sdk.data_types import trace_tree except ImportError as e: raise ImportError( "Could not import wandb python package." "Please install it with `pip install wandb`." ) from e self._wandb = wandb self._trace_tree = trace_tree self._run_args = run_args self._ensure_run(should_print_url=(wandb.run is None)) [docs] def finish(self) -> None: """Waits for all asynchronous processes to finish and data to upload. Proxy for `wandb.finish()`. """ self._wandb.finish() def _log_trace_from_run(self, run: Run) -> None: """Logs a LangChain Run to W*B as a W&B Trace.""" self._ensure_run() try: root_span = _convert_lc_run_to_wb_span(self._trace_tree, run) except Exception as e: if PRINT_WARNINGS:
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/wandb.html
59a8dbc5a368-5
except Exception as e: if PRINT_WARNINGS: self._wandb.termwarn( f"Skipping trace saving - unable to safely convert LangChain Run " f"into W&B Trace due to: {e}" ) return model_dict = None # TODO: Add something like this once we have a way to get the clean serialized # parent dict from a run: # serialized_parent = safely_get_span_producing_model(run) # if serialized_parent is not None: # model_dict = safely_convert_model_to_dict(serialized_parent) model_trace = self._trace_tree.WBTraceTree( root_span=root_span, model_dict=model_dict, ) if self._wandb.run is not None: self._wandb.run.log({"langchain_trace": model_trace}) def _ensure_run(self, should_print_url: bool = False) -> None: """Ensures an active W&B run exists. If not, will start a new run with the provided run_args. """ if self._wandb.run is None: # Make a shallow copy of the run args, so we don't modify the original run_args = self._run_args or {} # type: ignore run_args: dict = {**run_args} # type: ignore # Prefer to run in silent mode since W&B has a lot of output # which can be undesirable when dealing with text-based models. if "settings" not in run_args: # type: ignore run_args["settings"] = {"silent": True} # type: ignore # Start the run and add the stream table self._wandb.init(**run_args)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/wandb.html
59a8dbc5a368-6
self._wandb.init(**run_args) if self._wandb.run is not None: if should_print_url: run_url = self._wandb.run.settings.run_url self._wandb.termlog( f"Streaming LangChain activity to W&B at {run_url}\n" "`WandbTracer` is currently in beta.\n" "Please report any issues to " "https://github.com/wandb/wandb/issues with the tag " "`langchain`." ) self._wandb.run._label(repo="langchain") def _persist_run(self, run: "Run") -> None: """Persist a run.""" self._log_trace_from_run(run)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/wandb.html
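Beyond the zero-argument form shown in the class docstring, `run_args` is forwarded to `wandb.init()`. A sketch assuming `wandb` is installed and you are logged in; the project and run names are placeholders:
```
from langchain.callbacks.tracers.wandb import WandbTracer

tracer = WandbTracer(run_args={"project": "langchain-demo", "name": "trial-1"})
# ... pass callbacks=[tracer] to chains/LLMs ...
tracer.finish()  # proxy for wandb.finish(); waits for pending uploads
```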
025c502df5cd-0
Source code for langchain.callbacks.tracers.langchain_v1 from __future__ import annotations import logging import os from typing import Any, Dict, Optional, Union import requests from langchain.callbacks.tracers.base import BaseTracer from langchain.callbacks.tracers.schemas import ( ChainRun, LLMRun, Run, ToolRun, TracerSession, TracerSessionV1, TracerSessionV1Base, ) from langchain.schema import get_buffer_string from langchain.utils import raise_for_status_with_text [docs]def get_headers() -> Dict[str, Any]: """Get the headers for the LangChain API.""" headers: Dict[str, Any] = {"Content-Type": "application/json"} if os.getenv("LANGCHAIN_API_KEY"): headers["x-api-key"] = os.getenv("LANGCHAIN_API_KEY") return headers def _get_endpoint() -> str: return os.getenv("LANGCHAIN_ENDPOINT", "http://localhost:8000") [docs]class LangChainTracerV1(BaseTracer): """An implementation of the SharedTracer that POSTS to the langchain endpoint.""" def __init__(self, **kwargs: Any) -> None: """Initialize the LangChain tracer.""" super().__init__(**kwargs) self.session: Optional[TracerSessionV1] = None self._endpoint = _get_endpoint() self._headers = get_headers() def _convert_to_v1_run(self, run: Run) -> Union[LLMRun, ChainRun, ToolRun]: session = self.session or self.load_default_session() if not isinstance(session, TracerSessionV1): raise ValueError(
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/langchain_v1.html
025c502df5cd-1
if not isinstance(session, TracerSessionV1): raise ValueError( "LangChainTracerV1 is not compatible with" f" session of type {type(session)}" ) if run.run_type == "llm": if "prompts" in run.inputs: prompts = run.inputs["prompts"] elif "messages" in run.inputs: prompts = [get_buffer_string(batch) for batch in run.inputs["messages"]] else: raise ValueError("No prompts found in LLM run inputs") return LLMRun( uuid=str(run.id) if run.id else None, parent_uuid=str(run.parent_run_id) if run.parent_run_id else None, start_time=run.start_time, end_time=run.end_time, extra=run.extra, execution_order=run.execution_order, child_execution_order=run.child_execution_order, serialized=run.serialized, session_id=session.id, error=run.error, prompts=prompts, response=run.outputs if run.outputs else None, ) if run.run_type == "chain": child_runs = [self._convert_to_v1_run(run) for run in run.child_runs] return ChainRun( uuid=str(run.id) if run.id else None, parent_uuid=str(run.parent_run_id) if run.parent_run_id else None, start_time=run.start_time, end_time=run.end_time, execution_order=run.execution_order, child_execution_order=run.child_execution_order, serialized=run.serialized, session_id=session.id, inputs=run.inputs, outputs=run.outputs, error=run.error,
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/langchain_v1.html
025c502df5cd-2
outputs=run.outputs, error=run.error, extra=run.extra, child_llm_runs=[run for run in child_runs if isinstance(run, LLMRun)], child_chain_runs=[ run for run in child_runs if isinstance(run, ChainRun) ], child_tool_runs=[run for run in child_runs if isinstance(run, ToolRun)], ) if run.run_type == "tool": child_runs = [self._convert_to_v1_run(run) for run in run.child_runs] return ToolRun( uuid=str(run.id) if run.id else None, parent_uuid=str(run.parent_run_id) if run.parent_run_id else None, start_time=run.start_time, end_time=run.end_time, execution_order=run.execution_order, child_execution_order=run.child_execution_order, serialized=run.serialized, session_id=session.id, action=str(run.serialized), tool_input=run.inputs.get("input", ""), output=None if run.outputs is None else run.outputs.get("output"), error=run.error, extra=run.extra, child_chain_runs=[ run for run in child_runs if isinstance(run, ChainRun) ], child_tool_runs=[run for run in child_runs if isinstance(run, ToolRun)], child_llm_runs=[run for run in child_runs if isinstance(run, LLMRun)], ) raise ValueError(f"Unknown run type: {run.run_type}") def _persist_run(self, run: Union[Run, LLMRun, ChainRun, ToolRun]) -> None: """Persist a run.""" if isinstance(run, Run): v1_run = self._convert_to_v1_run(run)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/langchain_v1.html
025c502df5cd-3
v1_run = self._convert_to_v1_run(run) else: v1_run = run if isinstance(v1_run, LLMRun): endpoint = f"{self._endpoint}/llm-runs" elif isinstance(v1_run, ChainRun): endpoint = f"{self._endpoint}/chain-runs" else: endpoint = f"{self._endpoint}/tool-runs" try: response = requests.post( endpoint, data=v1_run.json(), headers=self._headers, ) raise_for_status_with_text(response) except Exception as e: logging.warning(f"Failed to persist run: {e}") def _persist_session( self, session_create: TracerSessionV1Base ) -> Union[TracerSessionV1, TracerSession]: """Persist a session.""" try: r = requests.post( f"{self._endpoint}/sessions", data=session_create.json(), headers=self._headers, ) session = TracerSessionV1(id=r.json()["id"], **session_create.dict()) except Exception as e: logging.warning(f"Failed to create session, using default session: {e}") session = TracerSessionV1(id=1, **session_create.dict()) return session def _load_session(self, session_name: Optional[str] = None) -> TracerSessionV1: """Load a session from the tracer.""" try: url = f"{self._endpoint}/sessions" if session_name: url += f"?name={session_name}" r = requests.get(url, headers=self._headers)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/langchain_v1.html
025c502df5cd-4
r = requests.get(url, headers=self._headers) tracer_session = TracerSessionV1(**r.json()[0]) except Exception as e: session_type = "default" if not session_name else session_name logging.warning( f"Failed to load {session_type} session, using empty session: {e}" ) tracer_session = TracerSessionV1(id=1) self.session = tracer_session return tracer_session [docs] def load_session(self, session_name: str) -> Union[TracerSessionV1, TracerSession]: """Load a session with the given name from the tracer.""" return self._load_session(session_name) [docs] def load_default_session(self) -> Union[TracerSessionV1, TracerSession]: """Load the default tracing session and set it as the Tracer's session.""" return self._load_session("default")
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/langchain_v1.html
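Configuration here is environment-driven: `get_headers` picks up `LANGCHAIN_API_KEY`, and `_get_endpoint` falls back to `http://localhost:8000`. A sketch with placeholder values:
```
import os

from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1

os.environ["LANGCHAIN_ENDPOINT"] = "http://localhost:8000"  # placeholder
os.environ["LANGCHAIN_API_KEY"] = "my-api-key"              # placeholder

tracer = LangChainTracerV1()
tracer.load_default_session()  # falls back to an empty session on failure
# ... pass callbacks=[tracer] to a chain; runs are POSTed to /llm-runs etc. ...
```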
f2ec6415f7a8-0
Source code for langchain.callbacks.tracers.langchain """A Tracer implementation that records to LangChain endpoint.""" from __future__ import annotations import logging import os from concurrent.futures import Future, ThreadPoolExecutor, wait from datetime import datetime from typing import Any, Dict, List, Optional, Set, Union from uuid import UUID from langchainplus_sdk import LangChainPlusClient from langchain.callbacks.tracers.base import BaseTracer from langchain.callbacks.tracers.schemas import ( Run, RunTypeEnum, TracerSession, ) from langchain.env import get_runtime_environment from langchain.schema import BaseMessage, messages_to_dict logger = logging.getLogger(__name__) _LOGGED = set() _TRACERS: List[LangChainTracer] = [] [docs]def log_error_once(method: str, exception: Exception) -> None: """Log an error once.""" global _LOGGED if (method, type(exception)) in _LOGGED: return _LOGGED.add((method, type(exception))) logger.error(exception) [docs]def wait_for_all_tracers() -> None: global _TRACERS for tracer in _TRACERS: tracer.wait_for_futures() [docs]class LangChainTracer(BaseTracer): """An implementation of the SharedTracer that POSTS to the langchain endpoint.""" def __init__( self, example_id: Optional[Union[UUID, str]] = None, project_name: Optional[str] = None, client: Optional[LangChainPlusClient] = None, **kwargs: Any, ) -> None: """Initialize the LangChain tracer.""" super().__init__(**kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/langchain.html
f2ec6415f7a8-1
"""Initialize the LangChain tracer.""" super().__init__(**kwargs) self.session: Optional[TracerSession] = None self.example_id = ( UUID(example_id) if isinstance(example_id, str) else example_id ) self.project_name = project_name or os.getenv( "LANGCHAIN_PROJECT", os.getenv("LANGCHAIN_SESSION", "default") ) # set max_workers to 1 to process tasks in order self.executor = ThreadPoolExecutor(max_workers=1) self.client = client or LangChainPlusClient() self._futures: Set[Future] = set() global _TRACERS _TRACERS.append(self) [docs] def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], *, run_id: UUID, tags: Optional[List[str]] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> None: """Start a trace for an LLM run.""" parent_run_id_ = str(parent_run_id) if parent_run_id else None execution_order = self._get_execution_order(parent_run_id_) chat_model_run = Run( id=run_id, parent_run_id=parent_run_id, serialized=serialized, inputs={"messages": [messages_to_dict(batch) for batch in messages]}, extra=kwargs, start_time=datetime.utcnow(), execution_order=execution_order, child_execution_order=execution_order, run_type=RunTypeEnum.llm, tags=tags, ) self._start_trace(chat_model_run)
        self._on_chat_model_start(chat_model_run)

    def _persist_run(self, run: Run) -> None:
        """The LangChain tracer uses POST/PATCH rather than persist."""

    def _persist_run_single(self, run: Run) -> None:
        """Persist a run."""
        if run.parent_run_id is None:
            run.reference_example_id = self.example_id
        run_dict = run.dict(exclude={"child_runs"})
        extra = run_dict.get("extra", {})
        extra["runtime"] = get_runtime_environment()
        run_dict["extra"] = extra
        try:
            self.client.create_run(**run_dict, project_name=self.project_name)
        except Exception as e:
            # Errors are swallowed by the thread executor, so log them here.
            log_error_once("post", e)
            raise

    def _update_run_single(self, run: Run) -> None:
        """Update a run."""
        try:
            self.client.update_run(run.id, **run.dict())
        except Exception as e:
            # Errors are swallowed by the thread executor, so log them here.
            log_error_once("patch", e)
            raise

    def _on_llm_start(self, run: Run) -> None:
        """Persist an LLM run."""
        self._futures.add(
            self.executor.submit(self._persist_run_single, run.copy(deep=True))
        )

    def _on_chat_model_start(self, run: Run) -> None:
        """Persist a chat model run."""
        self._futures.add(
            self.executor.submit(self._persist_run_single, run.copy(deep=True))
        )
    def _on_llm_end(self, run: Run) -> None:
        """Process the LLM Run."""
        self._futures.add(
            self.executor.submit(self._update_run_single, run.copy(deep=True))
        )

    def _on_llm_error(self, run: Run) -> None:
        """Process the LLM Run upon error."""
        self._futures.add(
            self.executor.submit(self._update_run_single, run.copy(deep=True))
        )

    def _on_chain_start(self, run: Run) -> None:
        """Process the Chain Run upon start."""
        self._futures.add(
            self.executor.submit(self._persist_run_single, run.copy(deep=True))
        )

    def _on_chain_end(self, run: Run) -> None:
        """Process the Chain Run."""
        self._futures.add(
            self.executor.submit(self._update_run_single, run.copy(deep=True))
        )

    def _on_chain_error(self, run: Run) -> None:
        """Process the Chain Run upon error."""
        self._futures.add(
            self.executor.submit(self._update_run_single, run.copy(deep=True))
        )

    def _on_tool_start(self, run: Run) -> None:
        """Process the Tool Run upon start."""
        self._futures.add(
            self.executor.submit(self._persist_run_single, run.copy(deep=True))
        )

    def _on_tool_end(self, run: Run) -> None:
        """Process the Tool Run."""
        self._futures.add(
            self.executor.submit(self._update_run_single, run.copy(deep=True))
        )
    def _on_tool_error(self, run: Run) -> None:
        """Process the Tool Run upon error."""
        self._futures.add(
            self.executor.submit(self._update_run_single, run.copy(deep=True))
        )

    def _on_retriever_start(self, run: Run) -> None:
        """Process the Retriever Run upon start."""
        self._futures.add(
            self.executor.submit(self._persist_run_single, run.copy(deep=True))
        )

    def _on_retriever_end(self, run: Run) -> None:
        """Process the Retriever Run."""
        self._futures.add(
            self.executor.submit(self._update_run_single, run.copy(deep=True))
        )

    def _on_retriever_error(self, run: Run) -> None:
        """Process the Retriever Run upon error."""
        self._futures.add(
            self.executor.submit(self._update_run_single, run.copy(deep=True))
        )

    def wait_for_futures(self) -> None:
        """Wait for all pending futures to complete."""
        futures = list(self._futures)
        wait(futures)
        for future in futures:
            self._futures.remove(future)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/langchain.html
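A brief usage sketch for the tracer above. The OpenAI model and the project name are illustrative assumptions; only LangChainTracer and wait_for_all_tracers come from this module.

from langchain.callbacks.tracers.langchain import (
    LangChainTracer,
    wait_for_all_tracers,
)
from langchain.llms import OpenAI  # assumed stand-in; any callback-aware component works

tracer = LangChainTracer(project_name="my-project")  # hypothetical project name
llm = OpenAI(temperature=0)
llm("Hello, world!", callbacks=[tracer])

# Runs are POSTed from a single background worker thread, so block until
# every pending future has been flushed before the process exits.
wait_for_all_tracers()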
Source code for langchain.callbacks.tracers.stdout

import json
from typing import Any, List

from langchain.callbacks.tracers.base import BaseTracer
from langchain.callbacks.tracers.schemas import Run
from langchain.input import get_bolded_text, get_colored_text


def try_json_stringify(obj: Any, fallback: str) -> str:
    """Try to stringify an object to JSON.

    Args:
        obj: Object to stringify.
        fallback: Fallback string to return if the object cannot be stringified.

    Returns:
        A JSON string if the object can be stringified, otherwise the
        fallback string.
    """
    try:
        return json.dumps(obj, indent=2, ensure_ascii=False)
    except Exception:
        return fallback


def elapsed(run: Any) -> str:
    """Get the elapsed time of a run.

    Args:
        run: Any object with a start_time and end_time attribute.

    Returns:
        A string with the elapsed time in seconds, or in milliseconds if the
        elapsed time is less than a second.
    """
    elapsed_time = run.end_time - run.start_time
    milliseconds = elapsed_time.total_seconds() * 1000
    if milliseconds < 1000:
        return f"{milliseconds}ms"
    return f"{(milliseconds / 1000):.2f}s"


class ConsoleCallbackHandler(BaseTracer):
    """Tracer that prints to the console."""

    name = "console_callback_handler"

    def _persist_run(self, run: Run) -> None:
        pass

    def get_parents(self, run: Run) -> List[Run]:
        """Collect the chain of parent runs for a run."""
        parents = []
        current_run = run
        while current_run.parent_run_id:
            parent = self.run_map.get(str(current_run.parent_run_id))
            if parent:
                parents.append(parent)
                current_run = parent
            else:
                break
        return parents

    def get_breadcrumbs(self, run: Run) -> str:
        """Build a breadcrumb string from a run's ancestry."""
        parents = self.get_parents(run)[::-1]
        string = " > ".join(
            f"{parent.execution_order}:{parent.run_type}:{parent.name}"
            for parent in parents + [run]
        )
        return string

    # logging methods
    def _on_chain_start(self, run: Run) -> None:
        crumbs = self.get_breadcrumbs(run)
        print(
            f"{get_colored_text('[chain/start]', color='green')} "
            + get_bolded_text(f"[{crumbs}] Entering Chain run with input:\n")
            + f"{try_json_stringify(run.inputs, '[inputs]')}"
        )

    def _on_chain_end(self, run: Run) -> None:
        crumbs = self.get_breadcrumbs(run)
        print(
            f"{get_colored_text('[chain/end]', color='blue')} "
            + get_bolded_text(
                f"[{crumbs}] [{elapsed(run)}] Exiting Chain run with output:\n"
            )
            + f"{try_json_stringify(run.outputs, '[outputs]')}"
        )

    def _on_chain_error(self, run: Run) -> None:
        crumbs = self.get_breadcrumbs(run)
        print(
            f"{get_colored_text('[chain/error]', color='red')} "
            + get_bolded_text(
                f"[{crumbs}] [{elapsed(run)}] Chain run errored with error:\n"
            )
            + f"{try_json_stringify(run.error, '[error]')}"
        )

    def _on_llm_start(self, run: Run) -> None:
        crumbs = self.get_breadcrumbs(run)
        inputs = (
            {"prompts": [p.strip() for p in run.inputs["prompts"]]}
            if "prompts" in run.inputs
            else run.inputs
        )
        print(
            f"{get_colored_text('[llm/start]', color='green')} "
            + get_bolded_text(f"[{crumbs}] Entering LLM run with input:\n")
            + f"{try_json_stringify(inputs, '[inputs]')}"
        )

    def _on_llm_end(self, run: Run) -> None:
        crumbs = self.get_breadcrumbs(run)
        print(
            f"{get_colored_text('[llm/end]', color='blue')} "
            + get_bolded_text(
                f"[{crumbs}] [{elapsed(run)}] Exiting LLM run with output:\n"
            )
            + f"{try_json_stringify(run.outputs, '[response]')}"
        )

    def _on_llm_error(self, run: Run) -> None:
        crumbs = self.get_breadcrumbs(run)
        print(
            f"{get_colored_text('[llm/error]', color='red')} "
            + get_bolded_text(
                f"[{crumbs}] [{elapsed(run)}] LLM run errored with error:\n"
) + f"{try_json_stringify(run.error, '[error]')}" ) def _on_tool_start(self, run: Run) -> None: crumbs = self.get_breadcrumbs(run) print( f'{get_colored_text("[tool/start]", color="green")} ' + get_bolded_text(f"[{crumbs}] Entering Tool run with input:\n") + f'"{run.inputs["input"].strip()}"' ) def _on_tool_end(self, run: Run) -> None: crumbs = self.get_breadcrumbs(run) if run.outputs: print( f'{get_colored_text("[tool/end]", color="blue")} ' + get_bolded_text( f"[{crumbs}] [{elapsed(run)}] Exiting Tool run with output:\n" ) + f'"{run.outputs["output"].strip()}"' ) def _on_tool_error(self, run: Run) -> None: crumbs = self.get_breadcrumbs(run) print( f"{get_colored_text('[tool/error]', color='red')} " + get_bolded_text(f"[{crumbs}] [{elapsed(run)}] ") + f"Tool run errored with error:\n" f"{run.error}" )
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/stdout.html
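A quick sketch of wiring the console tracer into a model call. The OpenAI model is an assumed stand-in; the handler and the printed format are what the module above defines.

from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler
from langchain.llms import OpenAI  # assumed stand-in; any callback-aware component works

handler = ConsoleCallbackHandler()
llm = OpenAI(temperature=0)
# Each lifecycle event prints a colored, bolded breadcrumb line, e.g.
# [llm/start] [1:llm:OpenAI] Entering LLM run with input: ...
llm("Tell me a joke", callbacks=[handler])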
Source code for langchain.callbacks.tracers.schemas

"""Schemas for tracers."""
from __future__ import annotations

import datetime
from typing import Any, Dict, List, Optional
from uuid import UUID

from langchainplus_sdk.schemas import RunBase as BaseRunV2
from langchainplus_sdk.schemas import RunTypeEnum
from pydantic import BaseModel, Field, root_validator

from langchain.schema import LLMResult


class TracerSessionV1Base(BaseModel):
    """Base class for TracerSessionV1."""

    start_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
    name: Optional[str] = None
    extra: Optional[Dict[str, Any]] = None


class TracerSessionV1Create(TracerSessionV1Base):
    """Create class for TracerSessionV1."""


class TracerSessionV1(TracerSessionV1Base):
    """TracerSessionV1 schema."""

    id: int


class TracerSessionBase(TracerSessionV1Base):
    """Base class for TracerSession."""

    tenant_id: UUID


class TracerSession(TracerSessionBase):
    """TracerSession schema for the V2 API."""

    id: UUID


class BaseRun(BaseModel):
    """Base class for Run."""

    uuid: str
    parent_uuid: Optional[str] = None
    start_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
    end_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
    extra: Optional[Dict[str, Any]] = None
    execution_order: int
    child_execution_order: int
    serialized: Dict[str, Any]
    session_id: int
    error: Optional[str] = None
class LLMRun(BaseRun):
    """Class for LLMRun."""

    prompts: List[str]
    response: Optional[LLMResult] = None


class ChainRun(BaseRun):
    """Class for ChainRun."""

    inputs: Dict[str, Any]
    outputs: Optional[Dict[str, Any]] = None
    child_llm_runs: List[LLMRun] = Field(default_factory=list)
    child_chain_runs: List[ChainRun] = Field(default_factory=list)
    child_tool_runs: List[ToolRun] = Field(default_factory=list)


class ToolRun(BaseRun):
    """Class for ToolRun."""

    tool_input: str
    output: Optional[str] = None
    action: str
    child_llm_runs: List[LLMRun] = Field(default_factory=list)
    child_chain_runs: List[ChainRun] = Field(default_factory=list)
    child_tool_runs: List[ToolRun] = Field(default_factory=list)


# Begin V2 API Schemas


class Run(BaseRunV2):
    """Run schema for the V2 API in the Tracer."""

    execution_order: int
    child_execution_order: int
    child_runs: List[Run] = Field(default_factory=list)
    tags: Optional[List[str]] = Field(default_factory=list)

    @root_validator(pre=True)
    def assign_name(cls, values: dict) -> dict:
        """Assign name to the run."""
        if values.get("name") is None:
            if "name" in values["serialized"]:
                values["name"] = values["serialized"]["name"]
elif "id" in values["serialized"]: values["name"] = values["serialized"]["id"][-1] return values ChainRun.update_forward_refs() ToolRun.update_forward_refs() __all__ = [ "BaseRun", "ChainRun", "LLMRun", "Run", "RunTypeEnum", "ToolRun", "TracerSession", "TracerSessionBase", "TracerSessionV1", "TracerSessionV1Base", "TracerSessionV1Create", ]
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/schemas.html
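To see the assign_name validator in action, the sketch below mirrors the Run construction that the tracers in this package perform (e.g. BaseTracer.on_llm_start). The exact set of required fields comes from the SDK's RunBase and may vary by version, so take the field list as an assumption.

from datetime import datetime
from uuid import uuid4

from langchain.callbacks.tracers.schemas import Run, RunTypeEnum

# "name" is not supplied, so the pre root_validator falls back to the last
# element of serialized["id"].
run = Run(
    id=uuid4(),
    parent_run_id=None,
    serialized={"id": ["langchain", "llms", "OpenAI"]},
    inputs={"prompts": ["Hello"]},
    extra={},
    start_time=datetime.utcnow(),
    execution_order=1,
    child_execution_order=1,
    run_type=RunTypeEnum.llm,
    tags=[],
)
print(run.name)  # -> "OpenAI"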
Source code for langchain.callbacks.tracers.evaluation

"""A tracer that runs evaluators over completed runs."""
from concurrent.futures import Future, ThreadPoolExecutor, wait
from typing import Any, Optional, Sequence, Set, Union
from uuid import UUID

from langchainplus_sdk import LangChainPlusClient, RunEvaluator

from langchain.callbacks.tracers.base import BaseTracer
from langchain.callbacks.tracers.schemas import Run


class EvaluatorCallbackHandler(BaseTracer):
    """A tracer that runs a run evaluator whenever a run is persisted.

    Parameters
    ----------
    evaluators : Sequence[RunEvaluator]
        The run evaluators to apply to all top-level runs.
    max_workers : int, optional
        The maximum number of worker threads to use for running the
        evaluators. If not specified, it will default to the number of
        evaluators.
    client : LangChainPlusClient, optional
        The LangChainPlusClient instance to use for evaluating the runs.
        If not specified, a new instance will be created.
    example_id : Union[UUID, str], optional
        The example ID to be associated with the runs.

    Attributes
    ----------
    example_id : Union[UUID, None]
        The example ID associated with the runs.
    client : LangChainPlusClient
        The LangChainPlusClient instance used for evaluating the runs.
    evaluators : Sequence[RunEvaluator]
        The sequence of run evaluators to be executed.
    executor : ThreadPoolExecutor
        The thread pool executor used for running the evaluators.
    futures : Set[Future]
        The set of futures representing the running evaluators.
    """

    name = "evaluator_callback_handler"
    def __init__(
        self,
        evaluators: Sequence[RunEvaluator],
        max_workers: Optional[int] = None,
        client: Optional[LangChainPlusClient] = None,
        example_id: Optional[Union[UUID, str]] = None,
        **kwargs: Any,
    ) -> None:
        super().__init__(**kwargs)
        self.example_id = (
            UUID(example_id) if isinstance(example_id, str) else example_id
        )
        self.client = client or LangChainPlusClient()
        self.evaluators = evaluators
        self.executor = ThreadPoolExecutor(
            max_workers=max(max_workers or len(evaluators), 1)
        )
        self.futures: Set[Future] = set()

    def _persist_run(self, run: Run) -> None:
        """Run the evaluators on the run.

        Parameters
        ----------
        run : Run
            The run to be evaluated.
        """
        run_ = run.copy()
        run_.reference_example_id = self.example_id
        for evaluator in self.evaluators:
            self.futures.add(
                self.executor.submit(self.client.evaluate_run, run_, evaluator)
            )

    def wait_for_futures(self) -> None:
        """Wait for all futures to complete."""
        futures = list(self.futures)
        wait(futures)
        for future in futures:
            self.futures.remove(future)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/evaluation.html
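A sketch of wiring the evaluator tracer into a traced run. Here `my_evaluator` is a hypothetical placeholder for any object implementing the SDK's RunEvaluator interface; its exact shape is not shown in this module, so none is fabricated.

from langchain.callbacks.tracers.evaluation import EvaluatorCallbackHandler

# my_evaluator: any implementation of langchainplus_sdk.RunEvaluator (hypothetical)
handler = EvaluatorCallbackHandler(evaluators=[my_evaluator])

# Pass callbacks=[handler] to a chain or LLM call; every top-level run is
# then submitted to client.evaluate_run(run, evaluator) on a worker thread.

# Block until all pending evaluations finish.
handler.wait_for_futures()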
Source code for langchain.callbacks.tracers.base

"""Base interfaces for tracing runs."""
from __future__ import annotations

import logging
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Any, Dict, List, Optional, Sequence, Union
from uuid import UUID

from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.tracers.schemas import Run, RunTypeEnum
from langchain.schema import Document, LLMResult

logger = logging.getLogger(__name__)


class TracerException(Exception):
    """Base class for exceptions in the tracers module."""


class BaseTracer(BaseCallbackHandler, ABC):
    """Base interface for tracers."""

    def __init__(self, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self.run_map: Dict[str, Run] = {}

    @staticmethod
    def _add_child_run(
        parent_run: Run,
        child_run: Run,
    ) -> None:
        """Add a child run to a chain run or tool run."""
        parent_run.child_runs.append(child_run)

    @abstractmethod
    def _persist_run(self, run: Run) -> None:
        """Persist a run."""

    def _start_trace(self, run: Run) -> None:
        """Start a trace for a run."""
        if run.parent_run_id:
            # Use .get so a missing parent produces the warning below rather
            # than a KeyError.
            parent_run = self.run_map.get(str(run.parent_run_id))
            if parent_run:
                self._add_child_run(parent_run, run)
            else:
                logger.warning(f"Parent run with UUID {run.parent_run_id} not found.")
        self.run_map[str(run.id)] = run

    def _end_trace(self, run: Run) -> None:
        """End a trace for a run."""
"""End a trace for a run.""" if not run.parent_run_id: self._persist_run(run) else: parent_run = self.run_map.get(str(run.parent_run_id)) if parent_run is None: logger.warning(f"Parent run with UUID {run.parent_run_id} not found.") elif ( run.child_execution_order is not None and parent_run.child_execution_order is not None and run.child_execution_order > parent_run.child_execution_order ): parent_run.child_execution_order = run.child_execution_order self.run_map.pop(str(run.id)) def _get_execution_order(self, parent_run_id: Optional[str] = None) -> int: """Get the execution order for a run.""" if parent_run_id is None: return 1 parent_run = self.run_map.get(parent_run_id) if parent_run is None: logger.warning(f"Parent run with UUID {parent_run_id} not found.") return 1 if parent_run.child_execution_order is None: raise TracerException( f"Parent run with UUID {parent_run_id} has no child execution order." ) return parent_run.child_execution_order + 1 [docs] def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], *, run_id: UUID, tags: Optional[List[str]] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> None: """Start a trace for an LLM run.""" parent_run_id_ = str(parent_run_id) if parent_run_id else None execution_order = self._get_execution_order(parent_run_id_)
        execution_order = self._get_execution_order(parent_run_id_)
        llm_run = Run(
            id=run_id,
            parent_run_id=parent_run_id,
            serialized=serialized,
            inputs={"prompts": prompts},
            extra=kwargs,
            start_time=datetime.utcnow(),
            execution_order=execution_order,
            child_execution_order=execution_order,
            run_type=RunTypeEnum.llm,
            tags=tags or [],
        )
        self._start_trace(llm_run)
        self._on_llm_start(llm_run)

    def on_llm_end(self, response: LLMResult, *, run_id: UUID, **kwargs: Any) -> None:
        """End a trace for an LLM run."""
        if not run_id:
            raise TracerException("No run_id provided for on_llm_end callback.")
        run_id_ = str(run_id)
        llm_run = self.run_map.get(run_id_)
        if llm_run is None or llm_run.run_type != RunTypeEnum.llm:
            raise TracerException("No LLM Run found to be traced")
        llm_run.outputs = response.dict()
        llm_run.end_time = datetime.utcnow()
        self._end_trace(llm_run)
        self._on_llm_end(llm_run)

    def on_llm_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        *,
        run_id: UUID,
        **kwargs: Any,
    ) -> None:
        """Handle an error for an LLM run."""
        if not run_id:
            raise TracerException("No run_id provided for on_llm_error callback.")
        run_id_ = str(run_id)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/base.html
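Since BaseTracer leaves only _persist_run abstract, a subclass can be very small. A minimal in-memory tracer, sketched from the interface above (the class itself is hypothetical, not part of the library):

from typing import List

from langchain.callbacks.tracers.base import BaseTracer
from langchain.callbacks.tracers.schemas import Run


class InMemoryTracer(BaseTracer):
    """Minimal tracer that collects completed top-level runs in a list."""

    def __init__(self) -> None:
        super().__init__()
        self.runs: List[Run] = []

    def _persist_run(self, run: Run) -> None:
        # _end_trace calls this only for runs without a parent, i.e. once the
        # whole nested run tree (run.child_runs) has been assembled.
        self.runs.append(run)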