id
stringlengths
14
15
text
stringlengths
49
2.47k
source
stringlengths
61
166
170f9c169d98-3
elif isinstance(message, SystemMessage): message_dict = {"role": "system", "content": message.content} elif isinstance(message, ChatMessage): message_dict = {"role": message.role, "content": message.content} else: raise ValueError(f"Got unknown type {message}") if "name" in message.additional_kwargs: message_dict["name"] = message.additional_kwargs["name"] return message_dict def _create_message_dicts( self, messages: List[BaseMessage] ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]: params: Dict[str, Any] = {} message_dicts = [self._convert_message_to_dict(m) for m in messages] return message_dicts, params
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/promptlayer_callback.html
fb8860cdeb6a-0
Source code for langchain.callbacks.utils import hashlib from pathlib import Path from typing import Any, Dict, Iterable, Tuple, Union [docs]def import_spacy() -> Any: """Import the spacy python package and raise an error if it is not installed.""" try: import spacy except ImportError: raise ImportError( "This callback manager requires the `spacy` python " "package installed. Please install it with `pip install spacy`" ) return spacy [docs]def import_pandas() -> Any: """Import the pandas python package and raise an error if it is not installed.""" try: import pandas except ImportError: raise ImportError( "This callback manager requires the `pandas` python " "package installed. Please install it with `pip install pandas`" ) return pandas [docs]def import_textstat() -> Any: """Import the textstat python package and raise an error if it is not installed.""" try: import textstat except ImportError: raise ImportError( "This callback manager requires the `textstat` python " "package installed. Please install it with `pip install textstat`" ) return textstat def _flatten_dict( nested_dict: Dict[str, Any], parent_key: str = "", sep: str = "_" ) -> Iterable[Tuple[str, Any]]: """ Generator that yields flattened items from a nested dictionary for a flat dict. Parameters: nested_dict (dict): The nested dictionary to flatten. parent_key (str): The prefix to prepend to the keys of the flattened dict.
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/utils.html
fb8860cdeb6a-1
parent_key (str): The prefix to prepend to the keys of the flattened dict. sep (str): The separator to use between the parent key and the key of the flattened dictionary. Yields: (str, any): A key-value pair from the flattened dictionary. """ for key, value in nested_dict.items(): new_key = parent_key + sep + key if parent_key else key if isinstance(value, dict): yield from _flatten_dict(value, new_key, sep) else: yield new_key, value [docs]def flatten_dict( nested_dict: Dict[str, Any], parent_key: str = "", sep: str = "_" ) -> Dict[str, Any]: """Flattens a nested dictionary into a flat dictionary. Parameters: nested_dict (dict): The nested dictionary to flatten. parent_key (str): The prefix to prepend to the keys of the flattened dict. sep (str): The separator to use between the parent key and the key of the flattened dictionary. Returns: (dict): A flat dictionary. """ flat_dict = {k: v for k, v in _flatten_dict(nested_dict, parent_key, sep)} return flat_dict [docs]def hash_string(s: str) -> str: """Hash a string using sha1. Parameters: s (str): The string to hash. Returns: (str): The hashed string. """ return hashlib.sha1(s.encode("utf-8")).hexdigest() [docs]def load_json(json_path: Union[str, Path]) -> str: """Load json file to a string. Parameters:
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/utils.html
fb8860cdeb6a-2
"""Load json file to a string. Parameters: json_path (str): The path to the json file. Returns: (str): The string representation of the json file. """ with open(json_path, "r") as f: data = f.read() return data [docs]class BaseMetadataCallbackHandler: """This class handles the metadata and associated function states for callbacks. Attributes: step (int): The current step. starts (int): The number of times the start method has been called. ends (int): The number of times the end method has been called. errors (int): The number of times the error method has been called. text_ctr (int): The number of times the text method has been called. ignore_llm_ (bool): Whether to ignore llm callbacks. ignore_chain_ (bool): Whether to ignore chain callbacks. ignore_agent_ (bool): Whether to ignore agent callbacks. ignore_retriever_ (bool): Whether to ignore retriever callbacks. always_verbose_ (bool): Whether to always be verbose. chain_starts (int): The number of times the chain start method has been called. chain_ends (int): The number of times the chain end method has been called. llm_starts (int): The number of times the llm start method has been called. llm_ends (int): The number of times the llm end method has been called. llm_streams (int): The number of times the text method has been called. tool_starts (int): The number of times the tool start method has been called. tool_ends (int): The number of times the tool end method has been called.
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/utils.html
fb8860cdeb6a-3
tool_ends (int): The number of times the tool end method has been called. agent_ends (int): The number of times the agent end method has been called. on_llm_start_records (list): A list of records of the on_llm_start method. on_llm_token_records (list): A list of records of the on_llm_token method. on_llm_end_records (list): A list of records of the on_llm_end method. on_chain_start_records (list): A list of records of the on_chain_start method. on_chain_end_records (list): A list of records of the on_chain_end method. on_tool_start_records (list): A list of records of the on_tool_start method. on_tool_end_records (list): A list of records of the on_tool_end method. on_agent_finish_records (list): A list of records of the on_agent_end method. """ [docs] def __init__(self) -> None: self.step = 0 self.starts = 0 self.ends = 0 self.errors = 0 self.text_ctr = 0 self.ignore_llm_ = False self.ignore_chain_ = False self.ignore_agent_ = False self.ignore_retriever_ = False self.always_verbose_ = False self.chain_starts = 0 self.chain_ends = 0 self.llm_starts = 0 self.llm_ends = 0 self.llm_streams = 0 self.tool_starts = 0 self.tool_ends = 0 self.agent_ends = 0 self.on_llm_start_records: list = [] self.on_llm_token_records: list = []
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/utils.html
fb8860cdeb6a-4
self.on_llm_token_records: list = [] self.on_llm_end_records: list = [] self.on_chain_start_records: list = [] self.on_chain_end_records: list = [] self.on_tool_start_records: list = [] self.on_tool_end_records: list = [] self.on_text_records: list = [] self.on_agent_finish_records: list = [] self.on_agent_action_records: list = [] @property def always_verbose(self) -> bool: """Whether to call verbose callbacks even if verbose is False.""" return self.always_verbose_ @property def ignore_llm(self) -> bool: """Whether to ignore LLM callbacks.""" return self.ignore_llm_ @property def ignore_chain(self) -> bool: """Whether to ignore chain callbacks.""" return self.ignore_chain_ @property def ignore_agent(self) -> bool: """Whether to ignore agent callbacks.""" return self.ignore_agent_ [docs] def get_custom_callback_meta(self) -> Dict[str, Any]: return { "step": self.step, "starts": self.starts, "ends": self.ends, "errors": self.errors, "text_ctr": self.text_ctr, "chain_starts": self.chain_starts, "chain_ends": self.chain_ends, "llm_starts": self.llm_starts, "llm_ends": self.llm_ends, "llm_streams": self.llm_streams, "tool_starts": self.tool_starts, "tool_ends": self.tool_ends, "agent_ends": self.agent_ends, } [docs] def reset_callback_meta(self) -> None:
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/utils.html
fb8860cdeb6a-5
} [docs] def reset_callback_meta(self) -> None: """Reset the callback metadata.""" self.step = 0 self.starts = 0 self.ends = 0 self.errors = 0 self.text_ctr = 0 self.ignore_llm_ = False self.ignore_chain_ = False self.ignore_agent_ = False self.always_verbose_ = False self.chain_starts = 0 self.chain_ends = 0 self.llm_starts = 0 self.llm_ends = 0 self.llm_streams = 0 self.tool_starts = 0 self.tool_ends = 0 self.agent_ends = 0 self.on_llm_start_records = [] self.on_llm_token_records = [] self.on_llm_end_records = [] self.on_chain_start_records = [] self.on_chain_end_records = [] self.on_tool_start_records = [] self.on_tool_end_records = [] self.on_text_records = [] self.on_agent_finish_records = [] self.on_agent_action_records = [] return None
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/utils.html
68633560183b-0
Source code for langchain.callbacks.streamlit.streamlit_callback_handler """Callback Handler that prints to streamlit.""" from __future__ import annotations from enum import Enum from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from langchain.callbacks.base import BaseCallbackHandler from langchain.callbacks.streamlit.mutable_expander import MutableExpander from langchain.schema import AgentAction, AgentFinish, LLMResult if TYPE_CHECKING: from streamlit.delta_generator import DeltaGenerator def _convert_newlines(text: str) -> str: """Convert newline characters to markdown newline sequences (space, space, newline). """ return text.replace("\n", " \n") CHECKMARK_EMOJI = "✅" THINKING_EMOJI = ":thinking_face:" HISTORY_EMOJI = ":books:" EXCEPTION_EMOJI = "⚠️" [docs]class LLMThoughtState(Enum): """Enumerator of the LLMThought state.""" # The LLM is thinking about what to do next. We don't know which tool we'll run. THINKING = "THINKING" # The LLM has decided to run a tool. We don't have results from the tool yet. RUNNING_TOOL = "RUNNING_TOOL" # We have results from the tool. COMPLETE = "COMPLETE" [docs]class ToolRecord(NamedTuple): """The tool record as a NamedTuple.""" name: str input_str: str [docs]class LLMThoughtLabeler: """ Generates markdown labels for LLMThought containers. Pass a custom subclass of this to StreamlitCallbackHandler to override its default labeling logic. """
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/streamlit_callback_handler.html
68633560183b-1
labeling logic. """ [docs] def get_initial_label(self) -> str: """Return the markdown label for a new LLMThought that doesn't have an associated tool yet. """ return f"{THINKING_EMOJI} **Thinking...**" [docs] def get_tool_label(self, tool: ToolRecord, is_complete: bool) -> str: """Return the label for an LLMThought that has an associated tool. Parameters ---------- tool The tool's ToolRecord is_complete True if the thought is complete; False if the thought is still receiving input. Returns ------- The markdown label for the thought's container. """ input = tool.input_str name = tool.name emoji = CHECKMARK_EMOJI if is_complete else THINKING_EMOJI if name == "_Exception": emoji = EXCEPTION_EMOJI name = "Parsing error" idx = min([60, len(input)]) input = input[0:idx] if len(tool.input_str) > idx: input = input + "..." input = input.replace("\n", " ") label = f"{emoji} **{name}:** {input}" return label [docs] def get_history_label(self) -> str: """Return a markdown label for the special 'history' container that contains overflow thoughts. """ return f"{HISTORY_EMOJI} **History**" [docs] def get_final_agent_thought_label(self) -> str: """Return the markdown label for the agent's final thought - the "Now I have the answer" thought, that doesn't involve a tool.
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/streamlit_callback_handler.html
68633560183b-2
a tool. """ return f"{CHECKMARK_EMOJI} **Complete!**" [docs]class LLMThought: """A thought in the LLM's thought stream.""" [docs] def __init__( self, parent_container: DeltaGenerator, labeler: LLMThoughtLabeler, expanded: bool, collapse_on_complete: bool, ): """Initialize the LLMThought. Args: parent_container: The container we're writing into. labeler: The labeler to use for this thought. expanded: Whether the thought should be expanded by default. collapse_on_complete: Whether the thought should be collapsed. """ self._container = MutableExpander( parent_container=parent_container, label=labeler.get_initial_label(), expanded=expanded, ) self._state = LLMThoughtState.THINKING self._llm_token_stream = "" self._llm_token_writer_idx: Optional[int] = None self._last_tool: Optional[ToolRecord] = None self._collapse_on_complete = collapse_on_complete self._labeler = labeler @property def container(self) -> MutableExpander: """The container we're writing into.""" return self._container @property def last_tool(self) -> Optional[ToolRecord]: """The last tool executed by this thought""" return self._last_tool def _reset_llm_token_stream(self) -> None: self._llm_token_stream = "" self._llm_token_writer_idx = None [docs] def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str]) -> None:
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/streamlit_callback_handler.html
68633560183b-3
self._reset_llm_token_stream() [docs] def on_llm_new_token(self, token: str, **kwargs: Any) -> None: # This is only called when the LLM is initialized with `streaming=True` self._llm_token_stream += _convert_newlines(token) self._llm_token_writer_idx = self._container.markdown( self._llm_token_stream, index=self._llm_token_writer_idx ) [docs] def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: # `response` is the concatenation of all the tokens received by the LLM. # If we're receiving streaming tokens from `on_llm_new_token`, this response # data is redundant self._reset_llm_token_stream() [docs] def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: self._container.markdown("**LLM encountered an error...**") self._container.exception(error) [docs] def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any ) -> None: # Called with the name of the tool we're about to run (in `serialized[name]`), # and its input. We change our container's label to be the tool name. self._state = LLMThoughtState.RUNNING_TOOL tool_name = serialized["name"] self._last_tool = ToolRecord(name=tool_name, input_str=input_str) self._container.update( new_label=self._labeler.get_tool_label(self._last_tool, is_complete=False) )
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/streamlit_callback_handler.html
68633560183b-4
) [docs] def on_tool_end( self, output: str, color: Optional[str] = None, observation_prefix: Optional[str] = None, llm_prefix: Optional[str] = None, **kwargs: Any, ) -> None: self._container.markdown(f"**{output}**") [docs] def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: self._container.markdown("**Tool encountered an error...**") self._container.exception(error) [docs] def on_agent_action( self, action: AgentAction, color: Optional[str] = None, **kwargs: Any ) -> Any: # Called when we're about to kick off a new tool. The `action` data # tells us the tool we're about to use, and the input we'll give it. # We don't output anything here, because we'll receive this same data # when `on_tool_start` is called immediately after. pass [docs] def complete(self, final_label: Optional[str] = None) -> None: """Finish the thought.""" if final_label is None and self._state == LLMThoughtState.RUNNING_TOOL: assert ( self._last_tool is not None ), "_last_tool should never be null when _state == RUNNING_TOOL" final_label = self._labeler.get_tool_label( self._last_tool, is_complete=True ) self._state = LLMThoughtState.COMPLETE if self._collapse_on_complete: self._container.update(new_label=final_label, new_expanded=False) else:
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/streamlit_callback_handler.html
68633560183b-5
else: self._container.update(new_label=final_label) [docs] def clear(self) -> None: """Remove the thought from the screen. A cleared thought can't be reused.""" self._container.clear() [docs]class StreamlitCallbackHandler(BaseCallbackHandler): """A callback handler that writes to a Streamlit app.""" [docs] def __init__( self, parent_container: DeltaGenerator, *, max_thought_containers: int = 4, expand_new_thoughts: bool = True, collapse_completed_thoughts: bool = True, thought_labeler: Optional[LLMThoughtLabeler] = None, ): """Create a StreamlitCallbackHandler instance. Parameters ---------- parent_container The `st.container` that will contain all the Streamlit elements that the Handler creates. max_thought_containers The max number of completed LLM thought containers to show at once. When this threshold is reached, a new thought will cause the oldest thoughts to be collapsed into a "History" expander. Defaults to 4. expand_new_thoughts Each LLM "thought" gets its own `st.expander`. This param controls whether that expander is expanded by default. Defaults to True. collapse_completed_thoughts If True, LLM thought expanders will be collapsed when completed. Defaults to True. thought_labeler An optional custom LLMThoughtLabeler instance. If unspecified, the handler will use the default thought labeling logic. Defaults to None. """ self._parent_container = parent_container self._history_parent = parent_container.container()
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/streamlit_callback_handler.html
68633560183b-6
self._parent_container = parent_container self._history_parent = parent_container.container() self._history_container: Optional[MutableExpander] = None self._current_thought: Optional[LLMThought] = None self._completed_thoughts: List[LLMThought] = [] self._max_thought_containers = max(max_thought_containers, 1) self._expand_new_thoughts = expand_new_thoughts self._collapse_completed_thoughts = collapse_completed_thoughts self._thought_labeler = thought_labeler or LLMThoughtLabeler() def _require_current_thought(self) -> LLMThought: """Return our current LLMThought. Raise an error if we have no current thought. """ if self._current_thought is None: raise RuntimeError("Current LLMThought is unexpectedly None!") return self._current_thought def _get_last_completed_thought(self) -> Optional[LLMThought]: """Return our most recent completed LLMThought, or None if we don't have one.""" if len(self._completed_thoughts) > 0: return self._completed_thoughts[len(self._completed_thoughts) - 1] return None @property def _num_thought_containers(self) -> int: """The number of 'thought containers' we're currently showing: the number of completed thought containers, the history container (if it exists), and the current thought container (if it exists). """ count = len(self._completed_thoughts) if self._history_container is not None: count += 1 if self._current_thought is not None: count += 1
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/streamlit_callback_handler.html
68633560183b-7
if self._current_thought is not None: count += 1 return count def _complete_current_thought(self, final_label: Optional[str] = None) -> None: """Complete the current thought, optionally assigning it a new label. Add it to our _completed_thoughts list. """ thought = self._require_current_thought() thought.complete(final_label) self._completed_thoughts.append(thought) self._current_thought = None def _prune_old_thought_containers(self) -> None: """If we have too many thoughts onscreen, move older thoughts to the 'history container.' """ while ( self._num_thought_containers > self._max_thought_containers and len(self._completed_thoughts) > 0 ): # Create our history container if it doesn't exist, and if # max_thought_containers is > 1. (if max_thought_containers is 1, we don't # have room to show history.) if self._history_container is None and self._max_thought_containers > 1: self._history_container = MutableExpander( self._history_parent, label=self._thought_labeler.get_history_label(), expanded=False, ) oldest_thought = self._completed_thoughts.pop(0) if self._history_container is not None: self._history_container.markdown(oldest_thought.container.label) self._history_container.append_copy(oldest_thought.container) oldest_thought.clear() [docs] def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/streamlit_callback_handler.html
68633560183b-8
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: if self._current_thought is None: self._current_thought = LLMThought( parent_container=self._parent_container, expanded=self._expand_new_thoughts, collapse_on_complete=self._collapse_completed_thoughts, labeler=self._thought_labeler, ) self._current_thought.on_llm_start(serialized, prompts) # We don't prune_old_thought_containers here, because our container won't # be visible until it has a child. [docs] def on_llm_new_token(self, token: str, **kwargs: Any) -> None: self._require_current_thought().on_llm_new_token(token, **kwargs) self._prune_old_thought_containers() [docs] def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: self._require_current_thought().on_llm_end(response, **kwargs) self._prune_old_thought_containers() [docs] def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: self._require_current_thought().on_llm_error(error, **kwargs) self._prune_old_thought_containers() [docs] def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any ) -> None: self._require_current_thought().on_tool_start(serialized, input_str, **kwargs) self._prune_old_thought_containers() [docs] def on_tool_end( self,
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/streamlit_callback_handler.html
68633560183b-9
[docs] def on_tool_end( self, output: str, color: Optional[str] = None, observation_prefix: Optional[str] = None, llm_prefix: Optional[str] = None, **kwargs: Any, ) -> None: self._require_current_thought().on_tool_end( output, color, observation_prefix, llm_prefix, **kwargs ) self._complete_current_thought() [docs] def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: self._require_current_thought().on_tool_error(error, **kwargs) self._prune_old_thought_containers() [docs] def on_text( self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any, ) -> None: pass [docs] def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: pass [docs] def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: pass [docs] def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: pass [docs] def on_agent_action( self, action: AgentAction, color: Optional[str] = None, **kwargs: Any ) -> Any: self._require_current_thought().on_agent_action(action, color, **kwargs) self._prune_old_thought_containers()
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/streamlit_callback_handler.html
68633560183b-10
self._prune_old_thought_containers() [docs] def on_agent_finish( self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any ) -> None: if self._current_thought is not None: self._current_thought.complete( self._thought_labeler.get_final_agent_thought_label() ) self._current_thought = None
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/streamlit_callback_handler.html
3069448efa01-0
Source code for langchain.callbacks.streamlit.mutable_expander from __future__ import annotations from enum import Enum from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional if TYPE_CHECKING: from streamlit.delta_generator import DeltaGenerator from streamlit.type_util import SupportsStr [docs]class ChildType(Enum): """The enumerator of the child type.""" MARKDOWN = "MARKDOWN" EXCEPTION = "EXCEPTION" [docs]class ChildRecord(NamedTuple): """The child record as a NamedTuple.""" type: ChildType kwargs: Dict[str, Any] dg: DeltaGenerator [docs]class MutableExpander: """A Streamlit expander that can be renamed and dynamically expanded/collapsed.""" [docs] def __init__(self, parent_container: DeltaGenerator, label: str, expanded: bool): """Create a new MutableExpander. Parameters ---------- parent_container The `st.container` that the expander will be created inside. The expander transparently deletes and recreates its underlying `st.expander` instance when its label changes, and it uses `parent_container` to ensure it recreates this underlying expander in the same location onscreen. label The expander's initial label. expanded The expander's initial `expanded` value. """ self._label = label self._expanded = expanded self._parent_cursor = parent_container.empty() self._container = self._parent_cursor.expander(label, expanded) self._child_records: List[ChildRecord] = [] @property def label(self) -> str: """The expander's label string."""
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/mutable_expander.html
3069448efa01-1
def label(self) -> str: """The expander's label string.""" return self._label @property def expanded(self) -> bool: """True if the expander was created with `expanded=True`.""" return self._expanded [docs] def clear(self) -> None: """Remove the container and its contents entirely. A cleared container can't be reused. """ self._container = self._parent_cursor.empty() self._child_records.clear() [docs] def append_copy(self, other: MutableExpander) -> None: """Append a copy of another MutableExpander's children to this MutableExpander. """ other_records = other._child_records.copy() for record in other_records: self._create_child(record.type, record.kwargs) [docs] def update( self, *, new_label: Optional[str] = None, new_expanded: Optional[bool] = None ) -> None: """Change the expander's label and expanded state""" if new_label is None: new_label = self._label if new_expanded is None: new_expanded = self._expanded if self._label == new_label and self._expanded == new_expanded: # No change! return self._label = new_label self._expanded = new_expanded self._container = self._parent_cursor.expander(new_label, new_expanded) prev_records = self._child_records self._child_records = [] # Replay all children into the new container for record in prev_records: self._create_child(record.type, record.kwargs) [docs] def markdown( self,
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/mutable_expander.html
3069448efa01-2
[docs] def markdown( self, body: SupportsStr, unsafe_allow_html: bool = False, *, help: Optional[str] = None, index: Optional[int] = None, ) -> int: """Add a Markdown element to the container and return its index.""" kwargs = {"body": body, "unsafe_allow_html": unsafe_allow_html, "help": help} new_dg = self._get_dg(index).markdown(**kwargs) # type: ignore[arg-type] record = ChildRecord(ChildType.MARKDOWN, kwargs, new_dg) return self._add_record(record, index) [docs] def exception( self, exception: BaseException, *, index: Optional[int] = None ) -> int: """Add an Exception element to the container and return its index.""" kwargs = {"exception": exception} new_dg = self._get_dg(index).exception(**kwargs) record = ChildRecord(ChildType.EXCEPTION, kwargs, new_dg) return self._add_record(record, index) def _create_child(self, type: ChildType, kwargs: Dict[str, Any]) -> None: """Create a new child with the given params""" if type == ChildType.MARKDOWN: self.markdown(**kwargs) elif type == ChildType.EXCEPTION: self.exception(**kwargs) else: raise RuntimeError(f"Unexpected child type {type}") def _add_record(self, record: ChildRecord, index: Optional[int]) -> int: """Add a ChildRecord to self._children. If `index` is specified, replace the existing record at that index. Otherwise, append the record to the
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/mutable_expander.html
3069448efa01-3
the existing record at that index. Otherwise, append the record to the end of the list. Return the index of the added record. """ if index is not None: # Replace existing child self._child_records[index] = record return index # Append new child self._child_records.append(record) return len(self._child_records) - 1 def _get_dg(self, index: Optional[int]) -> DeltaGenerator: if index is not None: # Existing index: reuse child's DeltaGenerator assert 0 <= index < len(self._child_records), f"Bad index: {index}" return self._child_records[index].dg # No index: use container's DeltaGenerator return self._container
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit/mutable_expander.html
8fe95f5bc9a3-0
Source code for langchain.callbacks.tracers.run_collector """A tracer that collects all nested runs in a list.""" from typing import Any, List, Optional, Union from uuid import UUID from langchain.callbacks.tracers.base import BaseTracer from langchain.callbacks.tracers.schemas import Run [docs]class RunCollectorCallbackHandler(BaseTracer): """ A tracer that collects all nested runs in a list. This tracer is useful for inspection and evaluation purposes. Parameters ---------- example_id : Optional[Union[UUID, str]], default=None The ID of the example being traced. It can be either a UUID or a string. """ name = "run-collector_callback_handler" [docs] def __init__( self, example_id: Optional[Union[UUID, str]] = None, **kwargs: Any ) -> None: """ Initialize the RunCollectorCallbackHandler. Parameters ---------- example_id : Optional[Union[UUID, str]], default=None The ID of the example being traced. It can be either a UUID or a string. """ super().__init__(**kwargs) self.example_id = ( UUID(example_id) if isinstance(example_id, str) else example_id ) self.traced_runs: List[Run] = [] def _persist_run(self, run: Run) -> None: """ Persist a run by adding it to the traced_runs list. Parameters ---------- run : Run The run to be persisted. """ run_ = run.copy() run_.reference_example_id = self.example_id self.traced_runs.append(run_)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/run_collector.html
748d98644da0-0
Source code for langchain.callbacks.tracers.base """Base interfaces for tracing runs.""" from __future__ import annotations import logging from abc import ABC, abstractmethod from datetime import datetime from typing import Any, Dict, List, Optional, Sequence, Union, cast from uuid import UUID from tenacity import RetryCallState from langchain.callbacks.base import BaseCallbackHandler from langchain.callbacks.tracers.schemas import Run from langchain.load.dump import dumpd from langchain.schema.document import Document from langchain.schema.output import ChatGeneration, LLMResult logger = logging.getLogger(__name__) [docs]class TracerException(Exception): """Base class for exceptions in tracers module.""" [docs]class BaseTracer(BaseCallbackHandler, ABC): """Base interface for tracers.""" [docs] def __init__(self, **kwargs: Any) -> None: super().__init__(**kwargs) self.run_map: Dict[str, Run] = {} @staticmethod def _add_child_run( parent_run: Run, child_run: Run, ) -> None: """Add child run to a chain run or tool run.""" parent_run.child_runs.append(child_run) @abstractmethod def _persist_run(self, run: Run) -> None: """Persist a run.""" def _start_trace(self, run: Run) -> None: """Start a trace for a run.""" if run.parent_run_id: parent_run = self.run_map[str(run.parent_run_id)] if parent_run: self._add_child_run(parent_run, run) else: logger.debug(f"Parent run with UUID {run.parent_run_id} not found.") self.run_map[str(run.id)] = run
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/base.html
748d98644da0-1
self.run_map[str(run.id)] = run def _end_trace(self, run: Run) -> None: """End a trace for a run.""" if not run.parent_run_id: self._persist_run(run) else: parent_run = self.run_map.get(str(run.parent_run_id)) if parent_run is None: logger.debug(f"Parent run with UUID {run.parent_run_id} not found.") elif ( run.child_execution_order is not None and parent_run.child_execution_order is not None and run.child_execution_order > parent_run.child_execution_order ): parent_run.child_execution_order = run.child_execution_order self.run_map.pop(str(run.id)) def _get_execution_order(self, parent_run_id: Optional[str] = None) -> int: """Get the execution order for a run.""" if parent_run_id is None: return 1 parent_run = self.run_map.get(parent_run_id) if parent_run is None: logger.debug(f"Parent run with UUID {parent_run_id} not found.") return 1 if parent_run.child_execution_order is None: raise TracerException( f"Parent run with UUID {parent_run_id} has no child execution order." ) return parent_run.child_execution_order + 1 [docs] def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], *, run_id: UUID, tags: Optional[List[str]] = None, parent_run_id: Optional[UUID] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> None:
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/base.html
748d98644da0-2
**kwargs: Any, ) -> None: """Start a trace for an LLM run.""" parent_run_id_ = str(parent_run_id) if parent_run_id else None execution_order = self._get_execution_order(parent_run_id_) start_time = datetime.utcnow() if metadata: kwargs.update({"metadata": metadata}) llm_run = Run( id=run_id, parent_run_id=parent_run_id, serialized=serialized, inputs={"prompts": prompts}, extra=kwargs, events=[{"name": "start", "time": start_time}], start_time=start_time, execution_order=execution_order, child_execution_order=execution_order, run_type="llm", tags=tags or [], ) self._start_trace(llm_run) self._on_llm_start(llm_run) [docs] def on_llm_new_token( self, token: str, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> None: """Run on new LLM token. Only available when streaming is enabled.""" if not run_id: raise TracerException("No run_id provided for on_llm_new_token callback.") run_id_ = str(run_id) llm_run = self.run_map.get(run_id_) if llm_run is None or llm_run.run_type != "llm": raise TracerException(f"No LLM Run found to be traced for {run_id}") llm_run.events.append( { "name": "new_token", "time": datetime.utcnow(),
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/base.html
748d98644da0-3
{ "name": "new_token", "time": datetime.utcnow(), "kwargs": {"token": token}, }, ) [docs] def on_retry( self, retry_state: RetryCallState, *, run_id: UUID, **kwargs: Any, ) -> None: if not run_id: raise TracerException("No run_id provided for on_retry callback.") run_id_ = str(run_id) llm_run = self.run_map.get(run_id_) if llm_run is None: raise TracerException("No Run found to be traced for on_retry") retry_d: Dict[str, Any] = { "slept": retry_state.idle_for, "attempt": retry_state.attempt_number, } if retry_state.outcome is None: retry_d["outcome"] = "N/A" elif retry_state.outcome.failed: retry_d["outcome"] = "failed" exception = retry_state.outcome.exception() retry_d["exception"] = str(exception) retry_d["exception_type"] = exception.__class__.__name__ else: retry_d["outcome"] = "success" retry_d["result"] = str(retry_state.outcome.result()) llm_run.events.append( { "name": "retry", "time": datetime.utcnow(), "kwargs": retry_d, }, ) [docs] def on_llm_end(self, response: LLMResult, *, run_id: UUID, **kwargs: Any) -> None: """End a trace for an LLM run.""" if not run_id:
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/base.html
748d98644da0-4
"""End a trace for an LLM run.""" if not run_id: raise TracerException("No run_id provided for on_llm_end callback.") run_id_ = str(run_id) llm_run = self.run_map.get(run_id_) if llm_run is None or llm_run.run_type != "llm": raise TracerException(f"No LLM Run found to be traced for {run_id}") llm_run.outputs = response.dict() for i, generations in enumerate(response.generations): for j, generation in enumerate(generations): output_generation = llm_run.outputs["generations"][i][j] if "message" in output_generation: output_generation["message"] = dumpd( cast(ChatGeneration, generation).message ) llm_run.end_time = datetime.utcnow() llm_run.events.append({"name": "end", "time": llm_run.end_time}) self._end_trace(llm_run) self._on_llm_end(llm_run) [docs] def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], *, run_id: UUID, **kwargs: Any, ) -> None: """Handle an error for an LLM run.""" if not run_id: raise TracerException("No run_id provided for on_llm_error callback.") run_id_ = str(run_id) llm_run = self.run_map.get(run_id_) if llm_run is None or llm_run.run_type != "llm": raise TracerException(f"No LLM Run found to be traced for {run_id}") llm_run.error = repr(error)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/base.html
748d98644da0-5
llm_run.error = repr(error) llm_run.end_time = datetime.utcnow() llm_run.events.append({"name": "error", "time": llm_run.end_time}) self._end_trace(llm_run) self._on_chain_error(llm_run) [docs] def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], *, run_id: UUID, tags: Optional[List[str]] = None, parent_run_id: Optional[UUID] = None, metadata: Optional[Dict[str, Any]] = None, run_type: Optional[str] = None, **kwargs: Any, ) -> None: """Start a trace for a chain run.""" parent_run_id_ = str(parent_run_id) if parent_run_id else None execution_order = self._get_execution_order(parent_run_id_) start_time = datetime.utcnow() if metadata: kwargs.update({"metadata": metadata}) chain_run = Run( id=run_id, parent_run_id=parent_run_id, serialized=serialized, inputs=inputs, extra=kwargs, events=[{"name": "start", "time": start_time}], start_time=start_time, execution_order=execution_order, child_execution_order=execution_order, child_runs=[], run_type=run_type or "chain", tags=tags or [], ) self._start_trace(chain_run) self._on_chain_start(chain_run) [docs] def on_chain_end( self, outputs: Dict[str, Any], *, run_id: UUID, **kwargs: Any ) -> None:
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/base.html
748d98644da0-6
) -> None: """End a trace for a chain run.""" if not run_id: raise TracerException("No run_id provided for on_chain_end callback.") chain_run = self.run_map.get(str(run_id)) if chain_run is None: raise TracerException(f"No chain Run found to be traced for {run_id}") chain_run.outputs = outputs chain_run.end_time = datetime.utcnow() chain_run.events.append({"name": "end", "time": chain_run.end_time}) self._end_trace(chain_run) self._on_chain_end(chain_run) [docs] def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], *, run_id: UUID, **kwargs: Any, ) -> None: """Handle an error for a chain run.""" if not run_id: raise TracerException("No run_id provided for on_chain_error callback.") chain_run = self.run_map.get(str(run_id)) if chain_run is None: raise TracerException(f"No chain Run found to be traced for {run_id}") chain_run.error = repr(error) chain_run.end_time = datetime.utcnow() chain_run.events.append({"name": "error", "time": chain_run.end_time}) self._end_trace(chain_run) self._on_chain_error(chain_run) [docs] def on_tool_start( self, serialized: Dict[str, Any], input_str: str, *, run_id: UUID, tags: Optional[List[str]] = None, parent_run_id: Optional[UUID] = None, metadata: Optional[Dict[str, Any]] = None,
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/base.html
748d98644da0-7
metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> None: """Start a trace for a tool run.""" parent_run_id_ = str(parent_run_id) if parent_run_id else None execution_order = self._get_execution_order(parent_run_id_) start_time = datetime.utcnow() if metadata: kwargs.update({"metadata": metadata}) tool_run = Run( id=run_id, parent_run_id=parent_run_id, serialized=serialized, inputs={"input": input_str}, extra=kwargs, events=[{"name": "start", "time": start_time}], start_time=start_time, execution_order=execution_order, child_execution_order=execution_order, child_runs=[], run_type="tool", tags=tags or [], ) self._start_trace(tool_run) self._on_tool_start(tool_run) [docs] def on_tool_end(self, output: str, *, run_id: UUID, **kwargs: Any) -> None: """End a trace for a tool run.""" if not run_id: raise TracerException("No run_id provided for on_tool_end callback.") tool_run = self.run_map.get(str(run_id)) if tool_run is None or tool_run.run_type != "tool": raise TracerException(f"No tool Run found to be traced for {run_id}") tool_run.outputs = {"output": output} tool_run.end_time = datetime.utcnow() tool_run.events.append({"name": "end", "time": tool_run.end_time}) self._end_trace(tool_run) self._on_tool_end(tool_run) [docs] def on_tool_error(
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/base.html
748d98644da0-8
self._on_tool_end(tool_run) [docs] def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], *, run_id: UUID, **kwargs: Any, ) -> None: """Handle an error for a tool run.""" if not run_id: raise TracerException("No run_id provided for on_tool_error callback.") tool_run = self.run_map.get(str(run_id)) if tool_run is None or tool_run.run_type != "tool": raise TracerException(f"No tool Run found to be traced for {run_id}") tool_run.error = repr(error) tool_run.end_time = datetime.utcnow() tool_run.events.append({"name": "error", "time": tool_run.end_time}) self._end_trace(tool_run) self._on_tool_error(tool_run) [docs] def on_retriever_start( self, serialized: Dict[str, Any], query: str, *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> None: """Run when Retriever starts running.""" parent_run_id_ = str(parent_run_id) if parent_run_id else None execution_order = self._get_execution_order(parent_run_id_) start_time = datetime.utcnow() if metadata: kwargs.update({"metadata": metadata}) retrieval_run = Run( id=run_id, name="Retriever", parent_run_id=parent_run_id, serialized=serialized,
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/base.html
748d98644da0-9
parent_run_id=parent_run_id, serialized=serialized, inputs={"query": query}, extra=kwargs, events=[{"name": "start", "time": start_time}], start_time=start_time, execution_order=execution_order, child_execution_order=execution_order, tags=tags, child_runs=[], run_type="retriever", ) self._start_trace(retrieval_run) self._on_retriever_start(retrieval_run) [docs] def on_retriever_error( self, error: Union[Exception, KeyboardInterrupt], *, run_id: UUID, **kwargs: Any, ) -> None: """Run when Retriever errors.""" if not run_id: raise TracerException("No run_id provided for on_retriever_error callback.") retrieval_run = self.run_map.get(str(run_id)) if retrieval_run is None or retrieval_run.run_type != "retriever": raise TracerException(f"No retriever Run found to be traced for {run_id}") retrieval_run.error = repr(error) retrieval_run.end_time = datetime.utcnow() retrieval_run.events.append({"name": "error", "time": retrieval_run.end_time}) self._end_trace(retrieval_run) self._on_retriever_error(retrieval_run) [docs] def on_retriever_end( self, documents: Sequence[Document], *, run_id: UUID, **kwargs: Any ) -> None: """Run when Retriever ends running.""" if not run_id: raise TracerException("No run_id provided for on_retriever_end callback.")
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/base.html
748d98644da0-10
raise TracerException("No run_id provided for on_retriever_end callback.") retrieval_run = self.run_map.get(str(run_id)) if retrieval_run is None or retrieval_run.run_type != "retriever": raise TracerException(f"No retriever Run found to be traced for {run_id}") retrieval_run.outputs = {"documents": documents} retrieval_run.end_time = datetime.utcnow() retrieval_run.events.append({"name": "end", "time": retrieval_run.end_time}) self._end_trace(retrieval_run) self._on_retriever_end(retrieval_run) def __deepcopy__(self, memo: dict) -> BaseTracer: """Deepcopy the tracer.""" return self def __copy__(self) -> BaseTracer: """Copy the tracer.""" return self def _on_llm_start(self, run: Run) -> None: """Process the LLM Run upon start.""" def _on_llm_end(self, run: Run) -> None: """Process the LLM Run.""" def _on_llm_error(self, run: Run) -> None: """Process the LLM Run upon error.""" def _on_chain_start(self, run: Run) -> None: """Process the Chain Run upon start.""" def _on_chain_end(self, run: Run) -> None: """Process the Chain Run.""" def _on_chain_error(self, run: Run) -> None: """Process the Chain Run upon error.""" def _on_tool_start(self, run: Run) -> None: """Process the Tool Run upon start.""" def _on_tool_end(self, run: Run) -> None: """Process the Tool Run."""
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/base.html
748d98644da0-11
"""Process the Tool Run.""" def _on_tool_error(self, run: Run) -> None: """Process the Tool Run upon error.""" def _on_chat_model_start(self, run: Run) -> None: """Process the Chat Model Run upon start.""" def _on_retriever_start(self, run: Run) -> None: """Process the Retriever Run upon start.""" def _on_retriever_end(self, run: Run) -> None: """Process the Retriever Run.""" def _on_retriever_error(self, run: Run) -> None: """Process the Retriever Run upon error."""
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/base.html
30b7c147151b-0
Source code for langchain.callbacks.tracers.wandb """A Tracer Implementation that records activity to Weights & Biases.""" from __future__ import annotations import json from typing import ( TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, TypedDict, Union, ) from langchain.callbacks.tracers.base import BaseTracer from langchain.callbacks.tracers.schemas import Run if TYPE_CHECKING: from wandb import Settings as WBSettings from wandb.sdk.data_types.trace_tree import Span from wandb.sdk.lib.paths import StrPath from wandb.wandb_run import Run as WBRun PRINT_WARNINGS = True def _serialize_inputs(run_inputs: dict) -> dict: if "input_documents" in run_inputs: docs = run_inputs["input_documents"] return {f"input_document_{i}": doc.json() for i, doc in enumerate(docs)} else: return run_inputs [docs]class RunProcessor: """Handles the conversion of a LangChain Runs into a WBTraceTree.""" [docs] def __init__(self, wandb_module: Any, trace_module: Any): self.wandb = wandb_module self.trace_tree = trace_module [docs] def process_span(self, run: Run) -> Optional["Span"]: """Converts a LangChain Run into a W&B Trace Span. :param run: The LangChain Run to convert. :return: The converted W&B Trace Span. """ try: span = self._convert_lc_run_to_wb_span(run) return span except Exception as e: if PRINT_WARNINGS:
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/wandb.html
30b7c147151b-1
return span except Exception as e: if PRINT_WARNINGS: self.wandb.termwarn( f"Skipping trace saving - unable to safely convert LangChain Run " f"into W&B Trace due to: {e}" ) return None def _convert_run_to_wb_span(self, run: Run) -> "Span": """Base utility to create a span from a run. :param run: The run to convert. :return: The converted Span. """ attributes = {**run.extra} if run.extra else {} attributes["execution_order"] = run.execution_order return self.trace_tree.Span( span_id=str(run.id) if run.id is not None else None, name=run.name, start_time_ms=int(run.start_time.timestamp() * 1000), end_time_ms=int(run.end_time.timestamp() * 1000), status_code=self.trace_tree.StatusCode.SUCCESS if run.error is None else self.trace_tree.StatusCode.ERROR, status_message=run.error, attributes=attributes, ) def _convert_llm_run_to_wb_span(self, run: Run) -> "Span": """Converts a LangChain LLM Run into a W&B Trace Span. :param run: The LangChain LLM Run to convert. :return: The converted W&B Trace Span. """ base_span = self._convert_run_to_wb_span(run) if base_span.attributes is None: base_span.attributes = {} base_span.attributes["llm_output"] = run.outputs.get("llm_output", {}) base_span.results = [ self.trace_tree.Result( inputs={"prompt": prompt}, outputs={
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/wandb.html
30b7c147151b-2
self.trace_tree.Result( inputs={"prompt": prompt}, outputs={ f"gen_{g_i}": gen["text"] for g_i, gen in enumerate(run.outputs["generations"][ndx]) } if ( run.outputs is not None and len(run.outputs["generations"]) > ndx and len(run.outputs["generations"][ndx]) > 0 ) else None, ) for ndx, prompt in enumerate(run.inputs["prompts"] or []) ] base_span.span_kind = self.trace_tree.SpanKind.LLM return base_span def _convert_chain_run_to_wb_span(self, run: Run) -> "Span": """Converts a LangChain Chain Run into a W&B Trace Span. :param run: The LangChain Chain Run to convert. :return: The converted W&B Trace Span. """ base_span = self._convert_run_to_wb_span(run) base_span.results = [ self.trace_tree.Result( inputs=_serialize_inputs(run.inputs), outputs=run.outputs ) ] base_span.child_spans = [ self._convert_lc_run_to_wb_span(child_run) for child_run in run.child_runs ] base_span.span_kind = ( self.trace_tree.SpanKind.AGENT if "agent" in run.name.lower() else self.trace_tree.SpanKind.CHAIN ) return base_span def _convert_tool_run_to_wb_span(self, run: Run) -> "Span": """Converts a LangChain Tool Run into a W&B Trace Span. :param run: The LangChain Tool Run to convert.
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/wandb.html
30b7c147151b-3
:param run: The LangChain Tool Run to convert. :return: The converted W&B Trace Span. """ base_span = self._convert_run_to_wb_span(run) base_span.results = [ self.trace_tree.Result( inputs=_serialize_inputs(run.inputs), outputs=run.outputs ) ] base_span.child_spans = [ self._convert_lc_run_to_wb_span(child_run) for child_run in run.child_runs ] base_span.span_kind = self.trace_tree.SpanKind.TOOL return base_span def _convert_lc_run_to_wb_span(self, run: Run) -> "Span": """Utility to convert any generic LangChain Run into a W&B Trace Span. :param run: The LangChain Run to convert. :return: The converted W&B Trace Span. """ if run.run_type == "llm": return self._convert_llm_run_to_wb_span(run) elif run.run_type == "chain": return self._convert_chain_run_to_wb_span(run) elif run.run_type == "tool": return self._convert_tool_run_to_wb_span(run) else: return self._convert_run_to_wb_span(run) [docs] def process_model(self, run: Run) -> Optional[Dict[str, Any]]: """Utility to process a run for wandb model_dict serialization. :param run: The run to process. :return: The convert model_dict to pass to WBTraceTree. """ try: data = json.loads(run.json()) processed = self.flatten_run(data) keep_keys = ( "id", "name", "serialized",
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/wandb.html
30b7c147151b-4
"id", "name", "serialized", "inputs", "outputs", "parent_run_id", "execution_order", ) processed = self.truncate_run_iterative(processed, keep_keys=keep_keys) exact_keys, partial_keys = ("lc", "type"), ("api_key",) processed = self.modify_serialized_iterative( processed, exact_keys=exact_keys, partial_keys=partial_keys ) output = self.build_tree(processed) return output except Exception as e: if PRINT_WARNINGS: self.wandb.termwarn(f"WARNING: Failed to serialize model: {e}") return None [docs] def flatten_run(self, run: Dict[str, Any]) -> List[Dict[str, Any]]: """Utility to flatten a nest run object into a list of runs. :param run: The base run to flatten. :return: The flattened list of runs. """ def flatten(child_runs: List[Dict[str, Any]]) -> List[Dict[str, Any]]: """Utility to recursively flatten a list of child runs in a run. :param child_runs: The list of child runs to flatten. :return: The flattened list of runs. """ if child_runs is None: return [] result = [] for item in child_runs: child_runs = item.pop("child_runs", []) result.append(item) result.extend(flatten(child_runs)) return result return flatten([run]) [docs] def truncate_run_iterative( self, runs: List[Dict[str, Any]], keep_keys: Tuple[str, ...] = ()
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/wandb.html
30b7c147151b-5
) -> List[Dict[str, Any]]: """Utility to truncate a list of runs dictionaries to only keep the specified keys in each run. :param runs: The list of runs to truncate. :param keep_keys: The keys to keep in each run. :return: The truncated list of runs. """ def truncate_single(run: Dict[str, Any]) -> Dict[str, Any]: """Utility to truncate a single run dictionary to only keep the specified keys. :param run: The run dictionary to truncate. :return: The truncated run dictionary """ new_dict = {} for key in run: if key in keep_keys: new_dict[key] = run.get(key) return new_dict return list(map(truncate_single, runs)) [docs] def modify_serialized_iterative( self, runs: List[Dict[str, Any]], exact_keys: Tuple[str, ...] = (), partial_keys: Tuple[str, ...] = (), ) -> List[Dict[str, Any]]: """Utility to modify the serialized field of a list of runs dictionaries. removes any keys that match the exact_keys and any keys that contain any of the partial_keys. recursively moves the dictionaries under the kwargs key to the top level. changes the "id" field to a string "_kind" field that tells WBTraceTree how to visualize the run. promotes the "serialized" field to the top level. :param runs: The list of runs to modify. :param exact_keys: A tuple of keys to remove from the serialized field. :param partial_keys: A tuple of partial keys to remove from the serialized field.
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/wandb.html
30b7c147151b-6
field. :return: The modified list of runs. """ def remove_exact_and_partial_keys(obj: Dict[str, Any]) -> Dict[str, Any]: """Recursively removes exact and partial keys from a dictionary. :param obj: The dictionary to remove keys from. :return: The modified dictionary. """ if isinstance(obj, dict): obj = { k: v for k, v in obj.items() if k not in exact_keys and not any(partial in k for partial in partial_keys) } for k, v in obj.items(): obj[k] = remove_exact_and_partial_keys(v) elif isinstance(obj, list): obj = [remove_exact_and_partial_keys(x) for x in obj] return obj def handle_id_and_kwargs( obj: Dict[str, Any], root: bool = False ) -> Dict[str, Any]: """Recursively handles the id and kwargs fields of a dictionary. changes the id field to a string "_kind" field that tells WBTraceTree how to visualize the run. recursively moves the dictionaries under the kwargs key to the top level. :param obj: a run dictionary with id and kwargs fields. :param root: whether this is the root dictionary or the serialized dictionary. :return: The modified dictionary. """ if isinstance(obj, dict): if ("id" in obj or "name" in obj) and not root: _kind = obj.get("id") if not _kind: _kind = [obj.get("name")] obj["_kind"] = _kind[-1] obj.pop("id", None)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/wandb.html
30b7c147151b-7
obj.pop("id", None) obj.pop("name", None) if "kwargs" in obj: kwargs = obj.pop("kwargs") for k, v in kwargs.items(): obj[k] = v for k, v in obj.items(): obj[k] = handle_id_and_kwargs(v) elif isinstance(obj, list): obj = [handle_id_and_kwargs(x) for x in obj] return obj def transform_serialized(serialized: Dict[str, Any]) -> Dict[str, Any]: """Transforms the serialized field of a run dictionary to be compatible with WBTraceTree. :param serialized: The serialized field of a run dictionary. :return: The transformed serialized field. """ serialized = handle_id_and_kwargs(serialized, root=True) serialized = remove_exact_and_partial_keys(serialized) return serialized def transform_run(run: Dict[str, Any]) -> Dict[str, Any]: """Transforms a run dictionary to be compatible with WBTraceTree. :param run: The run dictionary to transform. :return: The transformed run dictionary. """ transformed_dict = transform_serialized(run) serialized = transformed_dict.pop("serialized") for k, v in serialized.items(): transformed_dict[k] = v _kind = transformed_dict.get("_kind", None) name = transformed_dict.pop("name", None) exec_ord = transformed_dict.pop("execution_order", None) if not name: name = _kind output_dict = { f"{exec_ord}_{name}": transformed_dict, } return output_dict return list(map(transform_run, runs))
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/wandb.html
30b7c147151b-8
} return output_dict return list(map(transform_run, runs)) [docs] def build_tree(self, runs: List[Dict[str, Any]]) -> Dict[str, Any]: """Builds a nested dictionary from a list of runs. :param runs: The list of runs to build the tree from. :return: The nested dictionary representing the langchain Run in a tree structure compatible with WBTraceTree. """ id_to_data = {} child_to_parent = {} for entity in runs: for key, data in entity.items(): id_val = data.pop("id", None) parent_run_id = data.pop("parent_run_id", None) id_to_data[id_val] = {key: data} if parent_run_id: child_to_parent[id_val] = parent_run_id for child_id, parent_id in child_to_parent.items(): parent_dict = id_to_data[parent_id] parent_dict[next(iter(parent_dict))][ next(iter(id_to_data[child_id])) ] = id_to_data[child_id][next(iter(id_to_data[child_id]))] root_dict = next( data for id_val, data in id_to_data.items() if id_val not in child_to_parent ) return root_dict [docs]class WandbRunArgs(TypedDict): """Arguments for the WandbTracer.""" job_type: Optional[str] dir: Optional[StrPath] config: Union[Dict, str, None] project: Optional[str] entity: Optional[str] reinit: Optional[bool] tags: Optional[Sequence] group: Optional[str] name: Optional[str] notes: Optional[str]
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/wandb.html
30b7c147151b-9
name: Optional[str] notes: Optional[str] magic: Optional[Union[dict, str, bool]] config_exclude_keys: Optional[List[str]] config_include_keys: Optional[List[str]] anonymous: Optional[str] mode: Optional[str] allow_val_change: Optional[bool] resume: Optional[Union[bool, str]] force: Optional[bool] tensorboard: Optional[bool] sync_tensorboard: Optional[bool] monitor_gym: Optional[bool] save_code: Optional[bool] id: Optional[str] settings: Union[WBSettings, Dict[str, Any], None] [docs]class WandbTracer(BaseTracer): """Callback Handler that logs to Weights and Biases. This handler will log the model architecture and run traces to Weights and Biases. This will ensure that all LangChain activity is logged to W&B. """ _run: Optional[WBRun] = None _run_args: Optional[WandbRunArgs] = None [docs] def __init__(self, run_args: Optional[WandbRunArgs] = None, **kwargs: Any) -> None: """Initializes the WandbTracer. Parameters: run_args: (dict, optional) Arguments to pass to `wandb.init()`. If not provided, `wandb.init()` will be called with no arguments. Please refer to the `wandb.init` for more details. To use W&B to monitor all LangChain activity, add this tracer like any other LangChain callback: ``` from wandb.integration.langchain import WandbTracer tracer = WandbTracer()
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/wandb.html
30b7c147151b-10
tracer = WandbTracer() chain = LLMChain(llm, callbacks=[tracer]) # ...end of notebook / script: tracer.finish() ``` """ super().__init__(**kwargs) try: import wandb from wandb.sdk.data_types import trace_tree except ImportError as e: raise ImportError( "Could not import wandb python package." "Please install it with `pip install -U wandb`." ) from e self._wandb = wandb self._trace_tree = trace_tree self._run_args = run_args self._ensure_run(should_print_url=(wandb.run is None)) self.run_processor = RunProcessor(self._wandb, self._trace_tree) [docs] def finish(self) -> None: """Waits for all asynchronous processes to finish and data to upload. Proxy for `wandb.finish()`. """ self._wandb.finish() def _log_trace_from_run(self, run: Run) -> None: """Logs a LangChain Run to W*B as a W&B Trace.""" self._ensure_run() root_span = self.run_processor.process_span(run) model_dict = self.run_processor.process_model(run) if root_span is None: return model_trace = self._trace_tree.WBTraceTree( root_span=root_span, model_dict=model_dict, ) if self._wandb.run is not None: self._wandb.run.log({"langchain_trace": model_trace}) def _ensure_run(self, should_print_url: bool = False) -> None: """Ensures an active W&B run exists.
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/wandb.html
30b7c147151b-11
"""Ensures an active W&B run exists. If not, will start a new run with the provided run_args. """ if self._wandb.run is None: # Make a shallow copy of the run args, so we don't modify the original run_args = self._run_args or {} # type: ignore run_args: dict = {**run_args} # type: ignore # Prefer to run in silent mode since W&B has a lot of output # which can be undesirable when dealing with text-based models. if "settings" not in run_args: # type: ignore run_args["settings"] = {"silent": True} # type: ignore # Start the run and add the stream table self._wandb.init(**run_args) if self._wandb.run is not None: if should_print_url: run_url = self._wandb.run.settings.run_url self._wandb.termlog( f"Streaming LangChain activity to W&B at {run_url}\n" "`WandbTracer` is currently in beta.\n" "Please report any issues to " "https://github.com/wandb/wandb/issues with the tag " "`langchain`." ) self._wandb.run._label(repo="langchain") def _persist_run(self, run: "Run") -> None: """Persist a run.""" self._log_trace_from_run(run)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/wandb.html
73eb8b1f8b98-0
Source code for langchain.callbacks.tracers.langchain_v1 from __future__ import annotations import logging import os from typing import Any, Dict, Optional, Union import requests from langchain.callbacks.tracers.base import BaseTracer from langchain.callbacks.tracers.schemas import ( ChainRun, LLMRun, Run, ToolRun, TracerSession, TracerSessionV1, TracerSessionV1Base, ) from langchain.schema.messages import get_buffer_string from langchain.utils import raise_for_status_with_text logger = logging.getLogger(__name__) [docs]def get_headers() -> Dict[str, Any]: """Get the headers for the LangChain API.""" headers: Dict[str, Any] = {"Content-Type": "application/json"} if os.getenv("LANGCHAIN_API_KEY"): headers["x-api-key"] = os.getenv("LANGCHAIN_API_KEY") return headers def _get_endpoint() -> str: return os.getenv("LANGCHAIN_ENDPOINT", "http://localhost:8000") [docs]class LangChainTracerV1(BaseTracer): """An implementation of the SharedTracer that POSTS to the langchain endpoint.""" [docs] def __init__(self, **kwargs: Any) -> None: """Initialize the LangChain tracer.""" super().__init__(**kwargs) self.session: Optional[TracerSessionV1] = None self._endpoint = _get_endpoint() self._headers = get_headers() def _convert_to_v1_run(self, run: Run) -> Union[LLMRun, ChainRun, ToolRun]: session = self.session or self.load_default_session() if not isinstance(session, TracerSessionV1):
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/langchain_v1.html
73eb8b1f8b98-1
if not isinstance(session, TracerSessionV1): raise ValueError( "LangChainTracerV1 is not compatible with" f" session of type {type(session)}" ) if run.run_type == "llm": if "prompts" in run.inputs: prompts = run.inputs["prompts"] elif "messages" in run.inputs: prompts = [get_buffer_string(batch) for batch in run.inputs["messages"]] else: raise ValueError("No prompts found in LLM run inputs") return LLMRun( uuid=str(run.id) if run.id else None, parent_uuid=str(run.parent_run_id) if run.parent_run_id else None, start_time=run.start_time, end_time=run.end_time, extra=run.extra, execution_order=run.execution_order, child_execution_order=run.child_execution_order, serialized=run.serialized, session_id=session.id, error=run.error, prompts=prompts, response=run.outputs if run.outputs else None, ) if run.run_type == "chain": child_runs = [self._convert_to_v1_run(run) for run in run.child_runs] return ChainRun( uuid=str(run.id) if run.id else None, parent_uuid=str(run.parent_run_id) if run.parent_run_id else None, start_time=run.start_time, end_time=run.end_time, execution_order=run.execution_order, child_execution_order=run.child_execution_order, serialized=run.serialized, session_id=session.id, inputs=run.inputs, outputs=run.outputs, error=run.error,
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/langchain_v1.html
73eb8b1f8b98-2
outputs=run.outputs, error=run.error, extra=run.extra, child_llm_runs=[run for run in child_runs if isinstance(run, LLMRun)], child_chain_runs=[ run for run in child_runs if isinstance(run, ChainRun) ], child_tool_runs=[run for run in child_runs if isinstance(run, ToolRun)], ) if run.run_type == "tool": child_runs = [self._convert_to_v1_run(run) for run in run.child_runs] return ToolRun( uuid=str(run.id) if run.id else None, parent_uuid=str(run.parent_run_id) if run.parent_run_id else None, start_time=run.start_time, end_time=run.end_time, execution_order=run.execution_order, child_execution_order=run.child_execution_order, serialized=run.serialized, session_id=session.id, action=str(run.serialized), tool_input=run.inputs.get("input", ""), output=None if run.outputs is None else run.outputs.get("output"), error=run.error, extra=run.extra, child_chain_runs=[ run for run in child_runs if isinstance(run, ChainRun) ], child_tool_runs=[run for run in child_runs if isinstance(run, ToolRun)], child_llm_runs=[run for run in child_runs if isinstance(run, LLMRun)], ) raise ValueError(f"Unknown run type: {run.run_type}") def _persist_run(self, run: Union[Run, LLMRun, ChainRun, ToolRun]) -> None: """Persist a run.""" if isinstance(run, Run): v1_run = self._convert_to_v1_run(run)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/langchain_v1.html
73eb8b1f8b98-3
v1_run = self._convert_to_v1_run(run) else: v1_run = run if isinstance(v1_run, LLMRun): endpoint = f"{self._endpoint}/llm-runs" elif isinstance(v1_run, ChainRun): endpoint = f"{self._endpoint}/chain-runs" else: endpoint = f"{self._endpoint}/tool-runs" try: response = requests.post( endpoint, data=v1_run.json(), headers=self._headers, ) raise_for_status_with_text(response) except Exception as e: logger.warning(f"Failed to persist run: {e}") def _persist_session( self, session_create: TracerSessionV1Base ) -> Union[TracerSessionV1, TracerSession]: """Persist a session.""" try: r = requests.post( f"{self._endpoint}/sessions", data=session_create.json(), headers=self._headers, ) session = TracerSessionV1(id=r.json()["id"], **session_create.dict()) except Exception as e: logger.warning(f"Failed to create session, using default session: {e}") session = TracerSessionV1(id=1, **session_create.dict()) return session def _load_session(self, session_name: Optional[str] = None) -> TracerSessionV1: """Load a session from the tracer.""" try: url = f"{self._endpoint}/sessions" if session_name: url += f"?name={session_name}" r = requests.get(url, headers=self._headers)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/langchain_v1.html
73eb8b1f8b98-4
r = requests.get(url, headers=self._headers) tracer_session = TracerSessionV1(**r.json()[0]) except Exception as e: session_type = "default" if not session_name else session_name logger.warning( f"Failed to load {session_type} session, using empty session: {e}" ) tracer_session = TracerSessionV1(id=1) self.session = tracer_session return tracer_session [docs] def load_session(self, session_name: str) -> Union[TracerSessionV1, TracerSession]: """Load a session with the given name from the tracer.""" return self._load_session(session_name) [docs] def load_default_session(self) -> Union[TracerSessionV1, TracerSession]: """Load the default tracing session and set it as the Tracer's session.""" return self._load_session("default")
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/langchain_v1.html
939f96cb11b3-0
Source code for langchain.callbacks.tracers.schemas """Schemas for tracers.""" from __future__ import annotations import datetime import warnings from typing import Any, Dict, List, Optional from uuid import UUID from langsmith.schemas import RunBase as BaseRunV2 from langsmith.schemas import RunTypeEnum as RunTypeEnumDep from pydantic import BaseModel, Field, root_validator from langchain.schema import LLMResult [docs]def RunTypeEnum() -> RunTypeEnumDep: """RunTypeEnum.""" warnings.warn( "RunTypeEnum is deprecated. Please directly use a string instead" " (e.g. 'llm', 'chain', 'tool').", DeprecationWarning, ) return RunTypeEnumDep [docs]class TracerSessionV1Base(BaseModel): """Base class for TracerSessionV1.""" start_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow) name: Optional[str] = None extra: Optional[Dict[str, Any]] = None [docs]class TracerSessionV1Create(TracerSessionV1Base): """Create class for TracerSessionV1.""" [docs]class TracerSessionV1(TracerSessionV1Base): """TracerSessionV1 schema.""" id: int [docs]class TracerSessionBase(TracerSessionV1Base): """Base class for TracerSession.""" tenant_id: UUID [docs]class TracerSession(TracerSessionBase): """TracerSession schema for the V2 API.""" id: UUID [docs]class BaseRun(BaseModel): """Base class for Run.""" uuid: str parent_uuid: Optional[str] = None
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/schemas.html
939f96cb11b3-1
uuid: str parent_uuid: Optional[str] = None start_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow) end_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow) extra: Optional[Dict[str, Any]] = None execution_order: int child_execution_order: int serialized: Dict[str, Any] session_id: int error: Optional[str] = None [docs]class LLMRun(BaseRun): """Class for LLMRun.""" prompts: List[str] response: Optional[LLMResult] = None [docs]class ChainRun(BaseRun): """Class for ChainRun.""" inputs: Dict[str, Any] outputs: Optional[Dict[str, Any]] = None child_llm_runs: List[LLMRun] = Field(default_factory=list) child_chain_runs: List[ChainRun] = Field(default_factory=list) child_tool_runs: List[ToolRun] = Field(default_factory=list) [docs]class ToolRun(BaseRun): """Class for ToolRun.""" tool_input: str output: Optional[str] = None action: str child_llm_runs: List[LLMRun] = Field(default_factory=list) child_chain_runs: List[ChainRun] = Field(default_factory=list) child_tool_runs: List[ToolRun] = Field(default_factory=list) # Begin V2 API Schemas [docs]class Run(BaseRunV2): """Run schema for the V2 API in the Tracer.""" execution_order: int child_execution_order: int child_runs: List[Run] = Field(default_factory=list) tags: Optional[List[str]] = Field(default_factory=list)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/schemas.html
939f96cb11b3-2
tags: Optional[List[str]] = Field(default_factory=list) @root_validator(pre=True) def assign_name(cls, values: dict) -> dict: """Assign name to the run.""" if values.get("name") is None: if "name" in values["serialized"]: values["name"] = values["serialized"]["name"] elif "id" in values["serialized"]: values["name"] = values["serialized"]["id"][-1] return values ChainRun.update_forward_refs() ToolRun.update_forward_refs() __all__ = [ "BaseRun", "ChainRun", "LLMRun", "Run", "RunTypeEnum", "ToolRun", "TracerSession", "TracerSessionBase", "TracerSessionV1", "TracerSessionV1Base", "TracerSessionV1Create", ]
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/schemas.html
2cdc46bd2c7b-0
Source code for langchain.callbacks.tracers.langchain """A Tracer implementation that records to LangChain endpoint.""" from __future__ import annotations import logging import os from concurrent.futures import Future, ThreadPoolExecutor, wait from datetime import datetime from typing import Any, Callable, Dict, List, Optional, Set, Union from uuid import UUID from langsmith import Client from langchain.callbacks.tracers.base import BaseTracer from langchain.callbacks.tracers.schemas import Run, TracerSession from langchain.env import get_runtime_environment from langchain.load.dump import dumpd from langchain.schema.messages import BaseMessage logger = logging.getLogger(__name__) _LOGGED = set() _TRACERS: List[LangChainTracer] = [] _CLIENT: Optional[Client] = None [docs]def log_error_once(method: str, exception: Exception) -> None: """Log an error once.""" global _LOGGED if (method, type(exception)) in _LOGGED: return _LOGGED.add((method, type(exception))) logger.error(exception) [docs]def wait_for_all_tracers() -> None: """Wait for all tracers to finish.""" global _TRACERS for tracer in _TRACERS: tracer.wait_for_futures() def _get_client() -> Client: """Get the client.""" global _CLIENT if _CLIENT is None: _CLIENT = Client() return _CLIENT [docs]class LangChainTracer(BaseTracer): """An implementation of the SharedTracer that POSTS to the langchain endpoint.""" [docs] def __init__( self, example_id: Optional[Union[UUID, str]] = None,
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/langchain.html
2cdc46bd2c7b-1
self, example_id: Optional[Union[UUID, str]] = None, project_name: Optional[str] = None, client: Optional[Client] = None, tags: Optional[List[str]] = None, use_threading: bool = True, **kwargs: Any, ) -> None: """Initialize the LangChain tracer.""" super().__init__(**kwargs) self.session: Optional[TracerSession] = None self.example_id = ( UUID(example_id) if isinstance(example_id, str) else example_id ) self.project_name = project_name or os.getenv( "LANGCHAIN_PROJECT", os.getenv("LANGCHAIN_SESSION", "default") ) if use_threading: # set max_workers to 1 to process tasks in order self.executor: Optional[ThreadPoolExecutor] = ThreadPoolExecutor( max_workers=1 ) else: self.executor = None self.client = client or _get_client() self._futures: Set[Future] = set() self.tags = tags or [] global _TRACERS _TRACERS.append(self) [docs] def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], *, run_id: UUID, tags: Optional[List[str]] = None, parent_run_id: Optional[UUID] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> None: """Start a trace for an LLM run.""" parent_run_id_ = str(parent_run_id) if parent_run_id else None
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/langchain.html
2cdc46bd2c7b-2
parent_run_id_ = str(parent_run_id) if parent_run_id else None execution_order = self._get_execution_order(parent_run_id_) start_time = datetime.utcnow() if metadata: kwargs.update({"metadata": metadata}) chat_model_run = Run( id=run_id, parent_run_id=parent_run_id, serialized=serialized, inputs={"messages": [[dumpd(msg) for msg in batch] for batch in messages]}, extra=kwargs, events=[{"name": "start", "time": start_time}], start_time=start_time, execution_order=execution_order, child_execution_order=execution_order, run_type="llm", tags=tags, ) self._start_trace(chat_model_run) self._on_chat_model_start(chat_model_run) def _persist_run(self, run: Run) -> None: """The Langchain Tracer uses Post/Patch rather than persist.""" def _get_tags(self, run: Run) -> List[str]: """Get combined tags for a run.""" tags = set(run.tags or []) tags.update(self.tags or []) return list(tags) def _persist_run_single(self, run: Run) -> None: """Persist a run.""" run_dict = run.dict(exclude={"child_runs"}) run_dict["tags"] = self._get_tags(run) extra = run_dict.get("extra", {}) extra["runtime"] = get_runtime_environment() run_dict["extra"] = extra try: self.client.create_run(**run_dict, project_name=self.project_name) except Exception as e: # Errors are swallowed by the thread executor so we need to log them here
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/langchain.html
2cdc46bd2c7b-3
# Errors are swallowed by the thread executor so we need to log them here log_error_once("post", e) raise def _update_run_single(self, run: Run) -> None: """Update a run.""" try: run_dict = run.dict() run_dict["tags"] = self._get_tags(run) self.client.update_run(run.id, **run_dict) except Exception as e: # Errors are swallowed by the thread executor so we need to log them here log_error_once("patch", e) raise def _submit(self, function: Callable[[Run], None], run: Run) -> None: """Submit a function to the executor.""" if self.executor is None: function(run) else: self._futures.add(self.executor.submit(function, run)) def _on_llm_start(self, run: Run) -> None: """Persist an LLM run.""" if run.parent_run_id is None: run.reference_example_id = self.example_id self._submit(self._persist_run_single, run.copy(deep=True)) def _on_chat_model_start(self, run: Run) -> None: """Persist an LLM run.""" if run.parent_run_id is None: run.reference_example_id = self.example_id self._submit(self._persist_run_single, run.copy(deep=True)) def _on_llm_end(self, run: Run) -> None: """Process the LLM Run.""" self._submit(self._update_run_single, run.copy(deep=True)) def _on_llm_error(self, run: Run) -> None: """Process the LLM Run upon error."""
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/langchain.html
2cdc46bd2c7b-4
"""Process the LLM Run upon error.""" self._submit(self._update_run_single, run.copy(deep=True)) def _on_chain_start(self, run: Run) -> None: """Process the Chain Run upon start.""" if run.parent_run_id is None: run.reference_example_id = self.example_id self._submit(self._persist_run_single, run.copy(deep=True)) def _on_chain_end(self, run: Run) -> None: """Process the Chain Run.""" self._submit(self._update_run_single, run.copy(deep=True)) def _on_chain_error(self, run: Run) -> None: """Process the Chain Run upon error.""" self._submit(self._update_run_single, run.copy(deep=True)) def _on_tool_start(self, run: Run) -> None: """Process the Tool Run upon start.""" if run.parent_run_id is None: run.reference_example_id = self.example_id self._submit(self._persist_run_single, run.copy(deep=True)) def _on_tool_end(self, run: Run) -> None: """Process the Tool Run.""" self._submit(self._update_run_single, run.copy(deep=True)) def _on_tool_error(self, run: Run) -> None: """Process the Tool Run upon error.""" self._submit(self._update_run_single, run.copy(deep=True)) def _on_retriever_start(self, run: Run) -> None: """Process the Retriever Run upon start.""" if run.parent_run_id is None: run.reference_example_id = self.example_id self._submit(self._persist_run_single, run.copy(deep=True))
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/langchain.html
2cdc46bd2c7b-5
self._submit(self._persist_run_single, run.copy(deep=True)) def _on_retriever_end(self, run: Run) -> None: """Process the Retriever Run.""" self._submit(self._update_run_single, run.copy(deep=True)) def _on_retriever_error(self, run: Run) -> None: """Process the Retriever Run upon error.""" self._submit(self._update_run_single, run.copy(deep=True)) [docs] def wait_for_futures(self) -> None: """Wait for the given futures to complete.""" futures = list(self._futures) wait(futures) for future in futures: self._futures.remove(future)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/langchain.html
0f584d33f283-0
Source code for langchain.callbacks.tracers.evaluation """A tracer that runs evaluators over completed runs.""" from __future__ import annotations import logging from concurrent.futures import Future, ThreadPoolExecutor, wait from typing import Any, List, Optional, Sequence, Set, Union from uuid import UUID from langsmith import Client, RunEvaluator from langchain.callbacks.manager import tracing_v2_enabled from langchain.callbacks.tracers.base import BaseTracer from langchain.callbacks.tracers.langchain import _get_client from langchain.callbacks.tracers.schemas import Run logger = logging.getLogger(__name__) _TRACERS: List[EvaluatorCallbackHandler] = [] [docs]def wait_for_all_evaluators() -> None: """Wait for all tracers to finish.""" global _TRACERS for tracer in _TRACERS: tracer.wait_for_futures() [docs]class EvaluatorCallbackHandler(BaseTracer): """A tracer that runs a run evaluator whenever a run is persisted. Parameters ---------- evaluators : Sequence[RunEvaluator] The run evaluators to apply to all top level runs. max_workers : int, optional The maximum number of worker threads to use for running the evaluators. If not specified, it will default to the number of evaluators. client : LangSmith Client, optional The LangSmith client instance to use for evaluating the runs. If not specified, a new instance will be created. example_id : Union[UUID, str], optional The example ID to be associated with the runs. project_name : str, optional The LangSmith project name to organize eval chain runs under. Attributes ---------- example_id : Union[UUID, None]
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/evaluation.html
0f584d33f283-1
Attributes ---------- example_id : Union[UUID, None] The example ID associated with the runs. client : Client The LangSmith client instance used for evaluating the runs. evaluators : Sequence[RunEvaluator] The sequence of run evaluators to be executed. executor : ThreadPoolExecutor The thread pool executor used for running the evaluators. futures : Set[Future] The set of futures representing the running evaluators. skip_unfinished : bool Whether to skip runs that are not finished or raised an error. project_name : Optional[str] The LangSmith project name to organize eval chain runs under. """ name = "evaluator_callback_handler" [docs] def __init__( self, evaluators: Sequence[RunEvaluator], max_workers: Optional[int] = None, client: Optional[Client] = None, example_id: Optional[Union[UUID, str]] = None, skip_unfinished: bool = True, project_name: Optional[str] = "evaluators", **kwargs: Any, ) -> None: super().__init__(**kwargs) self.example_id = ( UUID(example_id) if isinstance(example_id, str) else example_id ) self.client = client or _get_client() self.evaluators = evaluators self.executor = ThreadPoolExecutor( max_workers=max(max_workers or len(evaluators), 1) ) self.futures: Set[Future] = set() self.skip_unfinished = skip_unfinished self.project_name = project_name global _TRACERS _TRACERS.append(self)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/evaluation.html
0f584d33f283-2
global _TRACERS _TRACERS.append(self) def _evaluate_in_project(self, run: Run, evaluator: RunEvaluator) -> None: """Evaluate the run in the project. Parameters ---------- run : Run The run to be evaluated. evaluator : RunEvaluator The evaluator to use for evaluating the run. """ try: if self.project_name is None: self.client.evaluate_run(run, evaluator) with tracing_v2_enabled( project_name=self.project_name, tags=["eval"], client=self.client ): self.client.evaluate_run(run, evaluator) except Exception as e: logger.error( f"Error evaluating run {run.id} with " f"{evaluator.__class__.__name__}: {e}", exc_info=True, ) raise e def _persist_run(self, run: Run) -> None: """Run the evaluator on the run. Parameters ---------- run : Run The run to be evaluated. """ if self.skip_unfinished and not run.outputs: logger.debug(f"Skipping unfinished run {run.id}") return run_ = run.copy() run_.reference_example_id = self.example_id for evaluator in self.evaluators: self.futures.add( self.executor.submit(self._evaluate_in_project, run_, evaluator) ) [docs] def wait_for_futures(self) -> None: """Wait for all futures to complete.""" futures = list(self.futures) wait(futures) for future in futures: self.futures.remove(future)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/evaluation.html
6dbc3854648f-0
Source code for langchain.callbacks.tracers.stdout import json from typing import Any, Callable, List from langchain.callbacks.tracers.base import BaseTracer from langchain.callbacks.tracers.schemas import Run from langchain.utils.input import get_bolded_text, get_colored_text [docs]def try_json_stringify(obj: Any, fallback: str) -> str: """ Try to stringify an object to JSON. Args: obj: Object to stringify. fallback: Fallback string to return if the object cannot be stringified. Returns: A JSON string if the object can be stringified, otherwise the fallback string. """ try: return json.dumps(obj, indent=2, ensure_ascii=False) except Exception: return fallback [docs]def elapsed(run: Any) -> str: """Get the elapsed time of a run. Args: run: any object with a start_time and end_time attribute. Returns: A string with the elapsed time in seconds or milliseconds if time is less than a second. """ elapsed_time = run.end_time - run.start_time milliseconds = elapsed_time.total_seconds() * 1000 if milliseconds < 1000: return f"{milliseconds}ms" return f"{(milliseconds / 1000):.2f}s" [docs]class FunctionCallbackHandler(BaseTracer): """Tracer that calls a function with a single str parameter.""" name = "function_callback_handler" [docs] def __init__(self, function: Callable[[str], None], **kwargs: Any) -> None: super().__init__(**kwargs) self.function_callback = function
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/stdout.html
6dbc3854648f-1
super().__init__(**kwargs) self.function_callback = function def _persist_run(self, run: Run) -> None: pass [docs] def get_parents(self, run: Run) -> List[Run]: parents = [] current_run = run while current_run.parent_run_id: parent = self.run_map.get(str(current_run.parent_run_id)) if parent: parents.append(parent) current_run = parent else: break return parents [docs] def get_breadcrumbs(self, run: Run) -> str: parents = self.get_parents(run)[::-1] string = " > ".join( f"{parent.execution_order}:{parent.run_type}:{parent.name}" if i != len(parents) - 1 else f"{parent.execution_order}:{parent.run_type}:{parent.name}" for i, parent in enumerate(parents + [run]) ) return string # logging methods def _on_chain_start(self, run: Run) -> None: crumbs = self.get_breadcrumbs(run) self.function_callback( f"{get_colored_text('[chain/start]', color='green')} " + get_bolded_text(f"[{crumbs}] Entering Chain run with input:\n") + f"{try_json_stringify(run.inputs, '[inputs]')}" ) def _on_chain_end(self, run: Run) -> None: crumbs = self.get_breadcrumbs(run) self.function_callback( f"{get_colored_text('[chain/end]', color='blue')} " + get_bolded_text( f"[{crumbs}] [{elapsed(run)}] Exiting Chain run with output:\n" )
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/stdout.html
6dbc3854648f-2
) + f"{try_json_stringify(run.outputs, '[outputs]')}" ) def _on_chain_error(self, run: Run) -> None: crumbs = self.get_breadcrumbs(run) self.function_callback( f"{get_colored_text('[chain/error]', color='red')} " + get_bolded_text( f"[{crumbs}] [{elapsed(run)}] Chain run errored with error:\n" ) + f"{try_json_stringify(run.error, '[error]')}" ) def _on_llm_start(self, run: Run) -> None: crumbs = self.get_breadcrumbs(run) inputs = ( {"prompts": [p.strip() for p in run.inputs["prompts"]]} if "prompts" in run.inputs else run.inputs ) self.function_callback( f"{get_colored_text('[llm/start]', color='green')} " + get_bolded_text(f"[{crumbs}] Entering LLM run with input:\n") + f"{try_json_stringify(inputs, '[inputs]')}" ) def _on_llm_end(self, run: Run) -> None: crumbs = self.get_breadcrumbs(run) self.function_callback( f"{get_colored_text('[llm/end]', color='blue')} " + get_bolded_text( f"[{crumbs}] [{elapsed(run)}] Exiting LLM run with output:\n" ) + f"{try_json_stringify(run.outputs, '[response]')}" ) def _on_llm_error(self, run: Run) -> None: crumbs = self.get_breadcrumbs(run) self.function_callback(
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/stdout.html
6dbc3854648f-3
crumbs = self.get_breadcrumbs(run) self.function_callback( f"{get_colored_text('[llm/error]', color='red')} " + get_bolded_text( f"[{crumbs}] [{elapsed(run)}] LLM run errored with error:\n" ) + f"{try_json_stringify(run.error, '[error]')}" ) def _on_tool_start(self, run: Run) -> None: crumbs = self.get_breadcrumbs(run) self.function_callback( f'{get_colored_text("[tool/start]", color="green")} ' + get_bolded_text(f"[{crumbs}] Entering Tool run with input:\n") + f'"{run.inputs["input"].strip()}"' ) def _on_tool_end(self, run: Run) -> None: crumbs = self.get_breadcrumbs(run) if run.outputs: self.function_callback( f'{get_colored_text("[tool/end]", color="blue")} ' + get_bolded_text( f"[{crumbs}] [{elapsed(run)}] Exiting Tool run with output:\n" ) + f'"{run.outputs["output"].strip()}"' ) def _on_tool_error(self, run: Run) -> None: crumbs = self.get_breadcrumbs(run) self.function_callback( f"{get_colored_text('[tool/error]', color='red')} " + get_bolded_text(f"[{crumbs}] [{elapsed(run)}] ") + f"Tool run errored with error:\n" f"{run.error}" ) [docs]class ConsoleCallbackHandler(FunctionCallbackHandler): """Tracer that prints to the console."""
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/stdout.html
6dbc3854648f-4
"""Tracer that prints to the console.""" name = "console_callback_handler" [docs] def __init__(self, **kwargs: Any) -> None: super().__init__(function=print, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/tracers/stdout.html
5deb15263d89-0
Source code for langchain.utilities.dalle_image_generator """Util that calls OpenAI's Dall-E Image Generator.""" from typing import Any, Dict, Optional from pydantic import BaseModel, Extra, root_validator from langchain.utils import get_from_dict_or_env [docs]class DallEAPIWrapper(BaseModel): """Wrapper for OpenAI's DALL-E Image Generator. Docs for using: 1. pip install openai 2. save your OPENAI_API_KEY in an environment variable """ client: Any #: :meta private: openai_api_key: Optional[str] = None """number of images to generate""" n: int = 1 """size of image to generate""" size: str = "1024x1024" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid def _dalle_image_url(self, prompt: str) -> str: params = {"prompt": prompt, "n": self.n, "size": self.size} response = self.client.create(**params) return response["data"][0]["url"] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) try: import openai openai.api_key = openai_api_key values["client"] = openai.Image except ImportError: raise ValueError( "Could not import openai python package. " "Please install it with `pip install openai`." ) return values
https://api.python.langchain.com/en/latest/_modules/langchain/utilities/dalle_image_generator.html
5deb15263d89-1
) return values [docs] def run(self, query: str) -> str: """Run query through OpenAI and parse result.""" image_url = self._dalle_image_url(query) if image_url is None or image_url == "": # Surface an explicit message rather than an empty result when no image URL was returned return "No image was generated" else: return image_url
https://api.python.langchain.com/en/latest/_modules/langchain/utilities/dalle_image_generator.html
de314e6776f3-0
Source code for langchain.utilities.powerbi """Wrapper around a Power BI endpoint.""" from __future__ import annotations import asyncio import logging import os from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Union import aiohttp import requests from aiohttp import ServerTimeoutError from pydantic import BaseModel, Field, root_validator, validator from requests.exceptions import Timeout _LOGGER = logging.getLogger(__name__) BASE_URL = os.getenv("POWERBI_BASE_URL", "https://api.powerbi.com/v1.0/myorg") if TYPE_CHECKING: from azure.core.credentials import TokenCredential [docs]class PowerBIDataset(BaseModel): """Create PowerBI engine from dataset ID and credential or token. Use either the credential or a supplied token to authenticate. If both are supplied the credential is used to generate a token. The impersonated_user_name is the UPN of a user to be impersonated. If the model is not RLS enabled, this will be ignored. """ dataset_id: str table_names: List[str] group_id: Optional[str] = None credential: Optional[TokenCredential] = None token: Optional[str] = None impersonated_user_name: Optional[str] = None sample_rows_in_table_info: int = Field(default=1, gt=0, le=10) schemas: Dict[str, str] = Field(default_factory=dict) aiosession: Optional[aiohttp.ClientSession] = None class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True @validator("table_names", allow_reuse=True) def fix_table_names(cls, table_names: List[str]) -> List[str]: """Fix the table names."""
https://api.python.langchain.com/en/latest/_modules/langchain/utilities/powerbi.html
de314e6776f3-1
"""Fix the table names.""" return [fix_table_name(table) for table in table_names] @root_validator(pre=True, allow_reuse=True) def token_or_credential_present(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Validate that at least one of token and credentials is present.""" if "token" in values or "credential" in values: return values raise ValueError("Please provide either a credential or a token.") @property def request_url(self) -> str: """Get the request url.""" if self.group_id: return f"{BASE_URL}/groups/{self.group_id}/datasets/{self.dataset_id}/executeQueries" # noqa: E501 # pylint: disable=C0301 return f"{BASE_URL}/datasets/{self.dataset_id}/executeQueries" # noqa: E501 # pylint: disable=C0301 @property def headers(self) -> Dict[str, str]: """Get the token.""" if self.token: return { "Content-Type": "application/json", "Authorization": "Bearer " + self.token, } from azure.core.exceptions import ( ClientAuthenticationError, # pylint: disable=import-outside-toplevel ) if self.credential: try: token = self.credential.get_token( "https://analysis.windows.net/powerbi/api/.default" ).token return { "Content-Type": "application/json", "Authorization": "Bearer " + token, } except Exception as exc: # pylint: disable=broad-exception-caught raise ClientAuthenticationError( "Could not get a token from the supplied credentials." ) from exc
https://api.python.langchain.com/en/latest/_modules/langchain/utilities/powerbi.html
de314e6776f3-2
"Could not get a token from the supplied credentials." ) from exc raise ClientAuthenticationError("No credential or token supplied.") [docs] def get_table_names(self) -> Iterable[str]: """Get names of tables available.""" return self.table_names [docs] def get_schemas(self) -> str: """Get the available schema's.""" if self.schemas: return ", ".join([f"{key}: {value}" for key, value in self.schemas.items()]) return "No known schema's yet. Use the schema_powerbi tool first." @property def table_info(self) -> str: """Information about all tables in the database.""" return self.get_table_info() def _get_tables_to_query( self, table_names: Optional[Union[List[str], str]] = None ) -> Optional[List[str]]: """Get the tables names that need to be queried, after checking they exist.""" if table_names is not None: if ( isinstance(table_names, list) and len(table_names) > 0 and table_names[0] != "" ): fixed_tables = [fix_table_name(table) for table in table_names] non_existing_tables = [ table for table in fixed_tables if table not in self.table_names ] if non_existing_tables: _LOGGER.warning( "Table(s) %s not found in dataset.", ", ".join(non_existing_tables), ) tables = [ table for table in fixed_tables if table not in non_existing_tables ] return tables if tables else None if isinstance(table_names, str) and table_names != "":
https://api.python.langchain.com/en/latest/_modules/langchain/utilities/powerbi.html
de314e6776f3-3
if isinstance(table_names, str) and table_names != "": if table_names not in self.table_names: _LOGGER.warning("Table %s not found in dataset.", table_names) return None return [fix_table_name(table_names)] return self.table_names def _get_tables_todo(self, tables_todo: List[str]) -> List[str]: """Get the tables that still need to be queried.""" return [table for table in tables_todo if table not in self.schemas] def _get_schema_for_tables(self, table_names: List[str]) -> str: """Create a string of the table schemas for the supplied tables.""" schemas = [ schema for table, schema in self.schemas.items() if table in table_names ] return ", ".join(schemas) [docs] def get_table_info( self, table_names: Optional[Union[List[str], str]] = None ) -> str: """Get information about specified tables.""" tables_requested = self._get_tables_to_query(table_names) if tables_requested is None: return "No (valid) tables requested." tables_todo = self._get_tables_todo(tables_requested) for table in tables_todo: self._get_schema(table) return self._get_schema_for_tables(tables_requested) [docs] async def aget_table_info( self, table_names: Optional[Union[List[str], str]] = None ) -> str: """Get information about specified tables.""" tables_requested = self._get_tables_to_query(table_names) if tables_requested is None: return "No (valid) tables requested." tables_todo = self._get_tables_todo(tables_requested)
https://api.python.langchain.com/en/latest/_modules/langchain/utilities/powerbi.html
de314e6776f3-4
# source: https://api.python.langchain.com/en/latest/_modules/langchain/utilities/powerbi.html
# NOTE(review): this excerpt opened with the tail of a coroutine whose `def`
# line precedes the visible chunk; its statements are preserved here as a
# comment because they cannot stand alone (signature not visible — confirm
# against the full module):
#     tables_todo = self._get_tables_todo(tables_requested)
#     await asyncio.gather(*[self._aget_schema(table) for table in tables_todo])
#     return self._get_schema_for_tables(tables_requested)


def _get_schema(self, table: str) -> None:
    """Fetch sample rows for ``table`` and cache them in ``self.schemas``.

    Runs a DAX ``TOPN`` query, converts the first result table to markdown,
    and stores it.  On timeout or any other failure the schema is recorded
    as ``"unknown"`` instead of raising, so one bad table does not abort the
    whole scan.
    """
    try:
        result = self.run(
            f"EVALUATE TOPN({self.sample_rows_in_table_info}, {table})"
        )
        self.schemas[table] = json_to_md(result["results"][0]["tables"][0]["rows"])
    except Timeout:
        _LOGGER.warning("Timeout while getting table info for %s", table)
        self.schemas[table] = "unknown"
    except Exception as exc:  # pylint: disable=broad-exception-caught
        _LOGGER.warning("Error while getting table info for %s: %s", table, exc)
        self.schemas[table] = "unknown"


async def _aget_schema(self, table: str) -> None:
    """Async counterpart of ``_get_schema``; same caching and error policy.

    Catches ``ServerTimeoutError`` (aiohttp) instead of the requests
    ``Timeout`` used by the sync path.
    """
    try:
        result = await self.arun(
            f"EVALUATE TOPN({self.sample_rows_in_table_info}, {table})"
        )
        self.schemas[table] = json_to_md(result["results"][0]["tables"][0]["rows"])
    except ServerTimeoutError:
        _LOGGER.warning("Timeout while getting table info for %s", table)
        self.schemas[table] = "unknown"
    except Exception as exc:  # pylint: disable=broad-exception-caught
        _LOGGER.warning("Error while getting table info for %s: %s", table, exc)
        self.schemas[table] = "unknown"


def _create_json_content(self, command: str) -> dict[str, Any]:
    """Build the JSON payload for a DAX query against the Power BI REST API."""
    return {
        "queries": [{"query": rf"{command}"}],
        "impersonatedUserName": self.impersonated_user_name,
        "serializerSettings": {"includeNulls": True},
    }


def run(self, command: str) -> Any:
    """Execute a DAX command and return a json representing the results.

    Returns a plain error string (not an exception) on a 403 so callers
    can surface the credential problem to the LLM.
    """
    _LOGGER.debug("Running command: %s", command)
    response = requests.post(
        self.request_url,
        json=self._create_json_content(command),
        headers=self.headers,
        timeout=10,
    )
    if response.status_code == 403:
        return (
            "TokenError: Could not login to PowerBI, please check your credentials."
        )
    return response.json()


async def arun(self, command: str) -> Any:
    """Execute a DAX command and return the result asynchronously.

    Reuses ``self.aiosession`` when one was supplied; otherwise opens a
    throwaway ``aiohttp.ClientSession`` for this single request.
    """
    _LOGGER.debug("Running command: %s", command)
    if self.aiosession:
        async with self.aiosession.post(
            self.request_url,
            headers=self.headers,
            json=self._create_json_content(command),
            timeout=10,
        ) as response:
            if response.status == 403:
                return "TokenError: Could not login to PowerBI, please check your credentials."  # noqa: E501
            response_json = await response.json(content_type=response.content_type)
            return response_json
    async with aiohttp.ClientSession() as session:
        async with session.post(
            self.request_url,
            headers=self.headers,
            json=self._create_json_content(command),
            timeout=10,
        ) as response:
            if response.status == 403:
                return "TokenError: Could not login to PowerBI, please check your credentials."  # noqa: E501
            response_json = await response.json(content_type=response.content_type)
            return response_json


def json_to_md(
    json_contents: List[Dict[str, Union[str, int, float]]],
    table_name: Optional[str] = None,
) -> str:
    """Converts a JSON object to a markdown table.

    Headers are cleaned: DAX-style brackets ``Table[Column]`` become
    ``Table.Column`` and, when ``table_name`` is given, its ``Table.``
    prefix is stripped.  Returns ``""`` for empty input.
    """
    if len(json_contents) == 0:
        return ""
    output_md = ""
    headers = json_contents[0].keys()
    for header in headers:
        # BUG FIX: str.replace returns a new string; the original code
        # discarded the result, so headers were emitted uncleaned and the
        # table_name prefix was never removed.
        header = header.replace("[", ".").replace("]", "")
        if table_name:
            header = header.replace(f"{table_name}.", "")
        output_md += f"| {header} "
    output_md += "|\n"
    for row in json_contents:
        for value in row.values():
            output_md += f"| {value} "
        output_md += "|\n"
    return output_md


def fix_table_name(table: str) -> str:
    """Add single quotes around table names that contain spaces."""
    if " " in table and not table.startswith("'") and not table.endswith("'"):
        return f"'{table}'"
    return table
https://api.python.langchain.com/en/latest/_modules/langchain/utilities/powerbi.html
f79dd9249302-0
# source: https://api.python.langchain.com/en/latest/_modules/langchain/utilities/arxiv.html
"""Util that calls Arxiv."""
import logging
import os
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, root_validator

from langchain.schema import Document

logger = logging.getLogger(__name__)


class ArxivAPIWrapper(BaseModel):
    """Wrapper around ArxivAPI.

    To use, you should have the ``arxiv`` python package installed.
    https://lukasschwab.me/arxiv.py/index.html
    This wrapper will use the Arxiv API to conduct searches and
    fetch document summaries. By default, it will return the document summaries
    of the top-k results.
    It limits the Document content by doc_content_chars_max.
    Set doc_content_chars_max=None if you don't want to limit the content size.

    Attributes:
        top_k_results: number of the top-scored document used for the arxiv tool
        ARXIV_MAX_QUERY_LENGTH: the cut limit on the query used for the arxiv tool.
        load_max_docs: a limit to the number of loaded documents
        load_all_available_meta:
            if True: the `metadata` of the loaded Documents contains all available
            meta info (see https://lukasschwab.me/arxiv.py/index.html#Result),
            if False: the `metadata` contains only the published date, title,
            authors and summary.
        doc_content_chars_max: an optional cut limit for the length of a document's
            content

    Example:
        .. code-block:: python

            from langchain.utilities.arxiv import ArxivAPIWrapper
            arxiv = ArxivAPIWrapper(
                top_k_results = 3,
                ARXIV_MAX_QUERY_LENGTH = 300,
                load_max_docs = 3,
                load_all_available_meta = False,
                doc_content_chars_max = 40000
            )
            arxiv.run("tree of thought llm")
    """

    arxiv_search: Any  #: :meta private:
    arxiv_exceptions: Any  # :meta private:
    top_k_results: int = 3
    # NOTE(review): left unannotated as in the original, so pydantic treats it
    # as a plain class attribute rather than a model field — confirm intended.
    ARXIV_MAX_QUERY_LENGTH = 300
    load_max_docs: int = 100
    load_all_available_meta: bool = False
    doc_content_chars_max: Optional[int] = 4000

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the python package exists in environment."""
        try:
            import arxiv

            # Store the search entry point and the exceptions to catch so the
            # rest of the class never imports `arxiv` directly.
            values["arxiv_search"] = arxiv.Search
            values["arxiv_exceptions"] = (
                arxiv.ArxivError,
                arxiv.UnexpectedEmptyPageError,
                arxiv.HTTPError,
            )
            values["arxiv_result"] = arxiv.Result
        except ImportError:
            raise ImportError(
                "Could not import arxiv python package. "
                "Please install it with `pip install arxiv`."
            )
        return values

    def run(self, query: str) -> str:
        """
        Performs an arxiv search and returns a single string with the publish
        date, title, authors, and summary for each article, separated by two
        newlines.
        If an error occurs or no documents are found, error text
        is returned instead.
        Wrapper for https://lukasschwab.me/arxiv.py/index.html#Search

        Args:
            query: a plaintext search query
        """  # noqa: E501
        try:
            # The query is truncated to the API's practical limit before search.
            results = self.arxiv_search(  # type: ignore
                query[: self.ARXIV_MAX_QUERY_LENGTH], max_results=self.top_k_results
            ).results()
        except self.arxiv_exceptions as ex:
            return f"Arxiv exception: {ex}"
        docs = [
            f"Published: {result.updated.date()}\n"
            f"Title: {result.title}\n"
            f"Authors: {', '.join(a.name for a in result.authors)}\n"
            f"Summary: {result.summary}"
            for result in results
        ]
        if docs:
            # Truncation is applied to the joined string, not per document.
            return "\n\n".join(docs)[: self.doc_content_chars_max]
        else:
            return "No good Arxiv Result was found"

    def load(self, query: str) -> List[Document]:
        """
        Run Arxiv search and get the article texts plus the article meta information.
        See https://lukasschwab.me/arxiv.py/index.html#Search

        Returns: a list of documents with the document.page_content in text format

        Performs an arxiv search, downloads the top k results as PDFs, loads
        them as Documents, and returns them in a List.

        Args:
            query: a plaintext search query
        """  # noqa: E501
        try:
            import fitz
        except ImportError:
            raise ImportError(
                "PyMuPDF package not found, please install it with "
                "`pip install pymupdf`"
            )

        try:
            results = self.arxiv_search(  # type: ignore
                query[: self.ARXIV_MAX_QUERY_LENGTH], max_results=self.load_max_docs
            ).results()
        except self.arxiv_exceptions as ex:
            logger.debug("Error on arxiv: %s", ex)
            return []
        docs: List[Document] = []
        for result in results:
            try:
                # Download the PDF and extract all page text with PyMuPDF.
                doc_file_name: str = result.download_pdf()
                with fitz.open(doc_file_name) as doc_file:
                    text: str = "".join(page.get_text() for page in doc_file)
            except FileNotFoundError as f_ex:
                # Download failed for this entry — skip it, keep the rest.
                logger.debug(f_ex)
                continue
            if self.load_all_available_meta:
                extra_metadata = {
                    "entry_id": result.entry_id,
                    "published_first_time": str(result.published.date()),
                    "comment": result.comment,
                    "journal_ref": result.journal_ref,
                    "doi": result.doi,
                    "primary_category": result.primary_category,
                    "categories": result.categories,
                    "links": [link.href for link in result.links],
                }
            else:
                extra_metadata = {}
            metadata = {
                "Published": str(result.updated.date()),
                "Title": result.title,
                "Authors": ", ".join(a.name for a in result.authors),
                "Summary": result.summary,
                **extra_metadata,
            }
            doc = Document(
                page_content=text[: self.doc_content_chars_max], metadata=metadata
            )
            docs.append(doc)
            # Remove the downloaded PDF once its text has been captured.
            os.remove(doc_file_name)
        return docs
09b56df6cd9e-0
# source: https://api.python.langchain.com/en/latest/_modules/langchain/utilities/sql_database.html
"""SQLAlchemy wrapper around a database."""
from __future__ import annotations

import warnings
from typing import Any, Iterable, List, Optional, Sequence

import sqlalchemy
from sqlalchemy import MetaData, Table, create_engine, inspect, select, text
from sqlalchemy.engine import Engine
from sqlalchemy.exc import ProgrammingError, SQLAlchemyError
from sqlalchemy.schema import CreateTable

from langchain.utils import get_from_env


def _format_index(index: sqlalchemy.engine.interfaces.ReflectedIndex) -> str:
    """Render one reflected index as a single descriptive line."""
    return (
        f'Name: {index["name"]}, Unique: {index["unique"]},'
        f' Columns: {str(index["column_names"])}'
    )


def truncate_word(content: Any, *, length: int, suffix: str = "...") -> str:
    """
    Truncate a string to a certain number of words, based on the max string
    length.

    Non-strings and non-positive lengths are returned unchanged; truncation
    cuts at the last word boundary that fits within ``length`` minus the
    suffix, then appends ``suffix``.
    """
    if not isinstance(content, str) or length <= 0:
        return content
    if len(content) <= length:
        return content
    return content[: length - len(suffix)].rsplit(" ", 1)[0] + suffix


class SQLDatabase:
    """SQLAlchemy wrapper around a database."""

    def __init__(
        self,
        engine: Engine,
        schema: Optional[str] = None,
        metadata: Optional[MetaData] = None,
        ignore_tables: Optional[List[str]] = None,
        include_tables: Optional[List[str]] = None,
        sample_rows_in_table_info: int = 3,
        indexes_in_table_info: bool = False,
        custom_table_info: Optional[dict] = None,
        view_support: bool = False,
        max_string_length: int = 300,
    ):
        """Create engine from database URI.

        Args:
            engine: an already-constructed SQLAlchemy Engine.
            schema: optional schema name to restrict reflection/queries to.
            metadata: optional pre-built MetaData; a fresh one is used otherwise.
            ignore_tables: tables to exclude (mutually exclusive with
                include_tables).
            include_tables: tables to restrict to.
            sample_rows_in_table_info: rows appended to each table description.
            indexes_in_table_info: whether to append index info per table.
            custom_table_info: mapping of table name -> replacement description.
            view_support: also reflect views, not just tables.
            max_string_length: per-value truncation limit for query results.

        Raises:
            ValueError: include/ignore both given, or listed tables missing.
            TypeError: sample_rows_in_table_info is not an int, or
                custom_table_info is not a dict.
        """
        self._engine = engine
        self._schema = schema
        if include_tables and ignore_tables:
            raise ValueError("Cannot specify both include_tables and ignore_tables")

        self._inspector = inspect(self._engine)
        # including view support by adding the views as well as tables to the all
        # tables list if view_support is True
        self._all_tables = set(
            self._inspector.get_table_names(schema=schema)
            + (self._inspector.get_view_names(schema=schema) if view_support else [])
        )

        self._include_tables = set(include_tables) if include_tables else set()
        if self._include_tables:
            missing_tables = self._include_tables - self._all_tables
            if missing_tables:
                raise ValueError(
                    f"include_tables {missing_tables} not found in database"
                )
        self._ignore_tables = set(ignore_tables) if ignore_tables else set()
        if self._ignore_tables:
            missing_tables = self._ignore_tables - self._all_tables
            if missing_tables:
                raise ValueError(
                    f"ignore_tables {missing_tables} not found in database"
                )
        usable_tables = self.get_usable_table_names()
        self._usable_tables = set(usable_tables) if usable_tables else self._all_tables

        if not isinstance(sample_rows_in_table_info, int):
            raise TypeError("sample_rows_in_table_info must be an integer")

        self._sample_rows_in_table_info = sample_rows_in_table_info
        self._indexes_in_table_info = indexes_in_table_info

        self._custom_table_info = custom_table_info
        if self._custom_table_info:
            if not isinstance(self._custom_table_info, dict):
                raise TypeError(
                    "table_info must be a dictionary with table names as keys and the "
                    "desired table info as values"
                )
            # only keep the tables that are also present in the database
            intersection = set(self._custom_table_info).intersection(self._all_tables)
            self._custom_table_info = dict(
                (table, self._custom_table_info[table])
                for table in self._custom_table_info
                if table in intersection
            )

        self._max_string_length = max_string_length

        self._metadata = metadata or MetaData()
        # including view support if view_support = true
        self._metadata.reflect(
            views=view_support,
            bind=self._engine,
            only=list(self._usable_tables),
            schema=self._schema,
        )

    @classmethod
    def from_uri(
        cls, database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any
    ) -> SQLDatabase:
        """Construct a SQLAlchemy engine from URI."""
        _engine_args = engine_args or {}
        return cls(create_engine(database_uri, **_engine_args), **kwargs)

    @classmethod
    def from_databricks(
        cls,
        catalog: str,
        schema: str,
        host: Optional[str] = None,
        api_token: Optional[str] = None,
        warehouse_id: Optional[str] = None,
        cluster_id: Optional[str] = None,
        engine_args: Optional[dict] = None,
        **kwargs: Any,
    ) -> SQLDatabase:
        """
        Class method to create an SQLDatabase instance from a Databricks
        connection.  This method requires the 'databricks-sql-connector'
        package. If not installed, it can be added using
        `pip install databricks-sql-connector`.

        Args:
            catalog (str): The catalog name in the Databricks database.
            schema (str): The schema name in the catalog.
            host (Optional[str]): The Databricks workspace hostname, excluding
                'https://' part. If not provided, it attempts to fetch from the
                environment variable 'DATABRICKS_HOST'. If still unavailable and if
                running in a Databricks notebook, it defaults to the current workspace
                hostname. Defaults to None.
            api_token (Optional[str]): The Databricks personal access token for
                accessing the Databricks SQL warehouse or the cluster. If not
                provided, it attempts to fetch from 'DATABRICKS_TOKEN'. If still
                unavailable and running in a Databricks notebook, a temporary token
                for the current user is generated. Defaults to None.
            warehouse_id (Optional[str]): The warehouse ID in the Databricks SQL. If
                provided, the method configures the connection to use this warehouse.
                Cannot be used with 'cluster_id'. Defaults to None.
            cluster_id (Optional[str]): The cluster ID in the Databricks Runtime. If
                provided, the method configures the connection to use this cluster.
                Cannot be used with 'warehouse_id'. If running in a Databricks
                notebook and both 'warehouse_id' and 'cluster_id' are None, it uses
                the ID of the cluster the notebook is attached to. Defaults to None.
            engine_args (Optional[dict]): The arguments to be used when connecting
                Databricks. Defaults to None.
            **kwargs (Any): Additional keyword arguments for the `from_uri` method.

        Returns:
            SQLDatabase: An instance of SQLDatabase configured with the provided
                Databricks connection details.

        Raises:
            ValueError: If 'databricks-sql-connector' is not found, or if both
                'warehouse_id' and 'cluster_id' are provided, or if neither
                'warehouse_id' nor 'cluster_id' are provided and it's not executing
                inside a Databricks notebook.
        """
        try:
            from databricks import sql  # noqa: F401
        except ImportError:
            raise ValueError(
                "databricks-sql-connector package not found, please install with"
                " `pip install databricks-sql-connector`"
            )
        context = None
        try:
            from dbruntime.databricks_repl_context import get_context

            context = get_context()
        except ImportError:
            pass

        default_host = context.browserHostName if context else None
        if host is None:
            host = get_from_env("host", "DATABRICKS_HOST", default_host)

        default_api_token = context.apiToken if context else None
        if api_token is None:
            api_token = get_from_env("api_token", "DATABRICKS_TOKEN", default_api_token)

        if warehouse_id is None and cluster_id is None:
            if context:
                # In a Databricks notebook, default to the attached cluster.
                cluster_id = context.clusterId
            else:
                raise ValueError(
                    "Need to provide either 'warehouse_id' or 'cluster_id'."
                )

        if warehouse_id and cluster_id:
            # BUG FIX: original message read "both ... or", which is
            # grammatically/logically wrong for a mutual-exclusion error.
            raise ValueError("Can't have both 'warehouse_id' and 'cluster_id'.")

        if warehouse_id:
            http_path = f"/sql/1.0/warehouses/{warehouse_id}"
        else:
            http_path = f"/sql/protocolv1/o/0/{cluster_id}"

        uri = (
            f"databricks://token:{api_token}@{host}?"
            f"http_path={http_path}&catalog={catalog}&schema={schema}"
        )
        return cls.from_uri(database_uri=uri, engine_args=engine_args, **kwargs)

    @classmethod
    def from_cnosdb(
        cls,
        url: str = "127.0.0.1:8902",
        user: str = "root",
        password: str = "",
        tenant: str = "cnosdb",
        database: str = "public",
    ) -> SQLDatabase:
        """
        Class method to create an SQLDatabase instance from a CnosDB connection.
        This method requires the 'cnos-connector' package. If not installed, it
        can be added using `pip install cnos-connector`.

        Args:
            url (str): The HTTP connection host name and port number of the CnosDB
                service, excluding "http://" or "https://", with a default value
                of "127.0.0.1:8902".
            user (str): The username used to connect to the CnosDB service, with a
                default value of "root".
            password (str): The password of the user connecting to the CnosDB
                service, with a default value of "".
            tenant (str): The name of the tenant used to connect to the CnosDB
                service, with a default value of "cnosdb".
            database (str): The name of the database in the CnosDB tenant.

        Returns:
            SQLDatabase: An instance of SQLDatabase configured with the provided
                CnosDB connection details.
        """
        try:
            from cnosdb_connector import make_cnosdb_langchain_uri

            uri = make_cnosdb_langchain_uri(url, user, password, tenant, database)
            return cls.from_uri(database_uri=uri)
        except ImportError:
            raise ValueError(
                "cnos-connector package not found, please install with"
                " `pip install cnos-connector`"
            )

    @property
    def dialect(self) -> str:
        """Return string representation of dialect to use."""
        return self._engine.dialect.name

    def get_usable_table_names(self) -> Iterable[str]:
        """Get names of tables available."""
        if self._include_tables:
            return sorted(self._include_tables)
        return sorted(self._all_tables - self._ignore_tables)

    def get_table_names(self) -> Iterable[str]:
        """Get names of tables available. Deprecated."""
        warnings.warn(
            "This method is deprecated - please use `get_usable_table_names`."
        )
        return self.get_usable_table_names()

    @property
    def table_info(self) -> str:
        """Information about all tables in the database."""
        return self.get_table_info()

    def get_table_info(self, table_names: Optional[List[str]] = None) -> str:
        """Get information about specified tables.

        Follows best practices as specified in: Rajkumar et al, 2022
        (https://arxiv.org/abs/2204.00498)

        If `sample_rows_in_table_info`, the specified number of sample rows will be
        appended to each table description. This can increase performance as
        demonstrated in the paper.
        """
        all_table_names = self.get_usable_table_names()
        if table_names is not None:
            missing_tables = set(table_names).difference(all_table_names)
            if missing_tables:
                raise ValueError(f"table_names {missing_tables} not found in database")
            all_table_names = table_names

        meta_tables = [
            tbl
            for tbl in self._metadata.sorted_tables
            if tbl.name in set(all_table_names)
            and not (self.dialect == "sqlite" and tbl.name.startswith("sqlite_"))
        ]

        tables = []
        for table in meta_tables:
            # A user-supplied description overrides the generated one entirely.
            if self._custom_table_info and table.name in self._custom_table_info:
                tables.append(self._custom_table_info[table.name])
                continue

            # add create table command
            create_table = str(CreateTable(table).compile(self._engine))
            table_info = f"{create_table.rstrip()}"
            has_extra_info = (
                self._indexes_in_table_info or self._sample_rows_in_table_info
            )
            if has_extra_info:
                table_info += "\n\n/*"
            if self._indexes_in_table_info:
                table_info += f"\n{self._get_table_indexes(table)}\n"
            if self._sample_rows_in_table_info:
                table_info += f"\n{self._get_sample_rows(table)}\n"
            if has_extra_info:
                table_info += "*/"
            tables.append(table_info)
        tables.sort()
        final_str = "\n\n".join(tables)
        return final_str

    def _get_table_indexes(self, table: Table) -> str:
        """Describe all indexes of ``table`` as one text block."""
        indexes = self._inspector.get_indexes(table.name)
        indexes_formatted = "\n".join(map(_format_index, indexes))
        return f"Table Indexes:\n{indexes_formatted}"

    def _get_sample_rows(self, table: Table) -> str:
        """Fetch and format up to ``sample_rows_in_table_info`` rows."""
        # build the select command
        command = select(table).limit(self._sample_rows_in_table_info)

        # save the columns in string format
        columns_str = "\t".join([col.name for col in table.columns])

        try:
            # get the sample rows
            with self._engine.connect() as connection:
                sample_rows_result = connection.execute(command)  # type: ignore
                # shorten values in the sample rows
                sample_rows = list(
                    map(lambda ls: [str(i)[:100] for i in ls], sample_rows_result)
                )

            # save the sample rows in string format
            sample_rows_str = "\n".join(["\t".join(row) for row in sample_rows])

        # in some dialects when there are no rows in the table a
        # 'ProgrammingError' is returned
        except ProgrammingError:
            sample_rows_str = ""

        return (
            f"{self._sample_rows_in_table_info} rows from {table.name} table:\n"
            f"{columns_str}\n"
            f"{sample_rows_str}"
        )

    def _execute(self, command: str, fetch: Optional[str] = "all") -> Sequence:
        """
        Executes SQL command through underlying engine.
        If the statement returns no rows, an empty list is returned.
        """
        with self._engine.begin() as connection:
            if self._schema is not None:
                # Scope the session to the configured schema, per dialect.
                if self.dialect == "snowflake":
                    connection.exec_driver_sql(
                        f"ALTER SESSION SET search_path='{self._schema}'"
                    )
                elif self.dialect == "bigquery":
                    connection.exec_driver_sql(f"SET @@dataset_id='{self._schema}'")
                elif self.dialect == "mssql":
                    pass
                else:  # postgresql and compatible dialects
                    connection.exec_driver_sql(f"SET search_path TO {self._schema}")
            cursor = connection.execute(text(command))
            if cursor.returns_rows:
                if fetch == "all":
                    result = cursor.fetchall()
                elif fetch == "one":
                    result = cursor.fetchone()  # type: ignore
                else:
                    raise ValueError("Fetch parameter must be either 'one' or 'all'")
                return result
        return []

    def run(self, command: str, fetch: str = "all") -> str:
        """Execute a SQL command and return a string representing the results.

        If the statement returns rows, a string of the results is returned.
        If the statement returns no rows, an empty string is returned.
        """
        result = self._execute(command, fetch)
        # Convert columns values to string to avoid issues with sqlalchemy
        # truncating text
        if not result:
            return ""
        elif isinstance(result, list):
            res: Sequence = [
                tuple(truncate_word(c, length=self._max_string_length) for c in r)
                for r in result
            ]
        else:
            res = tuple(
                truncate_word(c, length=self._max_string_length) for c in result
            )
        return str(res)

    def get_table_info_no_throw(self, table_names: Optional[List[str]] = None) -> str:
        """Get information about specified tables.

        Follows best practices as specified in: Rajkumar et al, 2022
        (https://arxiv.org/abs/2204.00498)

        If `sample_rows_in_table_info`, the specified number of sample rows will be
        appended to each table description. This can increase performance as
        demonstrated in the paper.
        """
        try:
            return self.get_table_info(table_names)
        except ValueError as e:
            """Format the error message"""
            return f"Error: {e}"

    def run_no_throw(self, command: str, fetch: str = "all") -> str:
        """Execute a SQL command and return a string representing the results.

        If the statement returns rows, a string of the results is returned.
        If the statement returns no rows, an empty string is returned.
        If the statement throws an error, the error message is returned.
        """
        try:
            return self.run(command, fetch)
        except SQLAlchemyError as e:
            """Format the error message"""
            return f"Error: {e}"
0ece27268d20-0
# source: https://api.python.langchain.com/en/latest/_modules/langchain/utilities/openapi.html
"""Utility functions for parsing an OpenAPI spec."""
import copy
import json
import logging
import re
from enum import Enum
from pathlib import Path
from typing import Dict, List, Optional, Union

import requests
import yaml
from openapi_schema_pydantic import (
    Components,
    OpenAPI,
    Operation,
    Parameter,
    PathItem,
    Paths,
    Reference,
    RequestBody,
    Schema,
)
from pydantic import ValidationError

logger = logging.getLogger(__name__)


class HTTPVerb(str, Enum):
    """Enumerator of the HTTP verbs."""

    GET = "get"
    PUT = "put"
    POST = "post"
    DELETE = "delete"
    OPTIONS = "options"
    HEAD = "head"
    PATCH = "patch"
    TRACE = "trace"

    @classmethod
    def from_str(cls, verb: str) -> "HTTPVerb":
        """Parse an HTTP verb."""
        try:
            return cls(verb)
        except ValueError:
            raise ValueError(f"Invalid HTTP verb. Valid values are {cls.__members__}")


class OpenAPISpec(OpenAPI):
    """OpenAPI Model that removes misformatted parts of the spec."""

    @property
    def _paths_strict(self) -> Paths:
        # Paths are optional on the base model; accessing them here asserts
        # presence so downstream lookups never see None.
        if not self.paths:
            raise ValueError("No paths found in spec")
        return self.paths

    def _get_path_strict(self, path: str) -> PathItem:
        found = self._paths_strict.get(path)
        if not found:
            raise ValueError(f"No path found for {path}")
        return found

    @property
    def _components_strict(self) -> Components:
        """Get components or err."""
        if self.components is None:
            raise ValueError("No components found in spec. ")
        return self.components

    @property
    def _parameters_strict(self) -> Dict[str, Union[Parameter, Reference]]:
        """Get parameters or err."""
        params = self._components_strict.parameters
        if params is None:
            raise ValueError("No parameters found in spec. ")
        return params

    @property
    def _schemas_strict(self) -> Dict[str, Schema]:
        """Get the dictionary of schemas or err."""
        schema_map = self._components_strict.schemas
        if schema_map is None:
            raise ValueError("No schemas found in spec. ")
        return schema_map

    @property
    def _request_bodies_strict(self) -> Dict[str, Union[RequestBody, Reference]]:
        """Get the request body or err."""
        bodies = self._components_strict.requestBodies
        if bodies is None:
            raise ValueError("No request body found in spec. ")
        return bodies

    def _get_referenced_parameter(self, ref: Reference) -> Union[Parameter, Reference]:
        """Get a parameter (or nested reference) or err."""
        # The final segment of a "#/components/..." pointer is the lookup key.
        ref_name = ref.ref.split("/")[-1]
        known = self._parameters_strict
        if ref_name not in known:
            raise ValueError(f"No parameter found for {ref_name}")
        return known[ref_name]

    def _get_root_referenced_parameter(self, ref: Reference) -> Parameter:
        """Follow a chain of references down to the concrete parameter."""
        resolved = self._get_referenced_parameter(ref)
        while isinstance(resolved, Reference):
            resolved = self._get_referenced_parameter(resolved)
        return resolved

    def get_referenced_schema(self, ref: Reference) -> Schema:
        """Get a schema (or nested reference) or err."""
        ref_name = ref.ref.split("/")[-1]
        known = self._schemas_strict
        if ref_name not in known:
            raise ValueError(f"No schema found for {ref_name}")
        return known[ref_name]

    def get_schema(self, schema: Union[Reference, Schema]) -> Schema:
        """Return ``schema`` itself, resolving a top-level Reference if given."""
        if isinstance(schema, Reference):
            return self.get_referenced_schema(schema)
        return schema

    def _get_root_referenced_schema(self, ref: Reference) -> Schema:
        """Follow a chain of references down to the concrete schema."""
        resolved = self.get_referenced_schema(ref)
        while isinstance(resolved, Reference):
            resolved = self.get_referenced_schema(resolved)
        return resolved

    def _get_referenced_request_body(
        self, ref: Reference
    ) -> Optional[Union[Reference, RequestBody]]:
        """Get a request body (or nested reference) or err."""
        ref_name = ref.ref.split("/")[-1]
        known = self._request_bodies_strict
        if ref_name not in known:
            raise ValueError(f"No request body found for {ref_name}")
        return known[ref_name]

    def _get_root_referenced_request_body(
        self, ref: Reference
    ) -> Optional[RequestBody]:
        """Follow a chain of references down to the concrete request body."""
        resolved = self._get_referenced_request_body(ref)
        while isinstance(resolved, Reference):
            resolved = self._get_referenced_request_body(resolved)
        return resolved

    @staticmethod
    def _alert_unsupported_spec(obj: dict) -> None:
        """Alert if the spec is not supported."""
        warning_message = (
            " This may result in degraded performance."
            + " Convert your OpenAPI spec to 3.1.* spec"
            + " for better support."
        )
        swagger_version = obj.get("swagger")
        openapi_version = obj.get("openapi")
        if isinstance(openapi_version, str):
            # Only 3.1.0 is fully supported; anything else gets a warning.
            if openapi_version != "3.1.0":
                logger.warning(
                    f"Attempting to load an OpenAPI {openapi_version}"
                    f" spec. {warning_message}"
                )
            else:
                pass
        elif isinstance(swagger_version, str):
            logger.warning(
                f"Attempting to load a Swagger {swagger_version}"
                f" spec. {warning_message}"
            )
        else:
            raise ValueError(
                "Attempting to load an unsupported spec:"
                f"\n\n{obj}\n{warning_message}"
            )

    @classmethod
    def parse_obj(cls, obj: dict) -> "OpenAPISpec":
        """Parse a raw dict into an OpenAPISpec, pruning invalid fields.

        On validation failure, every offending field reported by pydantic is
        removed from a deep copy and parsing is retried recursively.
        """
        try:
            cls._alert_unsupported_spec(obj)
            return super().parse_obj(obj)
        except ValidationError as e:
            # We are handling possibly misconfigured specs and want to do a
            # best-effort job to get a reasonable interface out of it.
            pruned = copy.deepcopy(obj)
            for error in e.errors():
                keys = error["loc"]
                node = pruned
                for key in keys[:-1]:
                    node = node[key]
                node.pop(keys[-1], None)
            return cls.parse_obj(pruned)

    @classmethod
    def from_spec_dict(cls, spec_dict: dict) -> "OpenAPISpec":
        """Get an OpenAPI spec from a dict."""
        return cls.parse_obj(spec_dict)

    @classmethod
    def from_text(cls, text: str) -> "OpenAPISpec":
        """Get an OpenAPI spec from a text."""
        try:
            spec_dict = json.loads(text)
        except json.JSONDecodeError:
            # Not JSON — fall back to YAML (a superset for our purposes).
            spec_dict = yaml.safe_load(text)
        return cls.from_spec_dict(spec_dict)

    @classmethod
    def from_file(cls, path: Union[str, Path]) -> "OpenAPISpec":
        """Get an OpenAPI spec from a file path."""
        path_ = path if isinstance(path, Path) else Path(path)
        if not path_.exists():
            raise FileNotFoundError(f"{path} does not exist")
        with path_.open("r") as f:
            return cls.from_text(f.read())

    @classmethod
    def from_url(cls, url: str) -> "OpenAPISpec":
        """Get an OpenAPI spec from a URL."""
        response = requests.get(url)
        return cls.from_text(response.text)

    @property
    def base_url(self) -> str:
        """Get the base url."""
        return self.servers[0].url

    def get_methods_for_path(self, path: str) -> List[str]:
        """Return a list of valid methods for the specified path."""
        path_item = self._get_path_strict(path)
        return [
            verb.value
            for verb in HTTPVerb
            if isinstance(getattr(path_item, verb.value, None), Operation)
        ]

    def get_parameters_for_path(self, path: str) -> List[Parameter]:
        """Return the path-level parameters, with references resolved."""
        path_item = self._get_path_strict(path)
        if not path_item.parameters:
            return []
        resolved: List[Parameter] = []
        for param in path_item.parameters:
            if isinstance(param, Reference):
                param = self._get_root_referenced_parameter(param)
            resolved.append(param)
        return resolved

    def get_operation(self, path: str, method: str) -> Operation:
        """Get the operation object for a given path and HTTP method."""
        path_item = self._get_path_strict(path)
        operation_obj = getattr(path_item, method, None)
        if not isinstance(operation_obj, Operation):
            raise ValueError(f"No {method} method found for {path}")
        return operation_obj

    def get_parameters_for_operation(self, operation: Operation) -> List[Parameter]:
        """Get the components for a given operation."""
        resolved: List[Parameter] = []
        if operation.parameters:
            for param in operation.parameters:
                if isinstance(param, Reference):
                    param = self._get_root_referenced_parameter(param)
                resolved.append(param)
        return resolved

    def get_request_body_for_operation(
        self, operation: Operation
    ) -> Optional[RequestBody]:
        """Get the request body for a given operation."""
        request_body = operation.requestBody
        if isinstance(request_body, Reference):
            request_body = self._get_root_referenced_request_body(request_body)
        return request_body

    @staticmethod
    def get_cleaned_operation_id(operation: Operation, path: str, method: str) -> str:
        """Get a cleaned operation id from an operation id."""
        operation_id = operation.operationId
        if operation_id is None:
            # Replace all punctuation of any kind with underscore
            path = re.sub(r"[^a-zA-Z0-9]", "_", path.lstrip("/"))
            operation_id = f"{path}_{method}"
        return operation_id.replace("-", "_").replace(".", "_").replace("/", "_")
https://api.python.langchain.com/en/latest/_modules/langchain/utilities/openapi.html