id stringlengths 14 15 | text stringlengths 35 2.51k | source stringlengths 61 154 |
|---|---|---|
c21ace92c567-2 | )
self.logger.report_text(warning, level=30, print_console=True)
self.callback_columns: list = []
self.action_records: list = []
self.complexity_metrics = complexity_metrics
self.visualize = visualize
self.nlp = spacy.load("en_core_web_sm")
def _init_resp(self) -> Dic... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/clearml_callback.html |
c21ace92c567-3 | if self.stream_logs:
self.logger.report_text(resp)
[docs] def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self.step += 1
self.llm_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({"action": "on... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/clearml_callback.html |
c21ace92c567-4 | chain_input = inputs["input"]
if isinstance(chain_input, str):
input_resp = deepcopy(resp)
input_resp["input"] = chain_input
self.on_chain_start_records.append(input_resp)
self.action_records.append(input_resp)
if self.stream_logs:
self... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/clearml_callback.html |
c21ace92c567-5 | self.step += 1
self.tool_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_tool_start", "input_str": input_str})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
self.on_tool_start_records.append(res... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/clearml_callback.html |
c21ace92c567-6 | if self.stream_logs:
self.logger.report_text(resp)
[docs] def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run when agent ends running."""
self.step += 1
self.agent_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update(
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/clearml_callback.html |
c21ace92c567-7 | """
resp = {}
textstat = import_textstat()
spacy = import_spacy()
if self.complexity_metrics:
text_complexity_metrics = {
"flesch_reading_ease": textstat.flesch_reading_ease(text),
"flesch_kincaid_grade": textstat.flesch_kincaid_grade(text),
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/clearml_callback.html |
c21ace92c567-8 | dep_out = spacy.displacy.render( # type: ignore
doc, style="dep", jupyter=False, page=True
)
dep_output_path = Path(
self.temp_dir.name, hash_string(f"dep-{text}") + ".html"
)
dep_output_path.open("w", encoding="utf-8").write(dep_out)
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/clearml_callback.html |
c21ace92c567-9 | "flesch_kincaid_grade",
"smog_index",
"coleman_liau_index",
"automated_readability_index",
"dale_chall_readability_score",
"difficult_words",
"linsear_write_formula",
"gunning_fog",
"text_stan... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/clearml_callback.html |
c21ace92c567-10 | finish: bool = False,
) -> None:
"""Flush the tracker and setup the session.
Everything after this will be a new table.
Args:
name: Name of the preformed session so far so it is identifyable
langchain_asset: The langchain asset to save.
finish: Whether to ... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/clearml_callback.html |
c21ace92c567-11 | )
output_model.update_weights(
weights_filename=str(langchain_asset_path),
auto_delete_file=False,
target_filename=name,
)
except NotImplementedError as e:
print("Could not save model.")
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/clearml_callback.html |
a765de2a2764-0 | Source code for langchain.callbacks.file
"""Callback Handler that writes to a file."""
from typing import Any, Dict, Optional, TextIO, cast
from langchain.callbacks.base import BaseCallbackHandler
from langchain.input import print_text
from langchain.schema import AgentAction, AgentFinish
[docs]class FileCallbackHandle... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/file.html |
a765de2a2764-1 | ) -> Any:
"""Run on agent action."""
print_text(action.log, color=color if color else self.color, file=self.file)
[docs] def on_tool_end(
self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/file.html |
a737591768d4-0 | Source code for langchain.callbacks.arthur_callback
"""ArthurAI's Callback Handler."""
from __future__ import annotations
import os
import uuid
from collections import defaultdict
from datetime import datetime
from time import time
from typing import TYPE_CHECKING, Any, DefaultDict, Dict, List, Optional, Union
import n... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/arthur_callback.html |
a737591768d4-1 | """
def __init__(
self,
arthur_model: ArthurModel,
) -> None:
"""Initialize callback handler."""
super().__init__()
arthurai = _lazy_load_arthur()
Stage = arthurai.common.constants.Stage
ValueType = arthurai.common.constants.ValueType
self.arthur_m... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/arthur_callback.html |
a737591768d4-2 | arthur_url: Optional[str] = "https://app.arthur.ai",
arthur_login: Optional[str] = None,
arthur_password: Optional[str] = None,
) -> ArthurCallbackHandler:
"""Initialize callback handler from Arthur credentials.
Args:
model_id (str): The ID of the arthur model to log to.
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/arthur_callback.html |
a737591768d4-3 | )
# get model from Arthur by the provided model ID
try:
arthur_model = arthur.get_model(model_id)
except ResponseClientError:
raise ValueError(
f"Was unable to retrieve model with id {model_id} from Arthur."
" Make sure the ID corresponds t... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/arthur_callback.html |
a737591768d4-4 | " Restart and try running the LLM again"
) from e
# mark the duration time between on_llm_start() and on_llm_end()
time_from_start_to_end = time() - run_map_data["start_time"]
# create inferences to log to Arthur
inferences = []
for i, generations in enumerate(respons... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/arthur_callback.html |
a737591768d4-5 | # add token usage counts to the inference if the
# ArthurModel was registered to monitor token usage
if (
isinstance(response.llm_output, dict)
and TOKEN_USAGE in response.llm_output
):
token_usage = response.llm... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/arthur_callback.html |
a737591768d4-6 | """On new token, pass."""
[docs] def on_chain_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Do nothing when LLM chain outputs an error."""
[docs] def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/arthur_callback.html |
995936f8b4fc-0 | Source code for langchain.callbacks.mlflow_callback
import random
import string
import tempfile
import traceback
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.utils import (
Bas... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html |
995936f8b4fc-1 | "flesch_reading_ease": textstat.flesch_reading_ease(text),
"flesch_kincaid_grade": textstat.flesch_kincaid_grade(text),
"smog_index": textstat.smog_index(text),
"coleman_liau_index": textstat.coleman_liau_index(text),
"automated_readability_index": textstat.automated_readability_index(te... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html |
995936f8b4fc-2 | doc, style="ent", jupyter=False, page=True
)
text_visualizations = {
"dependency_tree": dep_out,
"entities": ent_out,
}
resp.update(text_visualizations)
return resp
[docs]def construct_html_from_prompt_and_generation(prompt: str, generation: str) -> Any:
"... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html |
995936f8b4fc-3 | # User can set other env variables described here
# > https://www.mlflow.org/docs/latest/tracking.html#logging-to-a-tracking-server
experiment_name = get_from_dict_or_env(
kwargs, "experiment_name", "MLFLOW_EXPERIMENT_NAME"
)
self.mlf_exp = self.mlflow.get_experiment_by_name(... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html |
995936f8b4fc-4 | ):
self.mlflow.log_metric(key, value)
def metrics(
self, data: Union[Dict[str, float], Dict[str, int]], step: Optional[int] = 0
) -> None:
"""To log all metrics in the input dict."""
with self.mlflow.start_run(
run_id=self.run.info.run_id, experiment_id=self.mlf_e... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html |
995936f8b4fc-5 | def artifact(self, path: str) -> None:
"""To upload the file from given path as artifact."""
with self.mlflow.start_run(
run_id=self.run.info.run_id, experiment_id=self.mlf_expid
):
self.mlflow.log_artifact(path)
def langchain_artifact(self, chain: Any) -> None:
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html |
995936f8b4fc-6 | super().__init__()
self.name = name
self.experiment = experiment
self.tags = tags
self.tracking_uri = tracking_uri
self.temp_dir = tempfile.TemporaryDirectory()
self.mlflg = MlflowLogger(
tracking_uri=self.tracking_uri,
experiment_name=self.experim... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html |
995936f8b4fc-7 | self.metrics[k] = 0
for k, v in self.records.items():
self.records[k] = []
[docs] def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts."""
self.metrics["step"] += 1
self.metrics["llm_starts"] +=... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html |
995936f8b4fc-8 | self.records["on_llm_token_records"].append(resp)
self.records["action_records"].append(resp)
self.mlflg.jsonf(resp, f"llm_new_tokens_{llm_streams}")
[docs] def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self.metrics["step"] += 1
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html |
995936f8b4fc-9 | dependency_tree = generation_resp["dependency_tree"]
entities = generation_resp["entities"]
self.mlflg.html(dependency_tree, "dep-" + hash_string(generation.text))
self.mlflg.html(entities, "ent-" + hash_string(generation.text))
[docs] def on_llm_error(
self, e... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html |
995936f8b4fc-10 | """Run when chain ends running."""
self.metrics["step"] += 1
self.metrics["chain_ends"] += 1
self.metrics["ends"] += 1
chain_ends = self.metrics["chain_ends"]
resp: Dict[str, Any] = {}
chain_output = ",".join([f"{k}={v}" for k, v in outputs.items()])
resp.update({... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html |
995936f8b4fc-11 | self.records["on_tool_start_records"].append(resp)
self.records["action_records"].append(resp)
self.mlflg.jsonf(resp, f"tool_start_{tool_starts}")
[docs] def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
self.metrics["step"] += 1
self... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html |
995936f8b4fc-12 | self.records["on_text_records"].append(resp)
self.records["action_records"].append(resp)
self.mlflg.jsonf(resp, f"on_text_{text_ctr}")
[docs] def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run when agent ends running."""
self.metrics["step"] += 1
sel... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html |
995936f8b4fc-13 | self.mlflg.metrics(self.metrics, step=self.metrics["step"])
self.records["on_agent_action_records"].append(resp)
self.records["action_records"].append(resp)
self.mlflg.jsonf(resp, f"agent_action_{tool_starts}")
def _create_session_analysis_df(self) -> Any:
"""Create a dataframe with ... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html |
995936f8b4fc-14 | [
"step",
"text",
"token_usage_total_tokens",
"token_usage_prompt_tokens",
"token_usage_completion_tokens",
]
+ complexity_metrics_columns
+ visualizations_columns
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html |
995936f8b4fc-15 | try:
langchain_asset.save(langchain_asset_path)
self.mlflg.artifact(langchain_asset_path)
except ValueError:
try:
langchain_asset.save_agent(langchain_asset_path)
self.mlflg.artifact(langchain_ass... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html |
9eca2a40019e-0 | Source code for langchain.callbacks.streaming_stdout
"""Callback Handler streams to stdout on new llm token."""
import sys
from typing import Any, Dict, List, Union
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult
[docs]class StreamingStdOutCallba... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streaming_stdout.html |
9eca2a40019e-1 | ) -> None:
"""Run when chain errors."""
[docs] def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
[docs] def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streaming_stdout.html |
0fe6c7514e24-0 | Source code for langchain.callbacks.human
from typing import Any, Callable, Dict, Optional
from uuid import UUID
from langchain.callbacks.base import BaseCallbackHandler
def _default_approve(_input: str) -> bool:
msg = (
"Do you approve of the following input? "
"Anything except 'Y'/'Yes' (case-inse... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/human.html |
ea5440cc53c6-0 | Source code for langchain.callbacks.stdout
"""Callback Handler that prints to std out."""
from typing import Any, Dict, List, Optional, Union
from langchain.callbacks.base import BaseCallbackHandler
from langchain.input import print_text
from langchain.schema import AgentAction, AgentFinish, LLMResult
[docs]class StdOu... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/stdout.html |
ea5440cc53c6-1 | """Print out that we finished a chain."""
print("\n\033[1m> Finished chain.\033[0m")
[docs] def on_chain_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Do nothing."""
pass
[docs] def on_tool_start(
self,
serialized: Dict[str... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/stdout.html |
ea5440cc53c6-2 | color: Optional[str] = None,
end: str = "",
**kwargs: Any,
) -> None:
"""Run when agent ends."""
print_text(text, color=color if color else self.color, end=end)
[docs] def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/stdout.html |
e4d304b7ac42-0 | Source code for langchain.callbacks.arize_callback
from datetime import datetime
from typing import Any, Dict, List, Optional, Union
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.utils import import_pandas
from langchain.schema import AgentAction, AgentFinish, LLMResult
[docs]class A... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/arize_callback.html |
e4d304b7ac42-1 | self.arize_client = Client(space_key=SPACE_KEY, api_key=API_KEY)
if SPACE_KEY == "SPACE_KEY" or API_KEY == "API_KEY":
raise ValueError("❌ CHANGE SPACE AND API KEYS")
else:
print("✅ Arize client setup done! Now you can start using Arize!")
[docs] def on_llm_start(
self,... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/arize_callback.html |
e4d304b7ac42-2 | for generations in response.generations:
for generation in generations:
prompt = self.prompt_records[self.step]
self.step = self.step + 1
prompt_embedding = pd.Series(
self.generator.generate_embeddings(
text_col=pd.... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/arize_callback.html |
e4d304b7ac42-3 | "completion_token",
"total_token",
],
prompt_column_names=prompt_columns,
response_column_names=response_columns,
)
response_from_arize = self.arize_client.log(
dataframe=df,
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/arize_callback.html |
e4d304b7ac42-4 | pass
[docs] def on_tool_end(
self,
output: str,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
pass
[docs] def on_tool_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> ... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/arize_callback.html |
ff057f45a2ed-0 | Source code for langchain.callbacks.comet_ml_callback
import tempfile
from copy import deepcopy
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence, Union
import langchain
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.utils import (
BaseMetad... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html |
ff057f45a2ed-1 | "automated_readability_index": textstat.automated_readability_index(text),
"dale_chall_readability_score": textstat.dale_chall_readability_score(text),
"difficult_words": textstat.difficult_words(text),
"linsear_write_formula": textstat.linsear_write_formula(text),
"gunning_fog": textsta... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html |
ff057f45a2ed-2 | stream_logs (bool): Whether to stream callback actions to Comet
This handler will utilize the associated callback method and formats
the input of each callback function with metadata regarding the state of LLM run,
and adds the response to the list of records for both the {method}_records and
action. It... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html |
ff057f45a2ed-3 | "based on updates to `langchain`. Please report any issues to "
"https://github.com/comet-ml/issue-tracking/issues with the tag "
"`langchain`."
)
self.comet_ml.LOGGER.warning(warning)
self.callback_columns: list = []
self.action_records: list = []
self.co... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html |
ff057f45a2ed-4 | self.llm_streams += 1
resp = self._init_resp()
resp.update({"action": "on_llm_new_token", "token": token})
resp.update(self.get_custom_callback_meta())
self.action_records.append(resp)
[docs] def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM end... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html |
ff057f45a2ed-5 | [docs] def on_llm_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Run when LLM errors."""
self.step += 1
self.errors += 1
[docs] def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> Non... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html |
ff057f45a2ed-6 | if isinstance(chain_output_val, str):
output_resp = deepcopy(resp)
if self.stream_logs:
self._log_stream(chain_output_val, resp, self.step)
output_resp.update({chain_output_key: chain_output_val})
self.action_records.append(output_resp)... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html |
ff057f45a2ed-7 | resp.update(self.get_custom_callback_meta())
if self.stream_logs:
self._log_stream(output, resp, self.step)
resp.update({"output": output})
self.action_records.append(resp)
[docs] def on_tool_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> N... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html |
ff057f45a2ed-8 | """Run on agent action."""
self.step += 1
self.tool_starts += 1
self.starts += 1
tool = action.tool
tool_input = str(action.tool_input)
log = action.log
resp = self._init_resp()
resp.update({"action": "on_agent_action", "log": log, "tool": tool})
r... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html |
ff057f45a2ed-9 | return resp
[docs] def flush_tracker(
self,
langchain_asset: Any = None,
task_type: Optional[str] = "inference",
workspace: Optional[str] = None,
project_name: Optional[str] = "comet-langchain-demo",
tags: Optional[Sequence] = None,
name: Optional[str] = None,
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html |
ff057f45a2ed-10 | self.experiment.log_text(prompt, metadata=metadata, step=step)
def _log_model(self, langchain_asset: Any) -> None:
model_parameters = self._get_llm_parameters(langchain_asset)
self.experiment.log_parameters(model_parameters, prefix="model")
langchain_asset_path = Path(self.temp_dir.name, "mo... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html |
ff057f45a2ed-11 | # Log the langchain low-level records as a JSON file directly
self.experiment.log_asset_data(
self.action_records, "langchain-action_records.json", metadata=metadata
)
except Exception:
self.comet_ml.LOGGER.warning(
"Failed to log session data ... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html |
ff057f45a2ed-12 | )
self.experiment.log_asset_data(
html,
name=f"langchain-viz-{visualization}-{idx}.html",
metadata={"prompt": prompt},
step=idx,
)
except Exception as e:
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html |
ff057f45a2ed-13 | self.reset_callback_meta()
self.temp_dir = tempfile.TemporaryDirectory()
def _create_session_analysis_dataframe(self, langchain_asset: Any = None) -> dict:
pd = import_pandas()
llm_parameters = self._get_llm_parameters(langchain_asset)
num_generations_per_prompt = llm_parameters.get(... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html |
e872b65e71a3-0 | Source code for langchain.callbacks.infino_callback
import time
from typing import Any, Dict, List, Optional, Union
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult
[docs]def import_infino() -> Any:
try:
from infinopy import InfinoClie... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/infino_callback.html |
e872b65e71a3-1 | "labels": {
"model_id": self.model_id,
"model_version": self.model_version,
},
}
if self.verbose:
print(f"Tracking {key} with Infino: {payload}")
# Append to Infino time series only if is_ts is True, otherwise
# append to Infino log... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/infino_callback.html |
e872b65e71a3-2 | # Track success or error flag.
self._send_to_infino("error", self.error)
# Track token usage.
if (response.llm_output is not None) and isinstance(response.llm_output, Dict):
token_usage = response.llm_output["token_usage"]
if token_usage is not None:
promp... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/infino_callback.html |
e872b65e71a3-3 | self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
"""Do nothing when tool starts."""
pass
[docs] def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Do nothing when agent takes a specific action."""
pass
[docs]... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/infino_callback.html |
642632fd9f06-0 | Source code for langchain.callbacks.promptlayer_callback
"""Callback handler for promptlayer."""
from __future__ import annotations
import datetime
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple
from uuid import UUID
from langchain.callbacks.base import BaseCallbackHandler
from langchain.s... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/promptlayer_callback.html |
642632fd9f06-1 | **kwargs: Any,
) -> Any:
self.runs[run_id] = {
"messages": [self._create_message_dicts(m)[0] for m in messages],
"invocation_params": kwargs.get("invocation_params", {}),
"name": ".".join(serialized["id"]),
"request_start_time": datetime.datetime.now().timesta... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/promptlayer_callback.html |
642632fd9f06-2 | resp = {
"text": generation.text,
"llm_output": response.llm_output,
}
model_params = run_info.get("invocation_params", {})
is_chat_model = run_info.get("messages", None) is not None
model_input = (
run_info.get("messages", ... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/promptlayer_callback.html |
642632fd9f06-3 | message_dict = {"role": "system", "content": message.content}
elif isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
else:
raise ValueError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/promptlayer_callback.html |
ccfb58d20ead-0 | Source code for langchain.callbacks.streaming_aiter_final_only
from __future__ import annotations
from typing import Any, Dict, List, Optional
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
from langchain.schema import LLMResult
DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
[docs... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streaming_aiter_final_only.html |
ccfb58d20ead-1 | """
super().__init__()
if answer_prefix_tokens is None:
self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
else:
self.answer_prefix_tokens = answer_prefix_tokens
if strip_tokens:
self.answer_prefix_tokens_stripped = [
token.strip(... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streaming_aiter_final_only.html |
ccfb58d20ead-2 | # If yes, then put tokens from now on
if self.answer_reached:
self.queue.put_nowait(token) | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streaming_aiter_final_only.html |
556b013d7dd2-0 | Source code for langchain.callbacks.flyte_callback
"""FlyteKit callback handler."""
from __future__ import annotations
import logging
from copy import deepcopy
from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Union
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.utils import (... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/flyte_callback.html |
556b013d7dd2-1 | files serialized to HTML string.
"""
resp: Dict[str, Any] = {}
textstat = import_textstat()
text_complexity_metrics = {
"flesch_reading_ease": textstat.flesch_reading_ease(text),
"flesch_kincaid_grade": textstat.flesch_kincaid_grade(text),
"smog_index": textstat.smog_index(text),... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/flyte_callback.html |
556b013d7dd2-2 | doc, style="dep", jupyter=False, page=True
)
ent_out = spacy.displacy.render( # type: ignore
doc, style="ent", jupyter=False, page=True
)
text_visualizations = {
"dependency_tree": dep_out,
"entities": ent_out,
}
resp.update(text_visua... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/flyte_callback.html |
556b013d7dd2-3 | self.deck = flytekit.Deck(
"LangChain Metrics",
self.markdown_renderer().to_html("## LangChain Metrics"),
)
[docs] def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts."""
self.step += 1
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/flyte_callback.html |
556b013d7dd2-4 | self.deck.append(self.markdown_renderer().to_html("### LLM End"))
self.deck.append(self.table_renderer().to_html(self.pandas.DataFrame([resp])))
for generations in response.generations:
for generation in generations:
generation_resp = deepcopy(resp)
generation... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/flyte_callback.html |
556b013d7dd2-5 | self.errors += 1
[docs] def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
self.step += 1
self.chain_starts += 1
self.starts += 1
resp: Dict[str, Any] = {}
resp.update(... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/flyte_callback.html |
556b013d7dd2-6 | )
[docs] def on_chain_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Run when chain errors."""
self.step += 1
self.errors += 1
[docs] def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/flyte_callback.html |
556b013d7dd2-7 | ) -> None:
"""Run when tool errors."""
self.step += 1
self.errors += 1
[docs] def on_text(self, text: str, **kwargs: Any) -> None:
"""
Run when agent is ending.
"""
self.step += 1
self.text_ctr += 1
resp: Dict[str, Any] = {}
resp.update(... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/flyte_callback.html |
556b013d7dd2-8 | resp: Dict[str, Any] = {}
resp.update(
{
"action": "on_agent_action",
"tool": action.tool,
"tool_input": action.tool_input,
"log": action.log,
}
)
resp.update(self.get_custom_callback_meta())
self.dec... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/flyte_callback.html |
b6234a04a4e2-0 | Source code for langchain.callbacks.base
"""Base callback handler that can be used to handle callbacks in langchain."""
from __future__ import annotations
from typing import Any, Dict, List, Optional, Sequence, Union
from uuid import UUID
from langchain.schema import AgentAction, AgentFinish, BaseMessage, Document, LLM... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/base.html |
b6234a04a4e2-1 | def on_llm_error(
self,
error: Union[Exception, KeyboardInterrupt],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
"""Run when LLM errors."""
class ChainManagerMixin:
"""Mixin for chain callbacks."""
def on_chain_end(
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/base.html |
b6234a04a4e2-2 | parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
"""Run when tool ends running."""
def on_tool_error(
self,
error: Union[Exception, KeyboardInterrupt],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/base.html |
b6234a04a4e2-3 | def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Dict[str, Any],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
**kwargs: Any,
) -> Any:
"""Run when chain starts running."""
def on_tool... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/base.html |
b6234a04a4e2-4 | return False
@property
def ignore_agent(self) -> bool:
"""Whether to ignore agent callbacks."""
return False
@property
def ignore_retriever(self) -> bool:
"""Whether to ignore retriever callbacks."""
return False
@property
def ignore_chat_model(self) -> bool:
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/base.html |
b6234a04a4e2-5 | **kwargs: Any,
) -> None:
"""Run on new LLM token. Only available when streaming is enabled."""
[docs] async def on_llm_end(
self,
response: LLMResult,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
"""Run when ... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/base.html |
b6234a04a4e2-6 | ) -> None:
"""Run when chain errors."""
[docs] async def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
**kwargs: Any,
) -> None:
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/base.html |
b6234a04a4e2-7 | finish: AgentFinish,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
"""Run on agent end."""
[docs] async def on_retriever_start(
self,
query: str,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = Non... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/base.html |
b6234a04a4e2-8 | """Initialize callback manager."""
self.handlers: List[BaseCallbackHandler] = handlers
self.inheritable_handlers: List[BaseCallbackHandler] = (
inheritable_handlers or []
)
self.parent_run_id: Optional[UUID] = parent_run_id
self.tags = tags or []
self.inherita... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/base.html |
b6234a04a4e2-9 | self.remove_tags([tag])
self.tags.extend(tags)
if inherit:
self.inheritable_tags.extend(tags)
def remove_tags(self, tags: List[str]) -> None:
    """Remove tags from this callback manager.

    Each tag is removed from ``self.tags``. It is also dropped from
    ``self.inheritable_tags``, but only when present there: tags added
    with ``inherit=False`` live only in ``self.tags``, and the original
    unconditional ``list.remove`` would raise ``ValueError`` for them
    after ``self.tags`` was already mutated, leaving the two lists
    inconsistent.

    Args:
        tags: Tags to remove.

    Raises:
        ValueError: If a tag is not present in ``self.tags``.
    """
    for tag in tags:
        self.tags.remove(tag)
        # Guard: the tag may have been added as non-inheritable.
        if tag in self.inheritable_tags:
            self.inheritable_tags.remove(tag)
8fe48d83594c-0 | Source code for langchain.callbacks.openai_info
"""Callback Handler that prints to std out."""
from typing import Any, Dict, List
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import LLMResult
MODEL_COST_PER_1K_TOKENS = {
# GPT-4 input
"gpt-4": 0.03,
"gpt-4-0314": 0.03,
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/openai_info.html |
8fe48d83594c-1 | "gpt-3.5-turbo-16k-0613": 0.003,
# GPT-3.5 output
"gpt-3.5-turbo-completion": 0.002,
"gpt-3.5-turbo-0301-completion": 0.002,
"gpt-3.5-turbo-0613-completion": 0.002,
"gpt-3.5-turbo-16k-completion": 0.004,
"gpt-3.5-turbo-16k-0613-completion": 0.004,
# Others
"gpt-35-turbo": 0.002, # Azure... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/openai_info.html |
8fe48d83594c-2 | is_completion: bool = False,
) -> str:
"""
Standardize the model name to a format that can be used in the OpenAI API.
Args:
model_name: Model name to standardize.
is_completion: Whether the model is used for completion or not.
Defaults to False.
Returns:
Standardized ... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/openai_info.html |
8fe48d83594c-3 | [docs]class OpenAICallbackHandler(BaseCallbackHandler):
"""Callback Handler that tracks OpenAI info."""
total_tokens: int = 0
prompt_tokens: int = 0
completion_tokens: int = 0
successful_requests: int = 0
total_cost: float = 0.0
def __repr__(self) -> str:
return (
f"Token... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/openai_info.html |
8fe48d83594c-4 | prompt_tokens = token_usage.get("prompt_tokens", 0)
model_name = standardize_model_name(response.llm_output.get("model_name", ""))
if model_name in MODEL_COST_PER_1K_TOKENS:
completion_cost = get_openai_token_cost_for_model(
model_name, completion_tokens, is_completion=True
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/openai_info.html |
e094b9792870-0 | Source code for langchain.callbacks.streaming_stdout_final_only
"""Callback Handler streams to stdout on new llm token."""
import sys
from typing import Any, Dict, List, Optional
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
[docs... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streaming_stdout_final_only.html |
e094b9792870-1 | """
super().__init__()
if answer_prefix_tokens is None:
self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
else:
self.answer_prefix_tokens = answer_prefix_tokens
if strip_tokens:
self.answer_prefix_tokens_stripped = [
token.strip(... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streaming_stdout_final_only.html |
516ae6354a29-0 | Source code for langchain.callbacks.whylabs_callback
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, Generation, LLMResult
from langchain.u... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/whylabs_callback.html |
516ae6354a29-1 | return langkit
[docs]class WhyLabsCallbackHandler(BaseCallbackHandler):
"""WhyLabs CallbackHandler."""
def __init__(self, logger: Logger):
"""Initiate the rolling logger"""
super().__init__()
self.logger = logger
diagnostic_logger.info(
"Initialized WhyLabs callback h... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/whylabs_callback.html |
516ae6354a29-2 | """Do nothing."""
def on_chain_error(
    self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
    """Do nothing."""
[docs] def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
... | https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/whylabs_callback.html |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.