id stringlengths 14 15 | text stringlengths 35 2.51k | source stringlengths 61 154 |
|---|---|---|
60b11c06ae26-1 | )
chains = [
create_draft_answer_chain,
list_assertions_chain,
check_assertions_chain,
revised_answer_chain,
]
question_to_checked_assertions_chain = SequentialChain(
chains=chains,
input_variables=["question"],
output_variables=["revised_statement"],
... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_checker/base.html |
60b11c06ae26-2 | def raise_deprecation(cls, values: Dict) -> Dict:
if "llm" in values:
warnings.warn(
"Directly instantiating an LLMCheckerChain with an llm is deprecated. "
"Please instantiate with question_to_checked_assertions_chain "
"or using the from_llm class me... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_checker/base.html |
60b11c06ae26-3 | question = inputs[self.input_key]
output = self.question_to_checked_assertions_chain(
{"question": question}, callbacks=_run_manager.get_child()
)
return {self.output_key: output["revised_statement"]}
@property
def _chain_type(self) -> str:
return "llm_checker_chain"
... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_checker/base.html |
d1a2c859779e-0 | Source code for langchain.chains.openai_functions.utils
from typing import Any, Dict
def _resolve_schema_references(schema: Any, definitions: Dict[str, Any]) -> Any:
"""
Resolves the $ref keys in a JSON schema object using the provided definitions.
"""
if isinstance(schema, list):
for i, item in... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/utils.html |
19e43121e35a-0 | Source code for langchain.chains.openai_functions.citation_fuzzy_match
from typing import Iterator, List
from pydantic import BaseModel, Field
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import get_llm_kwargs
from langchain... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/citation_fuzzy_match.html |
19e43121e35a-1 | [docs] def get_spans(self, context: str) -> Iterator[str]:
for quote in self.substring_quote:
yield from self._get_span(quote, context)
[docs]class QuestionAnswer(BaseModel):
"""A question and its answer as a list of facts each one should have a source.
each sentence contains a body and a... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/citation_fuzzy_match.html |
19e43121e35a-2 | HumanMessage(
content=(
"Tips: Make sure to cite your sources, "
"and use the exact words from the context."
)
),
]
prompt = ChatPromptTemplate(messages=messages)
chain = LLMChain(
llm=llm,
prompt=prompt,
llm_kwargs=llm_... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/citation_fuzzy_match.html |
8d59e9264cdc-0 | Source code for langchain.chains.openai_functions.extraction
from typing import Any, List
from pydantic import BaseModel
from langchain.base_language import BaseLanguageModel
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import (
_conv... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/extraction.html |
8d59e9264cdc-1 | output_parser = JsonKeyOutputFunctionsParser(key_name="info")
llm_kwargs = get_llm_kwargs(function)
chain = LLMChain(
llm=llm,
prompt=prompt,
llm_kwargs=llm_kwargs,
output_parser=output_parser,
)
return chain
[docs]def create_extraction_chain_pydantic(
pydantic_schema... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/extraction.html |
e0ac88b1c1de-0 | Source code for langchain.chains.openai_functions.openapi
import json
import re
from collections import defaultdict
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import requests
from openapi_schema_pydantic import Parameter
from requests import Response
from langchain import BasePromptTemplate, L... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/openapi.html |
e0ac88b1c1de-1 | new_val = f"{clean_param}=" + sep.join(val)
else:
new_val = ",".join(val)
elif isinstance(val, dict):
kv_sep = "=" if param[-1] == "*" else ","
kv_strs = [kv_sep.join((k, v)) for k, v in val.items()]
if param[0] == ".":
sep = "."
... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/openapi.html |
e0ac88b1c1de-2 | [docs]def openapi_spec_to_openai_fn(
spec: OpenAPISpec,
) -> Tuple[List[Dict[str, Any]], Callable]:
"""Convert a valid OpenAPI spec to the JSON Schema format expected for OpenAI
functions.
Args:
spec: OpenAPI spec to convert.
Returns:
Tuple of the OpenAI functions JSON schema and... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/openapi.html |
e0ac88b1c1de-3 | # TODO: Support more MIME types.
if request_body and request_body.content:
media_types = {}
for media_type, media_type_object in request_body.content.items():
if media_type_object.media_type_schema:
schema = spec.get_schema(media_ty... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/openapi.html |
e0ac88b1c1de-4 | url = _format_url(url, path_params)
if "data" in fn_args and isinstance(fn_args["data"], dict):
fn_args["data"] = json.dumps(fn_args["data"])
_kwargs = {**fn_args, **kwargs}
if headers is not None:
if "headers" in _kwargs:
_kwargs["headers"].update(headers... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/openapi.html |
e0ac88b1c1de-5 | _text = f"Calling endpoint {_pretty_name} with arguments:\n" + _pretty_args
_run_manager.on_text(_text)
api_response: Response = self.request_method(name, args)
if api_response.status_code != 200:
response = (
f"{api_response.status_code}: {api_response.reason}"
... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/openapi.html |
e0ac88b1c1de-6 | for conversion in (
OpenAPISpec.from_url,
OpenAPISpec.from_file,
OpenAPISpec.from_text,
):
try:
spec = conversion(spec) # type: ignore[arg-type]
break
except Exception: # noqa: E722
pass
if isin... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/openapi.html |
7f46dce6bf47-0 | Source code for langchain.chains.openai_functions.qa_with_structure
from typing import Any, List, Optional, Type, Union
from pydantic import BaseModel, Field
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import get_llm_kwargs... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/qa_with_structure.html |
7f46dce6bf47-1 | Returns:
"""
if output_parser == "pydantic":
if not (isinstance(schema, type) and issubclass(schema, BaseModel)):
raise ValueError(
"Must provide a pydantic class for schema when output_parser is "
"'pydantic'."
)
_output_parser: BaseLLMOut... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/qa_with_structure.html |
7f46dce6bf47-2 | output_parser=_output_parser,
)
return chain
[docs]def create_qa_with_sources_chain(llm: BaseLanguageModel, **kwargs: Any) -> LLMChain:
"""Create a question answering chain that returns an answer with sources.
Args:
llm: Language model to use for the chain.
**kwargs: Keyword arguments to... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/qa_with_structure.html |
0eed8317e464-0 | Source code for langchain.chains.openai_functions.tagging
from typing import Any
from langchain.base_language import BaseLanguageModel
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import _convert_schema, get_llm_kwargs
from langchain.outp... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/tagging.html |
0eed8317e464-1 | pydantic_schema: Any, llm: BaseLanguageModel
) -> Chain:
"""Creates a chain that extracts information from a passage.
Args:
pydantic_schema: The pydantic schema of the entities to extract.
llm: The language model to use.
Returns:
Chain (LLMChain) that can be used to extract informati... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/tagging.html |
450aec87b08f-0 | Source code for langchain.chains.hyde.base
"""Hypothetical Document Embeddings.
https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations
from typing import Any, Dict, List, Optional
import numpy as np
from pydantic import Extra
from langchain.base_language import BaseLanguageModel
from langchain.callback... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/hyde/base.html |
450aec87b08f-1 | return list(np.array(embeddings).mean(axis=0))
[docs] def embed_query(self, text: str) -> List[float]:
"""Generate a hypothetical document and embedded it."""
var_name = self.llm_chain.input_keys[0]
result = self.llm_chain.generate([{var_name: text}])
documents = [generation.text for ... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/hyde/base.html |
121545ffb645-0 | Source code for langchain.chains.constitutional_ai.models
"""Models for the Constitutional AI chain."""
from pydantic import BaseModel
[docs]class ConstitutionalPrinciple(BaseModel):
"""Class for a constitutional principle."""
critique_request: str
revision_request: str
name: str = "Constitutional Princ... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/constitutional_ai/models.html |
608208f63afd-0 | Source code for langchain.chains.constitutional_ai.base
"""Chain for applying constitutional principles to the outputs of another chain."""
from typing import Any, Dict, List, Optional
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/constitutional_ai/base.html |
608208f63afd-1 | critique_chain: LLMChain
revision_chain: LLMChain
return_intermediate_steps: bool = False
[docs] @classmethod
def get_principles(
cls, names: Optional[List[str]] = None
) -> List[ConstitutionalPrinciple]:
if names is None:
return list(PRINCIPLES.values())
else:
... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/constitutional_ai/base.html |
608208f63afd-2 | ) -> Dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
response = self.chain.run(
**inputs,
callbacks=_run_manager.get_child("original"),
)
initial_response = response
input_prompt = self.chain.prompt.format(**inpu... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/constitutional_ai/base.html |
608208f63afd-3 | _run_manager.on_text(
text=f"Applying {constitutional_principle.name}..." + "\n\n",
verbose=self.verbose,
color="green",
)
_run_manager.on_text(
text="Critique: " + critique + "\n\n",
verbose=self.verbose,
... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/constitutional_ai/base.html |
59d982df3663-0 | Source code for langchain.chains.conversation.base
"""Chain that carries on a conversation and calls an LLM."""
from typing import Dict, List
from pydantic import Extra, Field, root_validator
from langchain.chains.conversation.prompt import PROMPT
from langchain.chains.llm import LLMChain
from langchain.memory.buffer i... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversation/base.html |
59d982df3663-1 | f"The input key {input_key} was also found in the memory keys "
f"({memory_keys}) - please provide keys that don't overlap."
)
prompt_variables = values["prompt"].input_variables
expected_keys = memory_keys + [input_key]
if set(expected_keys) != set(prompt_variables):... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversation/base.html |
ab1c139ace61-0 | Source code for langchain.chains.combine_documents.refine
"""Combining documents by doing a first pass and then refining on more documents."""
from __future__ import annotations
from typing import Any, Dict, List, Tuple
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import Callbacks
... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/refine.html |
ab1c139ace61-1 | """Expect input key.
:meta private:
"""
_output_keys = super().output_keys
if self.return_intermediate_steps:
_output_keys = _output_keys + ["intermediate_steps"]
return _output_keys
[docs] class Config:
"""Configuration for this pydantic object."""
... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/refine.html |
ab1c139ace61-2 | )
return values
[docs] def combine_docs(
self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any
) -> Tuple[str, dict]:
"""Combine by mapping first chain over all, then stuffing into final chain."""
inputs = self._construct_initial_inputs(docs, **kwargs)
res... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/refine.html |
ab1c139ace61-3 | if self.return_intermediate_steps:
extra_return_dict = {"intermediate_steps": refine_steps}
else:
extra_return_dict = {}
return res, extra_return_dict
def _construct_refine_inputs(self, doc: Document, res: str) -> Dict[str, Any]:
return {
self.document_var... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/refine.html |
6d301d62c6b3-0 | Source code for langchain.chains.combine_documents.stuff
"""Chain that combines documents by stuffing into context."""
from typing import Any, Dict, List, Optional, Tuple
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import Callbacks
from langchain.chains.combine_documents.base impo... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/stuff.html |
6d301d62c6b3-1 | if "document_variable_name" not in values:
if len(llm_chain_variables) == 1:
values["document_variable_name"] = llm_chain_variables[0]
else:
raise ValueError(
"document_variable_name must be provided if there are "
"multiple... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/stuff.html |
6d301d62c6b3-2 | """Stuff all documents into one prompt and pass to LLM."""
inputs = self._get_inputs(docs, **kwargs)
# Call predict on the LLM.
return self.llm_chain.predict(callbacks=callbacks, **inputs), {}
[docs] async def acombine_docs(
self, docs: List[Document], callbacks: Callbacks = None, **k... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/stuff.html |
3834d864138d-0 | Source code for langchain.chains.combine_documents.base
"""Base interface for chains combining documents."""
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple
from pydantic import Field
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManag... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/base.html |
3834d864138d-1 | :meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
[docs] def prompt_length(self, docs: List[Document], **kwargs: Any) -> Optional[int]:
"""Return th... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/base.html |
3834d864138d-2 | self,
inputs: Dict[str, List[Document]],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
docs = inputs[self.input_key]
# Other keys are assumed to be needed fo... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/base.html |
3834d864138d-3 | docs = self.text_splitter.create_documents([document])
# Other keys are assumed to be needed for LLM prediction
other_keys: Dict = {k: v for k, v in inputs.items() if k != self.input_key}
other_keys[self.combine_docs_chain.input_key] = docs
return self.combine_docs_chain(
oth... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/base.html |
79a748b73289-0 | Source code for langchain.chains.combine_documents.map_reduce
"""Combining documents by mapping a chain over them first, then combining results."""
from __future__ import annotations
from typing import Any, Callable, Dict, List, Optional, Protocol, Tuple
from pydantic import Extra, root_validator
from langchain.callbac... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/map_reduce.html |
79a748b73289-1 | new_result_doc_list.append(_sub_result_docs)
return new_result_doc_list
def _collapse_docs(
docs: List[Document],
combine_document_func: CombineDocsProtocol,
**kwargs: Any,
) -> Document:
result = combine_document_func(docs, **kwargs)
combined_metadata = {k: str(v) for k, v in docs[0].metadata.i... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/map_reduce.html |
79a748b73289-2 | _output_keys = super().output_keys
if self.return_intermediate_steps:
_output_keys = _output_keys + ["intermediate_steps"]
return _output_keys
[docs] class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
[do... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/map_reduce.html |
79a748b73289-3 | if self.collapse_document_chain is not None:
return self.collapse_document_chain
else:
return self.combine_document_chain
[docs] def combine_docs(
self,
docs: List[Document],
token_max: int = 3000,
callbacks: Callbacks = None,
**kwargs: Any,
... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/map_reduce.html |
79a748b73289-4 | )
def _process_results_common(
self,
results: List[Dict],
docs: List[Document],
token_max: int = 3000,
callbacks: Callbacks = None,
**kwargs: Any,
) -> Tuple[List[Document], dict]:
question_result_key = self.llm_chain.output_key
result_docs = [
... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/map_reduce.html |
79a748b73289-5 | self,
results: List[Dict],
docs: List[Document],
token_max: int = 3000,
callbacks: Callbacks = None,
**kwargs: Any,
) -> Tuple[str, dict]:
result_docs, extra_return_dict = self._process_results_common(
results, docs, token_max, callbacks=callbacks, **kwarg... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/map_reduce.html |
cbdfc24aa8e9-0 | Source code for langchain.chains.combine_documents.map_rerank
"""Combining documents by mapping a chain over them first, then reranking results."""
from __future__ import annotations
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, cast
from pydantic import Extra, root_validator
from langchain.call... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/map_rerank.html |
cbdfc24aa8e9-1 | if self.metadata_keys is not None:
_output_keys += self.metadata_keys
return _output_keys
[docs] @root_validator()
def validate_llm_output(cls, values: Dict) -> Dict:
"""Validate that the combine chain outputs a dictionary."""
output_parser = values["llm_chain"].prompt.output_... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/map_rerank.html |
cbdfc24aa8e9-2 | "multiple llm_chain input_variables"
)
else:
llm_chain_variables = values["llm_chain"].prompt.input_variables
if values["document_variable_name"] not in llm_chain_variables:
raise ValueError(
f"document_variable_name {values['document_v... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/map_rerank.html |
cbdfc24aa8e9-3 | def _process_results(
self,
docs: List[Document],
results: Sequence[Union[str, List[str], Dict[str, str]]],
) -> Tuple[str, dict]:
typed_results = cast(List[dict], results)
sorted_res = sorted(
zip(typed_results, docs), key=lambda x: -int(x[0][self.rank_key])
... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/map_rerank.html |
f37b780196ef-0 | Source code for langchain.chains.query_constructor.ir
"""Internal representation of a structured query language."""
from __future__ import annotations
from abc import ABC, abstractmethod
from enum import Enum
from typing import Any, List, Optional, Sequence, Union
from pydantic import BaseModel
[docs]class Visitor(ABC)... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/ir.html |
f37b780196ef-1 | snake_case = ""
for i, char in enumerate(name):
if char.isupper() and i != 0:
snake_case += "_" + char.lower()
else:
snake_case += char.lower()
return snake_case
[docs]class Expr(BaseModel):
[docs] def accept(self, visitor: Visitor) -> Any:
return getattr(visit... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/ir.html |
bf89a9d0ec71-0 | Source code for langchain.chains.query_constructor.schema
from pydantic import BaseModel
[docs]class AttributeInfo(BaseModel):
"""Information about a data source attribute."""
name: str
description: str
type: str
[docs] class Config:
"""Configuration for this pydantic object."""
arbit... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/schema.html |
1d550d62b351-0 | Source code for langchain.chains.query_constructor.parser
import datetime
from typing import Any, Optional, Sequence, Union
try:
import lark
from packaging import version
if version.parse(lark.__version__) < version.parse("1.1.5"):
raise ValueError(
f"Lark should be at least version 1.1.... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/parser.html |
1d550d62b351-1 | %import common.WS
%ignore WS
"""
@v_args(inline=True)
class QueryTransformer(Transformer):
"""Transforms a query string into an IR representation
(intermediate representation)."""
def __init__(
self,
*args: Any,
allowed_comparators: Optional[Sequence[Comparator]] = None,
... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/parser.html |
1d550d62b351-2 | if func_name not in self.allowed_operators:
raise ValueError(
f"Received disallowed operator {func_name}. Allowed operators"
f" are {self.allowed_operators}"
)
return Operator(func_name)
else:
raise V... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/parser.html |
1d550d62b351-3 | Lark parser for the query language.
"""
transformer = QueryTransformer(
allowed_comparators=allowed_comparators, allowed_operators=allowed_operators
)
return Lark(GRAMMAR, parser="lalr", transformer=transformer, start="program") | https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/parser.html |
d09e7d01eaf7-0 | Source code for langchain.chains.query_constructor.base
"""LLM Chain for turning a user text query into a structured query."""
from __future__ import annotations
import json
from typing import Any, Callable, List, Optional, Sequence
from langchain import BasePromptTemplate, FewShotPromptTemplate, LLMChain
from langchai... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/base.html |
d09e7d01eaf7-1 | parsed.pop("limit", None)
return StructuredQuery(
**{k: v for k, v in parsed.items() if k in allowed_keys}
)
except Exception as e:
raise OutputParserException(
f"Parsing text\n{text}\n raised following error:\n{e}"
)
[docs] @cla... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/base.html |
d09e7d01eaf7-2 | if enable_limit:
schema = SCHEMA_WITH_LIMIT.format(
allowed_comparators=" | ".join(allowed_comparators),
allowed_operators=" | ".join(allowed_operators),
)
examples = examples or EXAMPLES_WITH_LIMIT
else:
schema = DEFAULT_SCHEMA.format(
allowed_com... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/base.html |
d09e7d01eaf7-3 | attribute_info: A list of AttributeInfo objects describing
the attributes of the document.
examples: Optional list of examples to use for the chain.
allowed_comparators: An optional list of allowed comparators.
allowed_operators: An optional list of allowed operators.
enable_... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/base.html |
ca9d131bcb58-0 | Source code for langchain.chains.llm_math.base
"""Chain that interprets a prompt and executes python code to do math."""
from __future__ import annotations
import math
import re
import warnings
from typing import Any, Dict, List, Optional
import numexpr
from pydantic import Extra, root_validator
from langchain.base_lan... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_math/base.html |
ca9d131bcb58-1 | if "llm" in values:
warnings.warn(
"Directly instantiating an LLMMathChain with an llm is deprecated. "
"Please instantiate with llm_chain argument or using the from_llm "
"class method."
)
if "llm_chain" not in values and values["llm"]... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_math/base.html |
ca9d131bcb58-2 | ) -> Dict[str, str]:
run_manager.on_text(llm_output, color="green", verbose=self.verbose)
llm_output = llm_output.strip()
text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL)
if text_match:
expression = text_match.group(1)
output = self._evaluate_exp... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_math/base.html |
ca9d131bcb58-3 | elif llm_output.startswith("Answer:"):
answer = llm_output
elif "Answer:" in llm_output:
answer = "Answer: " + llm_output.split("Answer:")[-1]
else:
raise ValueError(f"unknown format from LLM: {llm_output}")
return {self.output_key: answer}
def _call(
... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_math/base.html |
ca9d131bcb58-4 | [docs] @classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: BasePromptTemplate = PROMPT,
**kwargs: Any,
) -> LLMMathChain:
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(llm_chain=llm_chain, **kwargs) | https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_math/base.html |
3b8bef3cb6c1-0 | Source code for langchain.chains.llm_summarization_checker.base
"""Chain for summarization with self-verification."""
from __future__ import annotations
import warnings
from pathlib import Path
from typing import Any, Dict, List, Optional
from pydantic import Extra, root_validator
from langchain.base_language import Ba... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_summarization_checker/base.html |
3b8bef3cb6c1-1 | verbose=verbose,
),
LLMChain(
llm=llm,
prompt=check_assertions_prompt,
output_key="checked_assertions",
verbose=verbose,
),
LLMChain(
llm=llm,
prompt=revised_summary_prompt,
... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_summarization_checker/base.html |
3b8bef3cb6c1-2 | input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
max_checks: int = 2
"""Maximum number of times to check the assertions. Default to double-checking."""
[docs] class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_summarization_checker/base.html |
3b8bef3cb6c1-3 | return [self.output_key]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
all_true = False
count = 0
output =... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_summarization_checker/base.html |
3b8bef3cb6c1-4 | llm,
create_assertions_prompt,
check_assertions_prompt,
revised_summary_prompt,
are_all_true_prompt,
verbose=verbose,
)
return cls(sequential_chain=chain, verbose=verbose, **kwargs) | https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_summarization_checker/base.html |
8108765a5630-0 | Source code for langchain.chains.qa_with_sources.vector_db
"""Question-answering with sources over a vector database."""
import warnings
from typing import Any, Dict, List
from pydantic import Field, root_validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChai... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/vector_db.html |
8108765a5630-1 | for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(
self, inputs: Dict[str, Any], *, run_manager: Call... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/vector_db.html |
47ea70479bbc-0 | Source code for langchain.chains.qa_with_sources.base
"""Question answering with sources over documents."""
from __future__ import annotations
import inspect
import re
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
from pydantic import Extra, root_validator
from langchain.base_language... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/base.html |
47ea70479bbc-1 | def from_llm(
cls,
llm: BaseLanguageModel,
document_prompt: BasePromptTemplate = EXAMPLE_PROMPT,
question_prompt: BasePromptTemplate = QUESTION_PROMPT,
combine_prompt: BasePromptTemplate = COMBINE_PROMPT,
**kwargs: Any,
) -> BaseQAWithSourcesChain:
"""Construc... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/base.html |
47ea70479bbc-2 | """Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.question_key]
@property
def output_keys(self) -> List[str]:
... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/base.html |
47ea70479bbc-3 | docs = self._get_docs(inputs) # type: ignore[call-arg]
answer = self.combine_documents_chain.run(
input_documents=docs, callbacks=_run_manager.get_child(), **inputs
)
if re.search(r"SOURCES:\s", answer):
answer, sources = re.split(r"SOURCES:\s", answer)
else:
... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/base.html |
47ea70479bbc-4 | answer, sources = re.split(r"SOURCES:\s", answer)
else:
sources = ""
result: Dict[str, Any] = {
self.answer_key: answer,
self.sources_answer_key: sources,
}
if self.return_source_documents:
result["source_documents"] = docs
return r... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/base.html |
7e45aed89866-0 | Source code for langchain.chains.qa_with_sources.loading
"""Load question answering with sources chains."""
from __future__ import annotations
from typing import Any, Mapping, Optional, Protocol
from langchain.base_language import BaseLanguageModel
from langchain.chains.combine_documents.base import BaseCombineDocument... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/loading.html |
7e45aed89866-1 | return MapRerankDocumentsChain(
llm_chain=llm_chain,
rank_key=rank_key,
answer_key=answer_key,
document_variable_name=document_variable_name,
**kwargs,
)
def _load_stuff_chain(
llm: BaseLanguageModel,
prompt: BasePromptTemplate = stuff_prompt.PROMPT,
document_prom... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/loading.html |
7e45aed89866-2 | _reduce_llm = reduce_llm or llm
reduce_chain = LLMChain(llm=_reduce_llm, prompt=combine_prompt, verbose=verbose)
combine_document_chain = StuffDocumentsChain(
llm_chain=reduce_chain,
document_variable_name=combine_document_variable_name,
document_prompt=document_prompt,
verbose=v... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/loading.html |
7e45aed89866-3 | refine_llm: Optional[BaseLanguageModel] = None,
verbose: Optional[bool] = None,
**kwargs: Any,
) -> RefineDocumentsChain:
initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
_refine_llm = refine_llm or llm
refine_chain = LLMChain(llm=_refine_llm, prompt=refine_prompt, verbose=... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/loading.html |
7e45aed89866-4 | "refine": _load_refine_chain,
"map_rerank": _load_map_rerank_chain,
}
if chain_type not in loader_mapping:
raise ValueError(
f"Got unsupported chain type: {chain_type}. "
f"Should be one of {loader_mapping.keys()}"
)
_func: LoadingCallable = loader_mapping[cha... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/loading.html |
d223baef8742-0 | Source code for langchain.chains.qa_with_sources.retrieval
"""Question-answering with sources over an index."""
from typing import Any, Dict, List
from pydantic import Field
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.combine_doc... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/retrieval.html |
d223baef8742-1 | return docs[:num_docs]
def _get_docs(
self, inputs: Dict[str, Any], *, run_manager: CallbackManagerForChainRun
) -> List[Document]:
question = inputs[self.question_key]
docs = self.retriever.get_relevant_documents(
question, callbacks=run_manager.get_child()
)
... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/retrieval.html |
ad821afb5814-0 | Source code for langchain.chains.summarize.__init__
"""Load summarizing chains."""
from typing import Any, Mapping, Optional, Protocol
from langchain.base_language import BaseLanguageModel
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce im... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/summarize/__init__.html |
ad821afb5814-1 | combine_prompt: BasePromptTemplate = map_reduce_prompt.PROMPT,
combine_document_variable_name: str = "text",
map_reduce_document_variable_name: str = "text",
collapse_prompt: Optional[BasePromptTemplate] = None,
reduce_llm: Optional[BaseLanguageModel] = None,
collapse_llm: Optional[BaseLanguageModel... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/summarize/__init__.html |
ad821afb5814-2 | collapse_document_chain=collapse_chain,
verbose=verbose,
**kwargs,
)
def _load_refine_chain(
llm: BaseLanguageModel,
question_prompt: BasePromptTemplate = refine_prompts.PROMPT,
refine_prompt: BasePromptTemplate = refine_prompts.REFINE_PROMPT,
document_variable_name: str = "text",
... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/summarize/__init__.html |
ad821afb5814-3 | verbose: Whether chains should be run in verbose mode or not. Note that this
applies to all chains that make up the final chain.
Returns:
A chain to use for summarizing.
"""
loader_mapping: Mapping[str, LoadingCallable] = {
"stuff": _load_stuff_chain,
"map_reduce": _load_... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/summarize/__init__.html |
810ec8286f39-0 | Source code for langchain.chains.qa_generation.base
from __future__ import annotations
import json
from typing import Any, Dict, List, Optional
from pydantic import Field
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base i... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_generation/base.html |
810ec8286f39-1 | def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, List]:
docs = self.text_splitter.create_documents([inputs[self.input_key]])
results = self.llm_chain.generate(
[{"text": d.page_content} for d in docs... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_generation/base.html |
e84c16250f2d-0 | Source code for langchain.chains.api.base
"""Chain that makes API calls and summarizes the responses to answer a question."""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from pydantic import Field, root_validator
from langchain.base_language import BaseLanguageModel
from langchain.ca... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/base.html |
e84c16250f2d-1 | if set(input_vars) != expected_vars:
raise ValueError(
f"Input variables should be {expected_vars}, got {input_vars}"
)
return values
[docs] @root_validator(pre=True)
def validate_api_answer_prompt(cls, values: Dict) -> Dict:
"""Check that api answer prompt... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/base.html |
e84c16250f2d-2 | )
return {self.output_key: answer}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
question = input... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/base.html |
e84c16250f2d-3 | requests_wrapper = TextRequestsWrapper(headers=headers)
get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt)
return cls(
api_request_chain=get_request_chain,
api_answer_chain=get_answer_chain,
requests_wrapper=requests_wrapper,
api_docs=api_doc... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/base.html |
72cd09940d58-0 | Source code for langchain.chains.api.openapi.chain
"""Chain that makes API calls and summarizes the responses to answer a question."""
from __future__ import annotations
import json
from typing import Any, Dict, List, NamedTuple, Optional, cast
from pydantic import BaseModel, Field
from requests import Response
from la... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html |
72cd09940d58-1 | """
return [self.instructions_key]
@property
def output_keys(self) -> List[str]:
    """Return the keys this chain emits.

    Always contains the configured output key; when intermediate
    results are requested, "intermediate_steps" is appended as well.

    :meta private:
    """
    keys = [self.output_key]
    if self.return_intermediate_steps:
        keys.append("intermediate_steps")
    return keys
... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html |
72cd09940d58-2 | path = self._construct_path(args)
body_params = self._extract_body_params(args)
query_params = self._extract_query_params(args)
return {
"url": path,
"data": body_params,
"params": query_params,
}
def _get_output(self, output: str, intermediate_ste... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html |
72cd09940d58-3 | method = getattr(self.requests, self.api_operation.method.value)
api_response: Response = method(**request_args)
if api_response.status_code != 200:
method_str = str(self.api_operation.method.value)
response_text = (
f"{api_response.status_code... | https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.