id stringlengths 14 15 | text stringlengths 35 2.51k | source stringlengths 61 154 |
|---|---|---|
c48fab993cdb-7 | langchain.embeddings.tensorflow_hub
langchain.embeddings.vertexai
langchain.env
langchain.evaluation.agents.trajectory_eval_chain
langchain.evaluation.comparison.eval_chain
langchain.evaluation.criteria.eval_chain
langchain.evaluation.loading
langchain.evaluation.qa.eval_chain
langchain.evaluation.qa.generate_chain
lan... | https://api.python.langchain.com/en/latest/_modules/index.html |
c48fab993cdb-8 | langchain.llms.azureml_endpoint
langchain.llms.bananadev
langchain.llms.base
langchain.llms.baseten
langchain.llms.beam
langchain.llms.bedrock
langchain.llms.cerebriumai
langchain.llms.clarifai
langchain.llms.cohere
langchain.llms.ctransformers
langchain.llms.databricks
langchain.llms.deepinfra
langchain.llms.fake
lang... | https://api.python.langchain.com/en/latest/_modules/index.html |
c48fab993cdb-9 | langchain.math_utils
langchain.memory.buffer
langchain.memory.buffer_window
langchain.memory.chat_memory
langchain.memory.chat_message_histories.cassandra
langchain.memory.chat_message_histories.cosmos_db
langchain.memory.chat_message_histories.dynamodb
langchain.memory.chat_message_histories.file
langchain.memory.chat... | https://api.python.langchain.com/en/latest/_modules/index.html |
c48fab993cdb-10 | langchain.prompts.loading
langchain.prompts.pipeline
langchain.prompts.prompt
langchain.requests
langchain.retrievers.arxiv
langchain.retrievers.azure_cognitive_search
langchain.retrievers.chatgpt_plugin_retriever
langchain.retrievers.contextual_compression
langchain.retrievers.databerry
langchain.retrievers.docarray
l... | https://api.python.langchain.com/en/latest/_modules/index.html |
c48fab993cdb-11 | langchain.schema
langchain.server
langchain.sql_database
langchain.text_splitter
langchain.tools.arxiv.tool
langchain.tools.azure_cognitive_services.form_recognizer
langchain.tools.azure_cognitive_services.image_analysis
langchain.tools.azure_cognitive_services.speech2text
langchain.tools.azure_cognitive_services.text2... | https://api.python.langchain.com/en/latest/_modules/index.html |
c48fab993cdb-12 | langchain.tools.playwright.current_page
langchain.tools.playwright.extract_hyperlinks
langchain.tools.playwright.extract_text
langchain.tools.playwright.get_elements
langchain.tools.playwright.navigate
langchain.tools.playwright.navigate_back
langchain.tools.playwright.utils
langchain.tools.plugin
langchain.tools.power... | https://api.python.langchain.com/en/latest/_modules/index.html |
c48fab993cdb-13 | langchain.vectorstores.annoy
langchain.vectorstores.atlas
langchain.vectorstores.awadb
langchain.vectorstores.azuresearch
langchain.vectorstores.base
langchain.vectorstores.cassandra
langchain.vectorstores.chroma
langchain.vectorstores.clarifai
langchain.vectorstores.clickhouse
langchain.vectorstores.deeplake
langchain... | https://api.python.langchain.com/en/latest/_modules/index.html |
aa2462be5f66-0 | Source code for langchain.utils
"""Generic utility functions."""
import contextlib
import datetime
import importlib
import os
from typing import Any, Callable, Dict, List, Optional, Tuple
from requests import HTTPError, Response
[docs]def get_from_dict_or_env(
data: Dict[str, Any], key: str, env_key: str, default: ... | https://api.python.langchain.com/en/latest/_modules/langchain/utils.html |
aa2462be5f66-1 | for arg_group in arg_groups
]
invalid_groups = [i for i, count in enumerate(counts) if count != 1]
if invalid_groups:
invalid_group_names = [", ".join(arg_groups[i]) for i in invalid_groups]
raise ValueError(
"Exactly one argument i... | https://api.python.langchain.com/en/latest/_modules/langchain/utils.html |
aa2462be5f66-2 | return text
[docs]def comma_list(items: List[Any]) -> str:
return ", ".join(str(item) for item in items)
[docs]@contextlib.contextmanager
def mock_now(dt_value): # type: ignore
"""Context manager for mocking out datetime.now() in unit tests.
Example:
with mock_now(datetime.datetime(2011, 2, 3, 10, 11))... | https://api.python.langchain.com/en/latest/_modules/langchain/utils.html |
4d4d679f8dc3-0 | Source code for langchain.server
"""Script to run langchain-server locally using docker-compose."""
import subprocess
from pathlib import Path
from langchainplus_sdk.cli.main import get_docker_compose_command
[docs]def main() -> None:
"""Run the langchain server locally."""
p = Path(__file__).absolute().parent ... | https://api.python.langchain.com/en/latest/_modules/langchain/server.html |
b6a2e3f4b850-0 | Source code for langchain.cache
"""Beta Feature: base interface for cache."""
from __future__ import annotations
import hashlib
import inspect
import json
import logging
from abc import ABC, abstractmethod
from datetime import timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Option... | https://api.python.langchain.com/en/latest/_modules/langchain/cache.html |
b6a2e3f4b850-1 | Raises:
ValueError: Could not decode json string to list of generations.
Returns:
RETURN_VAL_TYPE: A list of generations.
"""
try:
results = json.loads(generations_json)
return [Generation(**generation_dict) for generation_dict in results]
except json.JSONDecodeError:
... | https://api.python.langchain.com/en/latest/_modules/langchain/cache.html |
b6a2e3f4b850-2 | """Update cache based on prompt and llm_string."""
self._cache[(prompt, llm_string)] = return_val
[docs] def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
self._cache = {}
Base = declarative_base()
[docs]class FullLLMCache(Base): # type: ignore
"""SQLite table for full LLM Cache... | https://api.python.langchain.com/en/latest/_modules/langchain/cache.html |
b6a2e3f4b850-3 | logger.warning(
"Retrieving a cache value that could not be deserialized "
"properly. This is likely due to the cache being in an "
"older format. Please recreate your cache to avoid this "
"error."
)
... | https://api.python.langchain.com/en/latest/_modules/langchain/cache.html |
b6a2e3f4b850-4 | """Initialize by passing in Redis instance."""
try:
from redis import Redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
if not isinstance(redis_... | https://api.python.langchain.com/en/latest/_modules/langchain/cache.html |
b6a2e3f4b850-5 | )
[docs] def clear(self, **kwargs: Any) -> None:
"""Clear cache. If `asynchronous` is True, flush asynchronously."""
asynchronous = kwargs.get("asynchronous", False)
self.redis.flushdb(asynchronous=asynchronous, **kwargs)
[docs]class RedisSemanticCache(BaseCache):
"""Cache that uses Redis... | https://api.python.langchain.com/en/latest/_modules/langchain/cache.html |
b6a2e3f4b850-6 | # return vectorstore client for the specific llm string
if index_name in self._cache_dict:
return self._cache_dict[index_name]
# create new vectorstore client for the specific llm string
try:
self._cache_dict[index_name] = RedisVectorstore.from_existing_index(
... | https://api.python.langchain.com/en/latest/_modules/langchain/cache.html |
b6a2e3f4b850-7 | )
if results:
for document in results:
for text in document.metadata["return_val"]:
generations.append(Generation(text=text))
return generations if generations else None
[docs] def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) ... | https://api.python.langchain.com/en/latest/_modules/langchain/cache.html |
b6a2e3f4b850-8 | # Avoid multiple caches using the same file,
causing different llm model caches to affect each other
def init_gptcache(cache_obj: gptcache.Cache, llm str):
cache_obj.init(
pre_embedding_func=get_prompt,
data_manager=manager_factory(
... | https://api.python.langchain.com/en/latest/_modules/langchain/cache.html |
b6a2e3f4b850-9 | else:
_gptcache.init(
pre_embedding_func=get_prompt,
data_manager=get_data_manager(data_path=llm_string),
)
self.gptcache_dict[llm_string] = _gptcache
return _gptcache
def _get_gptcache(self, llm_string: str) -> Any:
"""Get a cache obje... | https://api.python.langchain.com/en/latest/_modules/langchain/cache.html |
b6a2e3f4b850-10 | if not isinstance(gen, Generation):
raise ValueError(
"GPTCache only supports caching of normal LLM generations, "
f"got {type(gen)}"
)
from gptcache.adapter.api import put
_gptcache = self._get_gptcache(llm_string)
handled_... | https://api.python.langchain.com/en/latest/_modules/langchain/cache.html |
b6a2e3f4b850-11 | [docs]class MomentoCache(BaseCache):
"""Cache that uses Momento as a backend. See https://gomomento.com/"""
def __init__(
self,
cache_client: momento.CacheClient,
cache_name: str,
*,
ttl: Optional[timedelta] = None,
ensure_cache_exists: bool = True,
):
... | https://api.python.langchain.com/en/latest/_modules/langchain/cache.html |
b6a2e3f4b850-12 | self.cache_name = cache_name
self.ttl = ttl
[docs] @classmethod
def from_client_params(
cls,
cache_name: str,
ttl: timedelta,
*,
configuration: Optional[momento.config.Configuration] = None,
auth_token: Optional[str] = None,
**kwargs: Any,
) -> ... | https://api.python.langchain.com/en/latest/_modules/langchain/cache.html |
b6a2e3f4b850-13 | Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model version and settings.
Raises:
SdkException: Momento service or network error
Returns:
Optional[RETURN_VAL_TYPE]: A list of language model generations.
... | https://api.python.langchain.com/en/latest/_modules/langchain/cache.html |
b6a2e3f4b850-14 | from momento.responses import CacheSet
if isinstance(set_response, CacheSet.Success):
pass
elif isinstance(set_response, CacheSet.Error):
raise set_response.inner_exception
else:
raise Exception(f"Unexpected response: {set_response}")
[docs] def clear(self,... | https://api.python.langchain.com/en/latest/_modules/langchain/cache.html |
07683b2d904f-0 | Source code for langchain.math_utils
"""Math utils."""
from typing import List, Optional, Tuple, Union
import numpy as np
Matrix = Union[List[List[float]], List[np.ndarray], np.ndarray]
[docs]def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray:
"""Row-wise cosine similarity between two equal-width matrices.""... | https://api.python.langchain.com/en/latest/_modules/langchain/math_utils.html |
07683b2d904f-1 | second contains corresponding cosine similarities.
"""
if len(X) == 0 or len(Y) == 0:
return [], []
score_array = cosine_similarity(X, Y)
sorted_idxs = score_array.flatten().argsort()[::-1]
top_k = top_k or len(sorted_idxs)
top_idxs = sorted_idxs[:top_k]
score_threshold = score_thres... | https://api.python.langchain.com/en/latest/_modules/langchain/math_utils.html |
15cd4ddf1ce2-0 | Source code for langchain.example_generator
"""Utility functions for working with prompts."""
from typing import List
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTe... | https://api.python.langchain.com/en/latest/_modules/langchain/example_generator.html |
91392afd6fea-0 | Source code for langchain.schema
"""Common schema objects."""
from __future__ import annotations
import warnings
from abc import ABC, abstractmethod
from dataclasses import dataclass
from inspect import signature
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generic,
List,
NamedTuple,
Optio... | https://api.python.langchain.com/en/latest/_modules/langchain/schema.html |
91392afd6fea-1 | return "\n".join(string_messages)
@dataclass
class AgentAction:
"""Agent's action to take."""
tool: str
tool_input: Union[str, dict]
log: str
[docs]class AgentFinish(NamedTuple):
"""Agent's return value."""
return_values: dict
log: str
[docs]class Generation(Serializable):
"""Output of a... | https://api.python.langchain.com/en/latest/_modules/langchain/schema.html |
91392afd6fea-2 | """Type of the message, used for serialization."""
return "ai"
[docs]class SystemMessage(BaseMessage):
"""Type of message that is a system message."""
@property
def type(self) -> str:
"""Type of the message, used for serialization."""
return "system"
[docs]class FunctionMessage(BaseM... | https://api.python.langchain.com/en/latest/_modules/langchain/schema.html |
91392afd6fea-3 | else:
raise ValueError(f"Got unexpected type: {_type}")
[docs]def messages_from_dict(messages: List[dict]) -> List[BaseMessage]:
"""Convert messages from dict.
Args:
messages: List of messages (dicts) to convert.
Returns:
List of messages (BaseMessages).
"""
return [_message_... | https://api.python.langchain.com/en/latest/_modules/langchain/schema.html |
91392afd6fea-4 | """Flatten generations into a single list."""
llm_results = []
for i, gen_list in enumerate(self.generations):
# Avoid double counting tokens in OpenAICallback
if i == 0:
llm_results.append(
LLMResult(
generations=[gen_l... | https://api.python.langchain.com/en/latest/_modules/langchain/schema.html |
91392afd6fea-5 | """Input keys this memory class will load dynamically."""
[docs] @abstractmethod
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return key-value pairs given the text input to the chain.
If None, return all memories
"""
[docs] @abstractmethod
def save_... | https://api.python.langchain.com/en/latest/_modules/langchain/schema.html |
91392afd6fea-6 | self.add_message(HumanMessage(content=message))
[docs] def add_ai_message(self, message: str) -> None:
"""Add an AI message to the store"""
self.add_message(AIMessage(content=message))
[docs] def add_message(self, message: BaseMessage) -> None:
"""Add a self-created message to the store"""... | https://api.python.langchain.com/en/latest/_modules/langchain/schema.html |
91392afd6fea-7 | ):
warnings.warn(
"Retrievers must implement abstract `_aget_relevant_documents` method"
" instead of `aget_relevant_documents`",
DeprecationWarning,
)
aswap = cls.aget_relevant_documents
cls.aget_relevant_documents = ( # t... | https://api.python.langchain.com/en/latest/_modules/langchain/schema.html |
91392afd6fea-8 | List of relevant documents
"""
[docs] def get_relevant_documents(
self, query: str, *, callbacks: Callbacks = None, **kwargs: Any
) -> List[Document]:
"""Retrieve documents relevant to a query.
Args:
query: string to find relevant documents for
callbacks: C... | https://api.python.langchain.com/en/latest/_modules/langchain/schema.html |
91392afd6fea-9 | List of relevant documents
"""
from langchain.callbacks.manager import AsyncCallbackManager
callback_manager = AsyncCallbackManager.configure(
callbacks, None, verbose=kwargs.get("verbose", False)
)
run_manager = await callback_manager.on_retriever_start(
... | https://api.python.langchain.com/en/latest/_modules/langchain/schema.html |
91392afd6fea-10 | """Parse the output of an LLM call.
A method which takes in a string (assumed output of a language model )
and parses it into some structure.
Args:
text: output of language model
Returns:
structured output
"""
[docs] def parse_with_prompt(self, completi... | https://api.python.langchain.com/en/latest/_modules/langchain/schema.html |
91392afd6fea-11 | @property
def _type(self) -> str:
return "default"
[docs] def parse(self, text: str) -> str:
return text
[docs]class OutputParserException(ValueError):
"""Exception that output parsers should raise to signify a parsing error.
This exists to differentiate parsing errors from other code or ... | https://api.python.langchain.com/en/latest/_modules/langchain/schema.html |
2a018cb7ab09-0 | Source code for langchain.input
"""Handle chained inputs."""
from typing import Dict, List, Optional, TextIO
_TEXT_COLOR_MAPPING = {
"blue": "36;1",
"yellow": "33;1",
"pink": "38;5;200",
"green": "32;1",
"red": "31;1",
}
[docs]def get_color_mapping(
items: List[str], excluded_colors: Optional[Li... | https://api.python.langchain.com/en/latest/_modules/langchain/input.html |
2a018cb7ab09-1 | print(text_to_print, end=end, file=file)
if file:
file.flush() # ensure all printed content are written to file | https://api.python.langchain.com/en/latest/_modules/langchain/input.html |
e590a2e6137f-0 | Source code for langchain.sql_database
"""SQLAlchemy wrapper around a database."""
from __future__ import annotations
import warnings
from typing import Any, Iterable, List, Optional
import sqlalchemy
from sqlalchemy import MetaData, Table, create_engine, inspect, select, text
from sqlalchemy.engine import Engine
from ... | https://api.python.langchain.com/en/latest/_modules/langchain/sql_database.html |
e590a2e6137f-1 | ):
"""Create engine from database URI."""
self._engine = engine
self._schema = schema
if include_tables and ignore_tables:
raise ValueError("Cannot specify both include_tables and ignore_tables")
self._inspector = inspect(self._engine)
# including view support... | https://api.python.langchain.com/en/latest/_modules/langchain/sql_database.html |
e590a2e6137f-2 | if not isinstance(self._custom_table_info, dict):
raise TypeError(
"table_info must be a dictionary with table names as keys and the "
"desired table info as values"
)
# only keep the tables that are also present in the database
... | https://api.python.langchain.com/en/latest/_modules/langchain/sql_database.html |
e590a2e6137f-3 | """
Class method to create an SQLDatabase instance from a Databricks connection.
This method requires the 'databricks-sql-connector' package. If not installed,
it can be added using `pip install databricks-sql-connector`.
Args:
catalog (str): The catalog name in the Databrick... | https://api.python.langchain.com/en/latest/_modules/langchain/sql_database.html |
e590a2e6137f-4 | engine_args (Optional[dict]): The arguments to be used when connecting
Databricks. Defaults to None.
**kwargs (Any): Additional keyword arguments for the `from_uri` method.
Returns:
SQLDatabase: An instance of SQLDatabase configured with the provided
Datab... | https://api.python.langchain.com/en/latest/_modules/langchain/sql_database.html |
e590a2e6137f-5 | )
if warehouse_id and cluster_id:
raise ValueError("Can't have both 'warehouse_id' or 'cluster_id'.")
if warehouse_id:
http_path = f"/sql/1.0/warehouses/{warehouse_id}"
else:
http_path = f"/sql/protocolv1/o/0/{cluster_id}"
uri = (
f"databri... | https://api.python.langchain.com/en/latest/_modules/langchain/sql_database.html |
e590a2e6137f-6 | (https://arxiv.org/abs/2204.00498)
If `sample_rows_in_table_info`, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
"""
all_table_names = self.get_usable_table_names()
if table_name... | https://api.python.langchain.com/en/latest/_modules/langchain/sql_database.html |
e590a2e6137f-7 | tables.sort()
final_str = "\n\n".join(tables)
return final_str
def _get_table_indexes(self, table: Table) -> str:
indexes = self._inspector.get_indexes(table.name)
indexes_formatted = "\n".join(map(_format_index, indexes))
return f"Table Indexes:\n{indexes_formatted}"
def... | https://api.python.langchain.com/en/latest/_modules/langchain/sql_database.html |
e590a2e6137f-8 | If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
"""
with self._engine.begin() as connection:
if self._schema is not None:
if self.dialect == "snowflake":
connection.... | https://api.python.langchain.com/en/latest/_modules/langchain/sql_database.html |
e590a2e6137f-9 | (https://arxiv.org/abs/2204.00498)
If `sample_rows_in_table_info`, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
"""
try:
return self.get_table_info(table_names)
exce... | https://api.python.langchain.com/en/latest/_modules/langchain/sql_database.html |
e5c28a0e251c-0 | Source code for langchain.base_language
"""Base class for all language models."""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, List, Optional, Sequence, Set
from langchain.callbacks.manager import Callbacks
from langchain.load.serializable import Serializable
from langc... | https://api.python.langchain.com/en/latest/_modules/langchain/base_language.html |
e5c28a0e251c-1 | callbacks: Callbacks = None,
**kwargs: Any,
) -> LLMResult:
"""Take in a list of prompt values and return an LLMResult."""
[docs] @abstractmethod
def predict(
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
) -> str:
"""Predict text from text."""
[d... | https://api.python.langchain.com/en/latest/_modules/langchain/base_language.html |
e5c28a0e251c-2 | """Get the number of tokens in the message."""
return sum([self.get_num_tokens(get_buffer_string([m])) for m in messages])
[docs] @classmethod
def all_required_field_names(cls) -> Set:
all_required_field_names = set()
for field in cls.__fields__.values():
all_required_field_na... | https://api.python.langchain.com/en/latest/_modules/langchain/base_language.html |
b139a8d65875-0 | Source code for langchain.formatting
"""Utilities for formatting strings."""
from string import Formatter
from typing import Any, List, Mapping, Sequence, Union
[docs]class StrictFormatter(Formatter):
"""A subclass of formatter that checks for extra keys."""
[docs] def check_unused_args(
self,
us... | https://api.python.langchain.com/en/latest/_modules/langchain/formatting.html |
7e1411078879-0 | Source code for langchain.text_splitter
"""Functionality for splitting text."""
from __future__ import annotations
import copy
import logging
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass
from enum import Enum
from typing import (
AbstractSet,
Any,
Callable,
Collection,... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
7e1411078879-1 | """Interface for splitting text into chunks."""
def __init__(
self,
chunk_size: int = 4000,
chunk_overlap: int = 200,
length_function: Callable[[str], int] = len,
keep_separator: bool = False,
add_start_index: bool = False,
) -> None:
"""Create a new TextS... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
7e1411078879-2 | metadata = copy.deepcopy(_metadatas[i])
if self._add_start_index:
index = text.find(chunk, index + 1)
metadata["start_index"] = index
new_doc = Document(page_content=chunk, metadata=metadata)
documents.append(new_doc)
return... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
7e1411078879-3 | )
if len(current_doc) > 0:
doc = self._join_docs(current_doc, separator)
if doc is not None:
docs.append(doc)
# Keep on popping if:
# - we have a larger chunk than in the chunk overlap
... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
7e1411078879-4 | "Please install it with `pip install transformers`."
)
return cls(length_function=_huggingface_tokenizer_length, **kwargs)
[docs] @classmethod
def from_tiktoken_encoder(
cls: Type[TS],
encoding_name: str = "gpt2",
model_name: Optional[str] = None,
allowed_speci... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
7e1411078879-5 | [docs] def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Transform sequence of documents by splitting them."""
return self.split_documents(list(documents))
[docs] async def atransform_documents(
self, documents: Sequence[Doc... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
7e1411078879-6 | ):
"""Create a new MarkdownHeaderTextSplitter.
Args:
headers_to_split_on: Headers we want to track
return_each_line: Return each line w/ associated headers
"""
# Output line-by-line or aggregated into chunks w/ common headers
self.return_each_line = return... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
7e1411078879-7 | lines = text.split("\n")
# Final output
lines_with_metadata: List[LineType] = []
# Content and metadata of the chunk currently being processed
current_content: List[str] = []
current_metadata: Dict[str, str] = {}
# Keep track of the nested header structure
# heade... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
7e1411078879-8 | # Push the current header to the stack
header: HeaderType = {
"level": current_header_level,
"name": name,
"data": stripped_line[len(sep) :].strip(),
}
header_stack... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
7e1411078879-9 | @dataclass(frozen=True)
class Tokenizer:
chunk_overlap: int
tokens_per_chunk: int
decode: Callable[[list[int]], str]
encode: Callable[[str], List[int]]
[docs]def split_text_on_tokens(*, text: str, tokenizer: Tokenizer) -> List[str]:
"""Split incoming text and return chunks."""
splits: List[str] ... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
7e1411078879-10 | "Please install it with `pip install tiktoken`."
)
if model_name is not None:
enc = tiktoken.encoding_for_model(model_name)
else:
enc = tiktoken.get_encoding(encoding_name)
self._tokenizer = enc
self._allowed_special = allowed_special
self._dis... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
7e1411078879-11 | "Please install it with `pip install sentence-transformers`."
)
self.model_name = model_name
self._model = SentenceTransformer(self.model_name)
self.tokenizer = self._model.tokenizer
self._initialize_chunk_configuration(tokens_per_chunk=tokens_per_chunk)
def _initialize_c... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
7e1411078879-12 | def _encode(self, text: str) -> List[int]:
token_ids_with_start_and_end_token_ids = self.tokenizer.encode(
text,
max_length=self._max_length_equal_32_bit_integer,
truncation="do_not_truncate",
)
return token_ids_with_start_and_end_token_ids
[docs]class Languag... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
7e1411078879-13 | # Get appropriate separator to use
separator = separators[-1]
new_separators = []
for i, _s in enumerate(separators):
if _s == "":
separator = _s
break
if re.search(_s, text):
separator = _s
new_separators = ... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
7e1411078879-14 | [docs] @staticmethod
def get_separators_for_language(language: Language) -> List[str]:
if language == Language.CPP:
return [
# Split along class definitions
"\nclass ",
# Split along function definitions
"\nvoid ",
... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
7e1411078879-15 | "",
]
elif language == Language.JS:
return [
# Split along function definitions
"\nfunction ",
"\nconst ",
"\nlet ",
"\nvar ",
"\nclass ",
# Split along control flow statements... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
7e1411078879-16 | # First, try to split along class definitions
"\nclass ",
"\ndef ",
"\n\tdef ",
# Now split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.R... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
7e1411078879-17 | return [
# Split along class definitions
"\nclass ",
"\nobject ",
# Split along method definitions
"\ndef ",
"\nval ",
"\nvar ",
# Split along control flow statements
"\nif ",
... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
7e1411078879-18 | "\n\n",
"\n",
" ",
"",
]
elif language == Language.LATEX:
return [
# First, try to split along Latex sections
"\n\\\chapter{",
"\n\\\section{",
"\n\\\subsection{",
... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
7e1411078879-19 | return [
# Split along compiler informations definitions
"\npragma ",
"\nusing ",
# Split along contract definitions
"\ncontract ",
"\ninterface ",
"\nlibrary ",
# Split along method definitio... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
7e1411078879-20 | splits = self._tokenizer(text)
return self._merge_splits(splits, self._separator)
[docs]class SpacyTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at sentences using Spacy."""
def __init__(
self, separator: str = "\n\n", pipeline: str = "en_core_web_sm", **kwargs: Any
... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
7e1411078879-21 | separators = self.get_separators_for_language(Language.MARKDOWN)
super().__init__(separators=separators, **kwargs)
[docs]class LatexTextSplitter(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Latex-formatted layout elements."""
def __init__(self, **kwargs: Any) -> None:
"""... | https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html |
99765d5cc3b7-0 | Source code for langchain.env
import platform
from functools import lru_cache
[docs]@lru_cache(maxsize=1)
def get_runtime_environment() -> dict:
"""Get information about the environment."""
# Lazy import to avoid circular imports
from langchain import __version__
return {
"library_version": __ve... | https://api.python.langchain.com/en/latest/_modules/langchain/env.html |
e67a63cc0afa-0 | Source code for langchain.requests
"""Lightweight wrapper around requests library, with async support."""
from contextlib import asynccontextmanager
from typing import Any, AsyncGenerator, Dict, Optional
import aiohttp
import requests
from pydantic import BaseModel, Extra
[docs]class Requests(BaseModel):
"""Wrapper... | https://api.python.langchain.com/en/latest/_modules/langchain/requests.html |
e67a63cc0afa-1 | return requests.put(url, json=data, headers=self.headers, **kwargs)
[docs] def delete(self, url: str, **kwargs: Any) -> requests.Response:
"""DELETE the URL and return the text."""
return requests.delete(url, headers=self.headers, **kwargs)
@asynccontextmanager
async def _arequest(
se... | https://api.python.langchain.com/en/latest/_modules/langchain/requests.html |
e67a63cc0afa-2 | self, url: str, data: Dict[str, Any], **kwargs: Any
) -> AsyncGenerator[aiohttp.ClientResponse, None]:
"""PATCH the URL and return the text asynchronously."""
async with self._arequest("PATCH", url, **kwargs) as response:
yield response
[docs] @asynccontextmanager
async def aput(
... | https://api.python.langchain.com/en/latest/_modules/langchain/requests.html |
e67a63cc0afa-3 | """GET the URL and return the text."""
return self.requests.get(url, **kwargs).text
[docs] def post(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""POST to the URL and return the text."""
return self.requests.post(url, data, **kwargs).text
[docs] def patch(self, url: str, d... | https://api.python.langchain.com/en/latest/_modules/langchain/requests.html |
e67a63cc0afa-4 | """PATCH the URL and return the text asynchronously."""
async with self.requests.apatch(url, **kwargs) as response:
return await response.text()
[docs] async def aput(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""PUT the URL and return the text asynchronously."""
... | https://api.python.langchain.com/en/latest/_modules/langchain/requests.html |
88009f8714d4-0 | Source code for langchain.document_transformers
"""Transform documents"""
from typing import Any, Callable, List, Sequence
import numpy as np
from pydantic import BaseModel, Field
from langchain.embeddings.base import Embeddings
from langchain.math_utils import cosine_similarity
from langchain.schema import BaseDocumen... | https://api.python.langchain.com/en/latest/_modules/langchain/document_transformers.html |
88009f8714d4-1 | redundant_stacked = np.column_stack(redundant)
redundant_sorted = np.argsort(similarity[redundant])[::-1]
included_idxs = set(range(len(embedded_documents)))
for first_idx, second_idx in redundant_stacked[redundant_sorted]:
if first_idx in included_idxs and second_idx in included_idxs:
#... | https://api.python.langchain.com/en/latest/_modules/langchain/document_transformers.html |
88009f8714d4-2 | arbitrary_types_allowed = True
[docs] def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Filter down documents."""
stateful_documents = get_stateful_documents(documents)
embedded_documents = _get_embeddings_from_stateful_docs(
... | https://api.python.langchain.com/en/latest/_modules/langchain/document_transformers.html |
22c1297ffc02-0 | Source code for langchain.retrievers.milvus
"""Milvus Retriever"""
import warnings
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.embeddings.base import Embeddings
from langchain.schema ... | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/milvus.html |
22c1297ffc02-1 | *,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
return self.retriever.get_relevant_documents(
query, run_manager=run_manager.get_child(), **kwargs
)
async def _aget_relevant_documents(
self,
query: str,
*,
... | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/milvus.html |
ebd40a2c2044-0 | Source code for langchain.retrievers.zilliz
"""Zilliz Retriever"""
import warnings
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.embeddings.base import Embeddings
from langchain.schema ... | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/zilliz.html |
ebd40a2c2044-1 | *,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
return self.retriever.get_relevant_documents(
query, run_manager=run_manager.get_child(), **kwargs
)
async def _aget_relevant_documents(
self,
query: str,
*,
... | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/zilliz.html |
43717129c2aa-0 | Source code for langchain.retrievers.kendra
import re
from typing import Any, Dict, List, Literal, Optional
from pydantic import BaseModel, Extra
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.docstore.document import Document
from... | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/kendra.html |
43717129c2aa-1 | DocumentURI: Optional[str]
FeedbackToken: Optional[str]
Format: Optional[str]
Id: Optional[str]
Type: Optional[str]
AdditionalAttributes: Optional[List[AdditionalResultAttribute]] = []
DocumentExcerpt: Optional[TextWithHighLights]
[docs] def get_attribute_value(self) -> str:
if not se... | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/kendra.html |
43717129c2aa-2 | return docs
[docs]class DocumentAttributeValue(BaseModel, extra=Extra.allow):
DateValue: Optional[str]
LongValue: Optional[int]
StringListValue: Optional[List[str]]
StringValue: Optional[str]
[docs]class DocumentAttribute(BaseModel, extra=Extra.allow):
Key: str
Value: DocumentAttributeValue
[doc... | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/kendra.html |
43717129c2aa-3 | """Retriever class to query documents from Amazon Kendra Index.
Args:
index_id: Kendra index id
region_name: The aws region e.g., `us-west-2`.
        Falls back to the AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config.
credentials_profile_name: The name of the ... | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/kendra.html |
43717129c2aa-4 | else:
# use default credentials
session = boto3.Session()
client_params = {}
if region_name is not None:
client_params["region_name"] = region_name
self.client = session.client("kendra", **client_params)
except ImportError:
... | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/kendra.html |
43717129c2aa-5 | IndexId=self.index_id, QueryText=query.strip(), PageSize=top_k
)
q_result = QueryResult.parse_obj(response)
docs = q_result.get_top_k_docs(top_k)
else:
docs = r_result.get_top_k_docs(top_k)
return docs
def _get_relevant_documents(
self,
... | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/kendra.html |
09ea1244cd87-0 | Source code for langchain.retrievers.svm
"""SMV Retriever.
Largely based on
https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb"""
from __future__ import annotations
import concurrent.futures
from typing import Any, List, Optional
import numpy as np
from pydantic import BaseModel
from langchain.callbacks... | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/svm.html |
09ea1244cd87-1 | return cls(embeddings=embeddings, index=index, texts=texts, **kwargs)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
from sklearn import svm
query_embeds = np.array(self.em... | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/svm.html |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.