langchain.embeddings.tensorflow_hub
langchain.embeddings.vertexai
langchain.env
langchain.evaluation.agents.trajectory_eval_chain
langchain.evaluation.comparison.eval_chain
langchain.evaluation.criteria.eval_chain
langchain.evaluation.loading
langchain.evaluation.qa.eval_chain
langchain.evaluation.qa.generate_chain
langchain.evaluation.run_evaluators.base
langchain.evaluation.run_evaluators.implementations
langchain.evaluation.schema
langchain.example_generator
langchain.experimental.autonomous_agents.autogpt.memory
langchain.experimental.autonomous_agents.autogpt.output_parser
langchain.experimental.autonomous_agents.autogpt.prompt
langchain.experimental.autonomous_agents.autogpt.prompt_generator
langchain.experimental.autonomous_agents.baby_agi.baby_agi
langchain.experimental.autonomous_agents.baby_agi.task_creation
langchain.experimental.autonomous_agents.baby_agi.task_execution
langchain.experimental.autonomous_agents.baby_agi.task_prioritization
langchain.experimental.generative_agents.generative_agent
langchain.experimental.generative_agents.memory
langchain.experimental.llms.jsonformer_decoder
langchain.experimental.llms.rellm_decoder
langchain.experimental.plan_and_execute.agent_executor
langchain.experimental.plan_and_execute.executors.agent_executor
langchain.experimental.plan_and_execute.executors.base
langchain.experimental.plan_and_execute.planners.base
langchain.experimental.plan_and_execute.planners.chat_planner
langchain.experimental.plan_and_execute.schema
langchain.formatting
langchain.graphs.networkx_graph
langchain.indexes.graph
langchain.indexes.vectorstore
langchain.input
langchain.llms.ai21
langchain.llms.aleph_alpha
langchain.llms.amazon_api_gateway
langchain.llms.anthropic
langchain.llms.anyscale
langchain.llms.aviary
langchain.llms.azureml_endpoint
langchain.llms.bananadev
langchain.llms.base
langchain.llms.baseten
langchain.llms.beam
langchain.llms.bedrock
langchain.llms.cerebriumai
langchain.llms.clarifai
langchain.llms.cohere
langchain.llms.ctransformers
langchain.llms.databricks
langchain.llms.deepinfra
langchain.llms.fake
langchain.llms.forefrontai
langchain.llms.google_palm
langchain.llms.gooseai
langchain.llms.gpt4all
langchain.llms.huggingface_endpoint
langchain.llms.huggingface_hub
langchain.llms.huggingface_pipeline
langchain.llms.huggingface_text_gen_inference
langchain.llms.human
langchain.llms.llamacpp
langchain.llms.loading
langchain.llms.manifest
langchain.llms.modal
langchain.llms.mosaicml
langchain.llms.nlpcloud
langchain.llms.octoai_endpoint
langchain.llms.openai
langchain.llms.openllm
langchain.llms.openlm
langchain.llms.petals
langchain.llms.pipelineai
langchain.llms.predictionguard
langchain.llms.promptlayer_openai
langchain.llms.replicate
langchain.llms.rwkv
langchain.llms.sagemaker_endpoint
langchain.llms.self_hosted
langchain.llms.self_hosted_hugging_face
langchain.llms.stochasticai
langchain.llms.textgen
langchain.llms.utils
langchain.llms.vertexai
langchain.llms.writer
langchain.load.dump
langchain.load.load
langchain.load.serializable
langchain.math_utils
langchain.memory.buffer
langchain.memory.buffer_window
langchain.memory.chat_memory
langchain.memory.chat_message_histories.cassandra
langchain.memory.chat_message_histories.cosmos_db
langchain.memory.chat_message_histories.dynamodb
langchain.memory.chat_message_histories.file
langchain.memory.chat_message_histories.firestore
langchain.memory.chat_message_histories.in_memory
langchain.memory.chat_message_histories.momento
langchain.memory.chat_message_histories.mongodb
langchain.memory.chat_message_histories.postgres
langchain.memory.chat_message_histories.redis
langchain.memory.chat_message_histories.sql
langchain.memory.chat_message_histories.zep
langchain.memory.combined
langchain.memory.entity
langchain.memory.kg
langchain.memory.motorhead_memory
langchain.memory.readonly
langchain.memory.simple
langchain.memory.summary
langchain.memory.summary_buffer
langchain.memory.token_buffer
langchain.memory.utils
langchain.memory.vectorstore
langchain.output_parsers.boolean
langchain.output_parsers.combining
langchain.output_parsers.datetime
langchain.output_parsers.enum
langchain.output_parsers.fix
langchain.output_parsers.json
langchain.output_parsers.list
langchain.output_parsers.loading
langchain.output_parsers.openai_functions
langchain.output_parsers.pydantic
langchain.output_parsers.rail_parser
langchain.output_parsers.regex
langchain.output_parsers.regex_dict
langchain.output_parsers.retry
langchain.output_parsers.structured
langchain.prompts.base
langchain.prompts.chat
langchain.prompts.example_selector.base
langchain.prompts.example_selector.length_based
langchain.prompts.example_selector.ngram_overlap
langchain.prompts.example_selector.semantic_similarity
langchain.prompts.few_shot
langchain.prompts.few_shot_with_templates
langchain.prompts.loading
langchain.prompts.pipeline
langchain.prompts.prompt
langchain.requests
langchain.retrievers.arxiv
langchain.retrievers.azure_cognitive_search
langchain.retrievers.chatgpt_plugin_retriever
langchain.retrievers.contextual_compression
langchain.retrievers.databerry
langchain.retrievers.docarray
langchain.retrievers.document_compressors.base
langchain.retrievers.document_compressors.chain_extract
langchain.retrievers.document_compressors.chain_filter
langchain.retrievers.document_compressors.cohere_rerank
langchain.retrievers.document_compressors.embeddings_filter
langchain.retrievers.elastic_search_bm25
langchain.retrievers.kendra
langchain.retrievers.knn
langchain.retrievers.llama_index
langchain.retrievers.merger_retriever
langchain.retrievers.metal
langchain.retrievers.milvus
langchain.retrievers.multi_query
langchain.retrievers.pinecone_hybrid_search
langchain.retrievers.pubmed
langchain.retrievers.remote_retriever
langchain.retrievers.self_query.base
langchain.retrievers.self_query.chroma
langchain.retrievers.self_query.myscale
langchain.retrievers.self_query.pinecone
langchain.retrievers.self_query.qdrant
langchain.retrievers.self_query.weaviate
langchain.retrievers.svm
langchain.retrievers.tfidf
langchain.retrievers.time_weighted_retriever
langchain.retrievers.vespa_retriever
langchain.retrievers.weaviate_hybrid_search
langchain.retrievers.wikipedia
langchain.retrievers.zep
langchain.retrievers.zilliz
langchain.schema
langchain.server
langchain.sql_database
langchain.text_splitter
langchain.tools.arxiv.tool
langchain.tools.azure_cognitive_services.form_recognizer
langchain.tools.azure_cognitive_services.image_analysis
langchain.tools.azure_cognitive_services.speech2text
langchain.tools.azure_cognitive_services.text2speech
langchain.tools.azure_cognitive_services.utils
langchain.tools.base
langchain.tools.bing_search.tool
langchain.tools.brave_search.tool
langchain.tools.convert_to_openai
langchain.tools.ddg_search.tool
langchain.tools.file_management.copy
langchain.tools.file_management.delete
langchain.tools.file_management.file_search
langchain.tools.file_management.list_dir
langchain.tools.file_management.move
langchain.tools.file_management.read
langchain.tools.file_management.utils
langchain.tools.file_management.write
langchain.tools.gmail.base
langchain.tools.gmail.create_draft
langchain.tools.gmail.get_message
langchain.tools.gmail.get_thread
langchain.tools.gmail.search
langchain.tools.gmail.send_message
langchain.tools.gmail.utils
langchain.tools.google_places.tool
langchain.tools.google_search.tool
langchain.tools.google_serper.tool
langchain.tools.graphql.tool
langchain.tools.human.tool
langchain.tools.ifttt
langchain.tools.interaction.tool
langchain.tools.jira.tool
langchain.tools.json.tool
langchain.tools.metaphor_search.tool
langchain.tools.office365.base
langchain.tools.office365.create_draft_message
langchain.tools.office365.events_search
langchain.tools.office365.messages_search
langchain.tools.office365.send_event
langchain.tools.office365.send_message
langchain.tools.office365.utils
langchain.tools.openapi.utils.api_models
langchain.tools.openweathermap.tool
langchain.tools.playwright.base
langchain.tools.playwright.click
langchain.tools.playwright.current_page
langchain.tools.playwright.extract_hyperlinks
langchain.tools.playwright.extract_text
langchain.tools.playwright.get_elements
langchain.tools.playwright.navigate
langchain.tools.playwright.navigate_back
langchain.tools.playwright.utils
langchain.tools.plugin
langchain.tools.powerbi.tool
langchain.tools.pubmed.tool
langchain.tools.python.tool
langchain.tools.requests.tool
langchain.tools.scenexplain.tool
langchain.tools.searx_search.tool
langchain.tools.shell.tool
langchain.tools.sleep.tool
langchain.tools.spark_sql.tool
langchain.tools.sql_database.tool
langchain.tools.steamship_image_generation.tool
langchain.tools.steamship_image_generation.utils
langchain.tools.vectorstore.tool
langchain.tools.wikipedia.tool
langchain.tools.wolfram_alpha.tool
langchain.tools.youtube.search
langchain.tools.zapier.tool
langchain.utilities.apify
langchain.utilities.arxiv
langchain.utilities.awslambda
langchain.utilities.bibtex
langchain.utilities.bing_search
langchain.utilities.brave_search
langchain.utilities.duckduckgo_search
langchain.utilities.google_places_api
langchain.utilities.google_search
langchain.utilities.google_serper
langchain.utilities.graphql
langchain.utilities.jira
langchain.utilities.loading
langchain.utilities.metaphor_search
langchain.utilities.openapi
langchain.utilities.openweathermap
langchain.utilities.powerbi
langchain.utilities.pupmed
langchain.utilities.python
langchain.utilities.scenexplain
langchain.utilities.searx_search
langchain.utilities.serpapi
langchain.utilities.twilio
langchain.utilities.vertexai
langchain.utilities.wikipedia
langchain.utilities.wolfram_alpha
langchain.utilities.zapier
langchain.utils
langchain.vectorstores.alibabacloud_opensearch
langchain.vectorstores.analyticdb
langchain.vectorstores.annoy
langchain.vectorstores.atlas
langchain.vectorstores.awadb
langchain.vectorstores.azuresearch
langchain.vectorstores.base
langchain.vectorstores.cassandra
langchain.vectorstores.chroma
langchain.vectorstores.clarifai
langchain.vectorstores.clickhouse
langchain.vectorstores.deeplake
langchain.vectorstores.docarray.base
langchain.vectorstores.docarray.hnsw
langchain.vectorstores.docarray.in_memory
langchain.vectorstores.elastic_vector_search
langchain.vectorstores.faiss
langchain.vectorstores.hologres
langchain.vectorstores.lancedb
langchain.vectorstores.matching_engine
langchain.vectorstores.milvus
langchain.vectorstores.mongodb_atlas
langchain.vectorstores.myscale
langchain.vectorstores.opensearch_vector_search
langchain.vectorstores.pinecone
langchain.vectorstores.qdrant
langchain.vectorstores.redis
langchain.vectorstores.rocksetdb
langchain.vectorstores.singlestoredb
langchain.vectorstores.sklearn
langchain.vectorstores.starrocks
langchain.vectorstores.supabase
langchain.vectorstores.tair
langchain.vectorstores.tigris
langchain.vectorstores.typesense
langchain.vectorstores.utils
langchain.vectorstores.vectara
langchain.vectorstores.weaviate
langchain.vectorstores.zilliz
pydantic.config
pydantic.env_settings
pydantic.utils
https://api.python.langchain.com/en/latest/_modules/index.html
Source code for langchain.utils
"""Generic utility functions."""
import contextlib
import datetime
import importlib
import os
from typing import Any, Callable, Dict, List, Optional, Tuple
from requests import HTTPError, Response
[docs]def get_from_dict_or_env(
data: Dict[str, Any], key: str, env_key: str, default: Optional[str] = None
) -> str:
"""Get a value from a dictionary or an environment variable."""
if key in data and data[key]:
return data[key]
else:
return get_from_env(key, env_key, default=default)
[docs]def get_from_env(key: str, env_key: str, default: Optional[str] = None) -> str:
"""Get a value from a dictionary or an environment variable."""
if env_key in os.environ and os.environ[env_key]:
return os.environ[env_key]
elif default is not None:
return default
else:
raise ValueError(
f"Did not find {key}, please add an environment variable"
f" `{env_key}` which contains it, or pass"
f" `{key}` as a named parameter."
)
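A brief usage sketch of the two helpers above; the key and environment-variable names are illustrative:
import os
os.environ["MY_SERVICE_API_KEY"] = "sk-test"  # hypothetical variable
# data["api_key"] is empty, so the lookup falls through to the environment.
api_key = get_from_dict_or_env({"api_key": ""}, key="api_key", env_key="MY_SERVICE_API_KEY")
assert api_key == "sk-test"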
[docs]def xor_args(*arg_groups: Tuple[str, ...]) -> Callable:
"""Validate specified keyword args are mutually exclusive."""
def decorator(func: Callable) -> Callable:
def wrapper(*args: Any, **kwargs: Any) -> Any:
"""Validate exactly one arg in each group is not None."""
counts = [
sum(1 for arg in arg_group if kwargs.get(arg) is not None)
for arg_group in arg_groups
]
invalid_groups = [i for i, count in enumerate(counts) if count != 1]
if invalid_groups:
invalid_group_names = [", ".join(arg_groups[i]) for i in invalid_groups]
raise ValueError(
"Exactly one argument in each of the following"
" groups must be defined:"
f" {', '.join(invalid_group_names)}"
)
return func(*args, **kwargs)
return wrapper
return decorator
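For example, the decorator can require that exactly one of two keyword-only arguments is supplied; the function and argument names below are illustrative:
@xor_args(("path", "url"))
def load_resource(*, path: Optional[str] = None, url: Optional[str] = None) -> str:
    """Load from exactly one of a local path or a URL."""
    return path if path is not None else str(url)

load_resource(path="/tmp/data.txt")                   # OK: one argument in the group
load_resource(path="/tmp/data.txt", url="http://x")   # raises ValueError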
[docs]def raise_for_status_with_text(response: Response) -> None:
"""Raise an error with the response text."""
try:
response.raise_for_status()
except HTTPError as e:
raise ValueError(response.text) from e
[docs]def stringify_value(val: Any) -> str:
"""Stringify a value.
Args:
val: The value to stringify.
Returns:
str: The stringified value.
"""
if isinstance(val, str):
return val
elif isinstance(val, dict):
return "\n" + stringify_dict(val)
elif isinstance(val, list):
return "\n".join(stringify_value(v) for v in val)
else:
return str(val)
[docs]def stringify_dict(data: dict) -> str:
"""Stringify a dictionary.
Args:
data: The dictionary to stringify.
Returns:
str: The stringified dictionary.
"""
text = ""
for key, value in data.items():
text += key + ": " + stringify_value(value) + "\n"
return text
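A quick sketch of how nested values are rendered; lists and dicts are recursively flattened onto their own lines:
print(stringify_dict({"name": "search", "tags": ["a", "b"], "meta": {"k": 1}}))
# name: search
# tags: a
# b
# meta:
# k: 1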
[docs]def comma_list(items: List[Any]) -> str:
return ", ".join(str(item) for item in items)
[docs]@contextlib.contextmanager
def mock_now(dt_value): # type: ignore
"""Context manager for mocking out datetime.now() in unit tests.
Example:
with mock_now(datetime.datetime(2011, 2, 3, 10, 11)):
assert datetime.datetime.now() == datetime.datetime(2011, 2, 3, 10, 11)
"""
class MockDateTime(datetime.datetime):
@classmethod
def now(cls): # type: ignore
# Create a copy of dt_value.
return datetime.datetime(
dt_value.year,
dt_value.month,
dt_value.day,
dt_value.hour,
dt_value.minute,
dt_value.second,
dt_value.microsecond,
dt_value.tzinfo,
)
real_datetime = datetime.datetime
datetime.datetime = MockDateTime
try:
yield datetime.datetime
finally:
datetime.datetime = real_datetime
[docs]def guard_import(
module_name: str, *, pip_name: Optional[str] = None, package: Optional[str] = None
) -> Any:
"""Dynamically imports a module and raises a helpful exception if the module is not
installed."""
try:
module = importlib.import_module(module_name, package)
except ImportError:
raise ImportError(
f"Could not import {module_name} python package. "
f"Please install it with `pip install {pip_name or module_name}`."
)
return module
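For example, an optional dependency can be imported lazily, with a pip hint raised if it is missing:
np = guard_import("numpy")  # like `import numpy as np`, but a missing package
                            # raises ImportError suggesting `pip install numpy`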
https://api.python.langchain.com/en/latest/_modules/langchain/utils.html
Source code for langchain.server
"""Script to run langchain-server locally using docker-compose."""
import subprocess
from pathlib import Path
from langchainplus_sdk.cli.main import get_docker_compose_command
[docs]def main() -> None:
"""Run the langchain server locally."""
p = Path(__file__).absolute().parent / "docker-compose.yaml"
docker_compose_command = get_docker_compose_command()
subprocess.run([*docker_compose_command, "-f", str(p), "pull"])
subprocess.run([*docker_compose_command, "-f", str(p), "up"])
if __name__ == "__main__":
main()
https://api.python.langchain.com/en/latest/_modules/langchain/server.html
Source code for langchain.cache
"""Beta Feature: base interface for cache."""
from __future__ import annotations
import hashlib
import inspect
import json
import logging
from abc import ABC, abstractmethod
from datetime import timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from sqlalchemy import Column, Integer, String, create_engine, select
from sqlalchemy.engine.base import Engine
from sqlalchemy.orm import Session
from langchain.utils import get_from_env
try:
from sqlalchemy.orm import declarative_base
except ImportError:
from sqlalchemy.ext.declarative import declarative_base
from langchain.embeddings.base import Embeddings
from langchain.load.dump import dumps
from langchain.load.load import loads
from langchain.schema import Generation
from langchain.vectorstores.redis import Redis as RedisVectorstore
logger = logging.getLogger(__file__)
if TYPE_CHECKING:
import momento
RETURN_VAL_TYPE = Sequence[Generation]
def _hash(_input: str) -> str:
"""Use a deterministic hashing approach."""
return hashlib.md5(_input.encode()).hexdigest()
def _dump_generations_to_json(generations: RETURN_VAL_TYPE) -> str:
"""Dump generations to json.
Args:
generations (RETURN_VAL_TYPE): A list of language model generations.
Returns:
str: Json representing a list of generations.
"""
return json.dumps([generation.dict() for generation in generations])
def _load_generations_from_json(generations_json: str) -> RETURN_VAL_TYPE:
"""Load generations from json.
Args:
generations_json (str): A string of json representing a list of generations.
Raises:
ValueError: Could not decode json string to list of generations.
Returns:
RETURN_VAL_TYPE: A list of generations.
"""
try:
results = json.loads(generations_json)
return [Generation(**generation_dict) for generation_dict in results]
except json.JSONDecodeError:
raise ValueError(
f"Could not decode json to list of generations: {generations_json}"
)
[docs]class BaseCache(ABC):
"""Base interface for cache."""
[docs] @abstractmethod
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
[docs] @abstractmethod
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
[docs] @abstractmethod
def clear(self, **kwargs: Any) -> None:
"""Clear cache that can take additional keyword arguments."""
[docs]class InMemoryCache(BaseCache):
"""Cache that stores things in memory."""
def __init__(self) -> None:
"""Initialize with empty cache."""
self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {}
[docs] def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
return self._cache.get((prompt, llm_string), None)
[docs] def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
self._cache[(prompt, llm_string)] = return_val
[docs] def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
self._cache = {}
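A minimal sketch of enabling this cache globally, following the `langchain.llm_cache` convention shown in the docstrings elsewhere in this module:
import langchain
langchain.llm_cache = InMemoryCache()
# Subsequent calls with an identical (prompt, llm_string) pair are served from memory.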
Base = declarative_base()
[docs]class FullLLMCache(Base): # type: ignore
"""SQLite table for full LLM Cache (all generations)."""
__tablename__ = "full_llm_cache"
prompt = Column(String, primary_key=True)
llm = Column(String, primary_key=True)
idx = Column(Integer, primary_key=True)
response = Column(String)
[docs]class SQLAlchemyCache(BaseCache):
"""Cache that uses SQAlchemy as a backend."""
def __init__(self, engine: Engine, cache_schema: Type[FullLLMCache] = FullLLMCache):
"""Initialize by creating all tables."""
self.engine = engine
self.cache_schema = cache_schema
self.cache_schema.metadata.create_all(self.engine)
[docs] def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
stmt = (
select(self.cache_schema.response)
.where(self.cache_schema.prompt == prompt) # type: ignore
.where(self.cache_schema.llm == llm_string)
.order_by(self.cache_schema.idx)
)
with Session(self.engine) as session:
rows = session.execute(stmt).fetchall()
if rows:
try:
return [loads(row[0]) for row in rows]
except Exception:
logger.warning(
"Retrieving a cache value that could not be deserialized "
"properly. This is likely due to the cache being in an "
"older format. Please recreate your cache to avoid this "
"error."
)
# In a previous life we stored the raw text directly
# in the table, so assume it's in that format.
return [Generation(text=row[0]) for row in rows]
return None
[docs] def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update based on prompt and llm_string."""
items = [
self.cache_schema(prompt=prompt, llm=llm_string, response=dumps(gen), idx=i)
for i, gen in enumerate(return_val)
]
with Session(self.engine) as session, session.begin():
for item in items:
session.merge(item)
[docs] def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
with Session(self.engine) as session:
session.query(self.cache_schema).delete()
[docs]class SQLiteCache(SQLAlchemyCache):
"""Cache that uses SQLite as a backend."""
def __init__(self, database_path: str = ".langchain.db"):
"""Initialize by creating the engine and all tables."""
engine = create_engine(f"sqlite:///{database_path}")
super().__init__(engine)
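Usage mirrors `InMemoryCache`; the database path below is the default:
import langchain
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")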
[docs]class RedisCache(BaseCache):
"""Cache that uses Redis as a backend."""
# TODO - implement a TTL policy in Redis
def __init__(self, redis_: Any):
"""Initialize by passing in Redis instance."""
try:
from redis import Redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
if not isinstance(redis_, Redis):
raise ValueError("Please pass in Redis object.")
self.redis = redis_
def _key(self, prompt: str, llm_string: str) -> str:
"""Compute key from prompt and llm_string"""
return _hash(prompt + llm_string)
[docs] def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
generations = []
# Read from a Redis HASH
results = self.redis.hgetall(self._key(prompt, llm_string))
if results:
for _, text in results.items():
generations.append(Generation(text=text))
return generations if generations else None
[docs] def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"RedisCache only supports caching of normal LLM generations, "
f"got {type(gen)}"
)
# Write to a Redis HASH
key = self._key(prompt, llm_string)
self.redis.hset(
key,
mapping={
str(idx): generation.text for idx, generation in enumerate(return_val)
},
)
[docs] def clear(self, **kwargs: Any) -> None:
"""Clear cache. If `asynchronous` is True, flush asynchronously."""
asynchronous = kwargs.get("asynchronous", False)
self.redis.flushdb(asynchronous=asynchronous, **kwargs)
[docs]class RedisSemanticCache(BaseCache):
"""Cache that uses Redis as a vector-store backend."""
# TODO - implement a TTL policy in Redis
def __init__(
self, redis_url: str, embedding: Embeddings, score_threshold: float = 0.2
):
"""Initialize by passing in the `init` GPTCache func
Args:
redis_url (str): URL to connect to Redis.
embedding (Embedding): Embedding provider for semantic encoding and search.
score_threshold (float, 0.2):
Example:
.. code-block:: python
import langchain
from langchain.cache import RedisSemanticCache
from langchain.embeddings import OpenAIEmbeddings
langchain.llm_cache = RedisSemanticCache(
redis_url="redis://localhost:6379",
embedding=OpenAIEmbeddings()
)
"""
self._cache_dict: Dict[str, RedisVectorstore] = {}
self.redis_url = redis_url
self.embedding = embedding
self.score_threshold = score_threshold
def _index_name(self, llm_string: str) -> str:
hashed_index = _hash(llm_string)
return f"cache:{hashed_index}"
def _get_llm_cache(self, llm_string: str) -> RedisVectorstore:
index_name = self._index_name(llm_string)
# return vectorstore client for the specific llm string
if index_name in self._cache_dict:
return self._cache_dict[index_name]
# create new vectorstore client for the specific llm string
try:
self._cache_dict[index_name] = RedisVectorstore.from_existing_index(
embedding=self.embedding,
index_name=index_name,
redis_url=self.redis_url,
)
except ValueError:
redis = RedisVectorstore(
embedding_function=self.embedding.embed_query,
index_name=index_name,
redis_url=self.redis_url,
)
_embedding = self.embedding.embed_query(text="test")
redis._create_index(dim=len(_embedding))
self._cache_dict[index_name] = redis
return self._cache_dict[index_name]
[docs] def clear(self, **kwargs: Any) -> None:
"""Clear semantic cache for a given llm_string."""
index_name = self._index_name(kwargs["llm_string"])
if index_name in self._cache_dict:
self._cache_dict[index_name].drop_index(
index_name=index_name, delete_documents=True, redis_url=self.redis_url
)
del self._cache_dict[index_name]
[docs] def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
llm_cache = self._get_llm_cache(llm_string)
generations = []
# Read from a Hash
results = llm_cache.similarity_search_limit_score(
query=prompt,
k=1,
score_threshold=self.score_threshold,
)
if results:
for document in results:
for text in document.metadata["return_val"]:
generations.append(Generation(text=text))
return generations if generations else None
[docs] def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"RedisSemanticCache only supports caching of "
f"normal LLM generations, got {type(gen)}"
)
llm_cache = self._get_llm_cache(llm_string)
# Write to vectorstore
metadata = {
"llm_string": llm_string,
"prompt": prompt,
"return_val": [generation.text for generation in return_val],
}
llm_cache.add_texts(texts=[prompt], metadatas=[metadata])
[docs]class GPTCache(BaseCache):
"""Cache that uses GPTCache as a backend."""
def __init__(
self,
init_func: Union[
Callable[[Any, str], None], Callable[[Any], None], None
] = None,
):
"""Initialize by passing in init function (default: `None`).
Args:
init_func (Optional[Callable[[Any], None]]): init `GPTCache` function
(default: `None`)
Example:
.. code-block:: python
# Initialize GPTCache with a custom init function
import gptcache
from gptcache.processor.pre import get_prompt
from gptcache.manager.factory import manager_factory
# Avoid multiple caches using the same file,
# causing different llm model caches to affect each other
def init_gptcache(cache_obj: gptcache.Cache, llm: str):
cache_obj.init(
pre_embedding_func=get_prompt,
data_manager=manager_factory(
manager="map",
data_dir=f"map_cache_{llm}"
),
)
langchain.llm_cache = GPTCache(init_gptcache)
"""
try:
import gptcache # noqa: F401
except ImportError:
raise ImportError(
"Could not import gptcache python package. "
"Please install it with `pip install gptcache`."
)
self.init_gptcache_func: Union[
Callable[[Any, str], None], Callable[[Any], None], None
] = init_func
self.gptcache_dict: Dict[str, Any] = {}
def _new_gptcache(self, llm_string: str) -> Any:
"""New gptcache object"""
from gptcache import Cache
from gptcache.manager.factory import get_data_manager
from gptcache.processor.pre import get_prompt
_gptcache = Cache()
if self.init_gptcache_func is not None:
sig = inspect.signature(self.init_gptcache_func)
if len(sig.parameters) == 2:
self.init_gptcache_func(_gptcache, llm_string) # type: ignore[call-arg]
else:
self.init_gptcache_func(_gptcache) # type: ignore[call-arg]
else:
_gptcache.init(
pre_embedding_func=get_prompt,
data_manager=get_data_manager(data_path=llm_string),
)
self.gptcache_dict[llm_string] = _gptcache
return _gptcache
def _get_gptcache(self, llm_string: str) -> Any:
"""Get a cache object.
When the corresponding llm model cache does not exist, it will be created."""
_gptcache = self.gptcache_dict.get(llm_string)
if _gptcache is None:
_gptcache = self._new_gptcache(llm_string)
return _gptcache
[docs] def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up the cache data.
First, retrieve the corresponding cache object using the `llm_string` parameter,
and then retrieve the data from the cache based on the `prompt`.
"""
from gptcache.adapter.api import get
_gptcache = self.gptcache_dict.get(llm_string, None)
if _gptcache is None:
return None
res = get(prompt, cache_obj=_gptcache)
if res:
return [
Generation(**generation_dict) for generation_dict in json.loads(res)
]
return None
[docs] def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache.
First, retrieve the corresponding cache object using the `llm_string` parameter,
and then store the `prompt` and `return_val` in the cache object.
"""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"GPTCache only supports caching of normal LLM generations, "
f"got {type(gen)}"
)
from gptcache.adapter.api import put
_gptcache = self._get_gptcache(llm_string)
handled_data = json.dumps([generation.dict() for generation in return_val])
put(prompt, handled_data, cache_obj=_gptcache)
return None
[docs] def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
from gptcache import Cache
for gptcache_instance in self.gptcache_dict.values():
gptcache_instance = cast(Cache, gptcache_instance)
gptcache_instance.flush()
self.gptcache_dict.clear()
def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str) -> None:
"""Create cache if it doesn't exist.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
from momento.responses import CreateCache
create_cache_response = cache_client.create_cache(cache_name)
if isinstance(create_cache_response, CreateCache.Success) or isinstance(
create_cache_response, CreateCache.CacheAlreadyExists
):
return None
elif isinstance(create_cache_response, CreateCache.Error):
raise create_cache_response.inner_exception
else:
raise Exception(f"Unexpected response cache creation: {create_cache_response}")
def _validate_ttl(ttl: Optional[timedelta]) -> None:
if ttl is not None and ttl <= timedelta(seconds=0):
raise ValueError(f"ttl must be positive but was {ttl}.")
[docs]class MomentoCache(BaseCache):
"""Cache that uses Momento as a backend. See https://gomomento.com/"""
def __init__(
self,
cache_client: momento.CacheClient,
cache_name: str,
*,
ttl: Optional[timedelta] = None,
ensure_cache_exists: bool = True,
):
"""Instantiate a prompt cache using Momento as a backend.
Note: to instantiate the cache client passed to MomentoCache,
you must have a Momento account. See https://gomomento.com/.
Args:
cache_client (CacheClient): The Momento cache client.
cache_name (str): The name of the cache to use to store the data.
ttl (Optional[timedelta], optional): The time to live for the cache items.
Defaults to None, i.e. use the client default TTL.
ensure_cache_exists (bool, optional): Create the cache if it doesn't
exist. Defaults to True.
Raises:
ImportError: Momento python package is not installed.
TypeError: cache_client is not of type momento.CacheClient
ValueError: ttl is non-null and non-positive
"""
try:
from momento import CacheClient
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if not isinstance(cache_client, CacheClient):
raise TypeError("cache_client must be a momento.CacheClient object.")
_validate_ttl(ttl)
if ensure_cache_exists:
_ensure_cache_exists(cache_client, cache_name)
self.cache_client = cache_client
self.cache_name = cache_name
self.ttl = ttl
[docs] @classmethod
def from_client_params(
cls,
cache_name: str,
ttl: timedelta,
*,
configuration: Optional[momento.config.Configuration] = None,
auth_token: Optional[str] = None,
**kwargs: Any,
) -> MomentoCache:
"""Construct cache from CacheClient parameters."""
try:
from momento import CacheClient, Configurations, CredentialProvider
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if configuration is None:
configuration = Configurations.Laptop.v1()
auth_token = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN")
credentials = CredentialProvider.from_string(auth_token)
cache_client = CacheClient(configuration, credentials, default_ttl=ttl)
return cls(cache_client, cache_name, ttl=ttl, **kwargs)
def __key(self, prompt: str, llm_string: str) -> str:
"""Compute cache key from prompt and associated model and settings.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model version and settings.
Returns:
str: The cache key.
"""
return _hash(prompt + llm_string)
[docs] def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Lookup llm generations in cache by prompt and associated model and settings.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model version and settings.
Raises:
SdkException: Momento service or network error
Returns:
Optional[RETURN_VAL_TYPE]: A list of language model generations.
"""
from momento.responses import CacheGet
generations: RETURN_VAL_TYPE = []
get_response = self.cache_client.get(
self.cache_name, self.__key(prompt, llm_string)
)
if isinstance(get_response, CacheGet.Hit):
value = get_response.value_string
generations = _load_generations_from_json(value)
elif isinstance(get_response, CacheGet.Miss):
pass
elif isinstance(get_response, CacheGet.Error):
raise get_response.inner_exception
return generations if generations else None
[docs] def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Store llm generations in cache.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model string.
return_val (RETURN_VAL_TYPE): A list of language model generations.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"Momento only supports caching of normal LLM generations, "
f"got {type(gen)}"
)
key = self.__key(prompt, llm_string)
value = _dump_generations_to_json(return_val)
set_response = self.cache_client.set(self.cache_name, key, value, self.ttl)
from momento.responses import CacheSet
if isinstance(set_response, CacheSet.Success):
pass
elif isinstance(set_response, CacheSet.Error):
raise set_response.inner_exception
else:
raise Exception(f"Unexpected response: {set_response}")
[docs] def clear(self, **kwargs: Any) -> None:
"""Clear the cache.
Raises:
SdkException: Momento service or network error
"""
from momento.responses import CacheFlush
flush_response = self.cache_client.flush_cache(self.cache_name)
if isinstance(flush_response, CacheFlush.Success):
pass
elif isinstance(flush_response, CacheFlush.Error):
raise flush_response.inner_exception
https://api.python.langchain.com/en/latest/_modules/langchain/cache.html
Source code for langchain.math_utils
"""Math utils."""
from typing import List, Optional, Tuple, Union
import numpy as np
Matrix = Union[List[List[float]], List[np.ndarray], np.ndarray]
[docs]def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray:
"""Row-wise cosine similarity between two equal-width matrices."""
if len(X) == 0 or len(Y) == 0:
return np.array([])
X = np.array(X)
Y = np.array(Y)
if X.shape[1] != Y.shape[1]:
raise ValueError(
f"Number of columns in X and Y must be the same. X has shape {X.shape} "
f"and Y has shape {Y.shape}."
)
X_norm = np.linalg.norm(X, axis=1)
Y_norm = np.linalg.norm(Y, axis=1)
similarity = np.dot(X, Y.T) / np.outer(X_norm, Y_norm)
similarity[np.isnan(similarity) | np.isinf(similarity)] = 0.0
return similarity
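A small numeric sketch:
X = [[1.0, 0.0], [0.0, 1.0]]
Y = [[1.0, 0.0]]
cosine_similarity(X, Y)  # array([[1.], [0.]]): row i compares X[i] to each row of Y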
[docs]def cosine_similarity_top_k(
X: Matrix,
Y: Matrix,
top_k: Optional[int] = 5,
score_threshold: Optional[float] = None,
) -> Tuple[List[Tuple[int, int]], List[float]]:
"""Row-wise cosine similarity with optional top-k and score threshold filtering.
Args:
X: Matrix.
Y: Matrix, same width as X.
top_k: Max number of results to return.
score_threshold: Minimum cosine similarity of results.
Returns:
Tuple of two lists. First contains two-tuples of indices (X_idx, Y_idx),
second contains corresponding cosine similarities.
"""
if len(X) == 0 or len(Y) == 0:
return [], []
score_array = cosine_similarity(X, Y)
sorted_idxs = score_array.flatten().argsort()[::-1]
top_k = top_k or len(sorted_idxs)
top_idxs = sorted_idxs[:top_k]
score_threshold = score_threshold or -1.0
top_idxs = top_idxs[score_array.flatten()[top_idxs] > score_threshold]
ret_idxs = [(x // score_array.shape[1], x % score_array.shape[1]) for x in top_idxs]
scores = score_array.flatten()[top_idxs].tolist()
return ret_idxs, scores
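A self-contained sketch of the top-k variant:
idxs, scores = cosine_similarity_top_k([[1.0, 0.0], [0.0, 1.0]], [[1.0, 0.0]], top_k=1)
# idxs == [(0, 0)] (X row 0 vs Y row 0), scores == [1.0]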
https://api.python.langchain.com/en/latest/_modules/langchain/math_utils.html
Source code for langchain.example_generator
"""Utility functions for working with prompts."""
from typing import List
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate
TEST_GEN_TEMPLATE_SUFFIX = "Add another example."
[docs]def generate_example(
examples: List[dict], llm: BaseLanguageModel, prompt_template: PromptTemplate
) -> str:
"""Return another example given a list of examples for a prompt."""
prompt = FewShotPromptTemplate(
examples=examples,
suffix=TEST_GEN_TEMPLATE_SUFFIX,
input_variables=[],
example_prompt=prompt_template,
)
chain = LLMChain(llm=llm, prompt=prompt)
return chain.predict()
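A sketch of calling it with the deterministic fake LLM from `langchain.llms.fake`, so no API key is needed; the prompt fields and examples are illustrative:
from langchain.llms.fake import FakeListLLM
example_prompt = PromptTemplate(
    input_variables=["question", "answer"],
    template="Q: {question}\nA: {answer}",
)
examples = [{"question": "What is 2 + 2?", "answer": "4"}]
llm = FakeListLLM(responses=["Q: What is 3 + 3?\nA: 6"])
new_example = generate_example(examples, llm, example_prompt)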
https://api.python.langchain.com/en/latest/_modules/langchain/example_generator.html
Source code for langchain.schema
"""Common schema objects."""
from __future__ import annotations
import warnings
from abc import ABC, abstractmethod
from dataclasses import dataclass
from inspect import signature
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generic,
List,
NamedTuple,
Optional,
Sequence,
TypeVar,
Union,
)
from uuid import UUID
from pydantic import BaseModel, Field, root_validator
from langchain.load.serializable import Serializable
if TYPE_CHECKING:
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
Callbacks,
)
RUN_KEY = "__run"
[docs]def get_buffer_string(
messages: List[BaseMessage], human_prefix: str = "Human", ai_prefix: str = "AI"
) -> str:
"""Get buffer string of messages."""
string_messages = []
for m in messages:
if isinstance(m, HumanMessage):
role = human_prefix
elif isinstance(m, AIMessage):
role = ai_prefix
elif isinstance(m, SystemMessage):
role = "System"
elif isinstance(m, FunctionMessage):
role = "Function"
elif isinstance(m, ChatMessage):
role = m.role
else:
raise ValueError(f"Got unsupported message type: {m}")
message = f"{role}: {m.content}"
if isinstance(m, AIMessage) and "function_call" in m.additional_kwargs:
message += f"{m.additional_kwargs['function_call']}"
string_messages.append(message)
return "\n".join(string_messages)
@dataclass
class AgentAction:
"""Agent's action to take."""
tool: str
tool_input: Union[str, dict]
log: str
[docs]class AgentFinish(NamedTuple):
"""Agent's return value."""
return_values: dict
log: str
[docs]class Generation(Serializable):
"""Output of a single generation."""
text: str
"""Generated text output."""
generation_info: Optional[Dict[str, Any]] = None
"""Raw generation info response from the provider"""
"""May include things like reason for finishing (e.g. in OpenAI)"""
# TODO: add log probs
@property
def lc_serializable(self) -> bool:
"""This class is LangChain serializable."""
return True
[docs]class BaseMessage(Serializable):
"""Message object."""
content: str
additional_kwargs: dict = Field(default_factory=dict)
@property
@abstractmethod
def type(self) -> str:
"""Type of the message, used for serialization."""
@property
def lc_serializable(self) -> bool:
"""This class is LangChain serializable."""
return True
[docs]class HumanMessage(BaseMessage):
"""Type of message that is spoken by the human."""
example: bool = False
@property
def type(self) -> str:
"""Type of the message, used for serialization."""
return "human"
[docs]class AIMessage(BaseMessage):
"""Type of message that is spoken by the AI."""
example: bool = False
@property
def type(self) -> str:
"""Type of the message, used for serialization."""
return "ai"
[docs]class SystemMessage(BaseMessage):
"""Type of message that is a system message."""
@property
def type(self) -> str:
"""Type of the message, used for serialization."""
return "system"
[docs]class FunctionMessage(BaseMessage):
name: str
@property
def type(self) -> str:
"""Type of the message, used for serialization."""
return "function"
[docs]class ChatMessage(BaseMessage):
"""Type of message with arbitrary speaker."""
role: str
@property
def type(self) -> str:
"""Type of the message, used for serialization."""
return "chat"
def _message_to_dict(message: BaseMessage) -> dict:
return {"type": message.type, "data": message.dict()}
[docs]def messages_to_dict(messages: List[BaseMessage]) -> List[dict]:
"""Convert messages to dict.
Args:
messages: List of messages to convert.
Returns:
List of dicts.
"""
return [_message_to_dict(m) for m in messages]
def _message_from_dict(message: dict) -> BaseMessage:
_type = message["type"]
if _type == "human":
return HumanMessage(**message["data"])
elif _type == "ai":
return AIMessage(**message["data"])
elif _type == "system":
return SystemMessage(**message["data"])
elif _type == "chat":
return ChatMessage(**message["data"])
else:
raise ValueError(f"Got unexpected type: {_type}")
[docs]def messages_from_dict(messages: List[dict]) -> List[BaseMessage]:
"""Convert messages from dict.
Args:
messages: List of messages (dicts) to convert.
Returns:
List of messages (BaseMessages).
"""
return [_message_from_dict(m) for m in messages]
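Together with `messages_to_dict`, this gives a simple serialization round trip:
msgs = [HumanMessage(content="hi"), AIMessage(content="hello")]
assert messages_from_dict(messages_to_dict(msgs)) == msgs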
[docs]class ChatGeneration(Generation):
"""Output of a single generation."""
text = ""
message: BaseMessage
[docs] @root_validator
def set_text(cls, values: Dict[str, Any]) -> Dict[str, Any]:
values["text"] = values["message"].content
return values
[docs]class RunInfo(BaseModel):
"""Class that contains all relevant metadata for a Run."""
run_id: UUID
[docs]class ChatResult(BaseModel):
"""Class that contains all relevant information for a Chat Result."""
generations: List[ChatGeneration]
"""List of the things generated."""
llm_output: Optional[dict] = None
"""For arbitrary LLM provider specific output."""
[docs]class LLMResult(BaseModel):
"""Class that contains all relevant information for an LLM Result."""
generations: List[List[Generation]]
"""List of the things generated. This is List[List[]] because
each input could have multiple generations."""
llm_output: Optional[dict] = None
"""For arbitrary LLM provider specific output."""
run: Optional[List[RunInfo]] = None
"""Run metadata."""
[docs] def flatten(self) -> List[LLMResult]:
"""Flatten generations into a single list."""
llm_results = []
for i, gen_list in enumerate(self.generations):
# Avoid double counting tokens in OpenAICallback
if i == 0:
llm_results.append(
LLMResult(
generations=[gen_list],
llm_output=self.llm_output,
)
)
else:
if self.llm_output is not None:
llm_output = self.llm_output.copy()
llm_output["token_usage"] = dict()
else:
llm_output = None
llm_results.append(
LLMResult(
generations=[gen_list],
llm_output=llm_output,
)
)
return llm_results
def __eq__(self, other: object) -> bool:
if not isinstance(other, LLMResult):
return NotImplemented
return (
self.generations == other.generations
and self.llm_output == other.llm_output
)
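A sketch of `flatten` on a result holding generations for two inputs:
res = LLMResult(generations=[[Generation(text="a")], [Generation(text="b")]])
flat = res.flatten()
assert len(flat) == 2
assert flat[1].generations == [[Generation(text="b")]]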
[docs]class PromptValue(Serializable, ABC):
[docs] @abstractmethod
def to_string(self) -> str:
"""Return prompt as string."""
[docs] @abstractmethod
def to_messages(self) -> List[BaseMessage]:
"""Return prompt as messages."""
[docs]class BaseMemory(Serializable, ABC):
"""Base interface for memory in chains."""
[docs] class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@property
@abstractmethod
def memory_variables(self) -> List[str]:
"""Input keys this memory class will load dynamically."""
[docs] @abstractmethod
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return key-value pairs given the text input to the chain.
If None, return all memories
"""
[docs] @abstractmethod
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save the context of this model run to memory."""
[docs] @abstractmethod
def clear(self) -> None:
"""Clear memory contents."""
[docs]class BaseChatMessageHistory(ABC):
"""Base interface for chat message history
See `ChatMessageHistory` for default implementation.
"""
"""
Example:
.. code-block:: python
class FileChatMessageHistory(BaseChatMessageHistory):
storage_path: str
session_id: str
@property
def messages(self):
with open(os.path.join(self.storage_path, self.session_id), encoding="utf-8") as f:
messages = json.loads(f.read())
return messages_from_dict(messages)
def add_message(self, message: BaseMessage) -> None:
messages = messages_to_dict(self.messages)
messages.append(_message_to_dict(message))
with open(os.path.join(self.storage_path, self.session_id), "w") as f:
json.dump(messages, f)
def clear(self):
with open(os.path.join(self.storage_path, self.session_id), "w") as f:
f.write("[]")
"""
messages: List[BaseMessage]
[docs] def add_user_message(self, message: str) -> None:
"""Add a user message to the store"""
self.add_message(HumanMessage(content=message))
[docs] def add_ai_message(self, message: str) -> None:
"""Add an AI message to the store"""
self.add_message(AIMessage(content=message))
[docs] def add_message(self, message: BaseMessage) -> None:
"""Add a self-created message to the store"""
raise NotImplementedError
[docs] @abstractmethod
def clear(self) -> None:
"""Remove all messages from the store"""
[docs]class Document(Serializable):
"""Interface for interacting with a document."""
page_content: str
metadata: dict = Field(default_factory=dict)
[docs]class BaseRetriever(ABC):
"""Base interface for a retriever."""
_new_arg_supported: bool = False
_expects_other_args: bool = False
def __init_subclass__(cls, **kwargs: Any) -> None:
super().__init_subclass__(**kwargs)
# Version upgrade for old retrievers that implemented the public
# methods directly.
if cls.get_relevant_documents != BaseRetriever.get_relevant_documents:
warnings.warn(
"Retrievers must implement abstract `_get_relevant_documents` method"
" instead of `get_relevant_documents`",
DeprecationWarning,
)
swap = cls.get_relevant_documents
cls.get_relevant_documents = ( # type: ignore[assignment]
BaseRetriever.get_relevant_documents
)
cls._get_relevant_documents = swap # type: ignore[assignment]
if (
hasattr(cls, "aget_relevant_documents")
and cls.aget_relevant_documents != BaseRetriever.aget_relevant_documents
):
warnings.warn(
"Retrievers must implement abstract `_aget_relevant_documents` method"
" instead of `aget_relevant_documents`",
DeprecationWarning,
)
aswap = cls.aget_relevant_documents
cls.aget_relevant_documents = ( # type: ignore[assignment]
BaseRetriever.aget_relevant_documents
)
cls._aget_relevant_documents = aswap # type: ignore[assignment]
parameters = signature(cls._get_relevant_documents).parameters
cls._new_arg_supported = parameters.get("run_manager") is not None
# If a V1 retriever broke the interface and expects additional arguments
cls._expects_other_args = (not cls._new_arg_supported) and len(parameters) > 2
@abstractmethod
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any
) -> List[Document]:
"""Get documents relevant to a query.
Args:
query: string to find relevant documents for
run_manager: The callbacks handler to use
Returns:
List of relevant documents
"""
@abstractmethod
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
"""Asynchronously get documents relevant to a query.
Args:
query: string to find relevant documents for
run_manager: The callbacks handler to use
Returns:
List of relevant documents
"""
[docs] def get_relevant_documents(
self, query: str, *, callbacks: Callbacks = None, **kwargs: Any
) -> List[Document]:
"""Retrieve documents relevant to a query.
Args:
query: string to find relevant documents for
callbacks: Callback manager or list of callbacks
Returns:
List of relevant documents
"""
from langchain.callbacks.manager import CallbackManager
callback_manager = CallbackManager.configure(
callbacks, None, verbose=kwargs.get("verbose", False)
)
run_manager = callback_manager.on_retriever_start(
query,
**kwargs,
)
try:
if self._new_arg_supported:
result = self._get_relevant_documents(
query, run_manager=run_manager, **kwargs
)
elif self._expects_other_args:
result = self._get_relevant_documents(query, **kwargs)
else:
result = self._get_relevant_documents(query) # type: ignore[call-arg]
except Exception as e:
run_manager.on_retriever_error(e)
raise e
else:
run_manager.on_retriever_end(
result,
**kwargs,
)
return result
[docs] async def aget_relevant_documents(
self, query: str, *, callbacks: Callbacks = None, **kwargs: Any
) -> List[Document]:
"""Asynchronously get documents relevant to a query.
Args:
query: string to find relevant documents for
callbacks: Callback manager or list of callbacks
Returns:
List of relevant documents
"""
from langchain.callbacks.manager import AsyncCallbackManager
callback_manager = AsyncCallbackManager.configure(
callbacks, None, verbose=kwargs.get("verbose", False)
)
run_manager = await callback_manager.on_retriever_start(
query,
**kwargs,
)
try:
if self._new_arg_supported:
result = await self._aget_relevant_documents(
query, run_manager=run_manager, **kwargs
)
elif self._expects_other_args:
result = await self._aget_relevant_documents(query, **kwargs)
else:
result = await self._aget_relevant_documents(
query, # type: ignore[call-arg]
)
except Exception as e:
await run_manager.on_retriever_error(e)
raise e
else:
await run_manager.on_retriever_end(
result,
**kwargs,
)
return result
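A minimal sketch of a retriever implementing the private interface described above; the keyword-matching logic and the sample document are purely illustrative:
class KeywordRetriever(BaseRetriever):
    """Return documents whose content contains the query (case-insensitive)."""
    def __init__(self, docs: List[Document]):
        self.docs = docs
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any
    ) -> List[Document]:
        return [d for d in self.docs if query.lower() in d.page_content.lower()]
    async def _aget_relevant_documents(
        self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun, **kwargs: Any
    ) -> List[Document]:
        return [d for d in self.docs if query.lower() in d.page_content.lower()]

retriever = KeywordRetriever([Document(page_content="LangChain caching docs")])
retriever.get_relevant_documents("caching")  # -> [Document(page_content='LangChain caching docs')]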
# For backwards compatibility
Memory = BaseMemory
T = TypeVar("T")
[docs]class BaseLLMOutputParser(Serializable, ABC, Generic[T]):
[docs] @abstractmethod
def parse_result(self, result: List[Generation]) -> T:
"""Parse LLM Result."""
[docs]class BaseOutputParser(BaseLLMOutputParser, ABC, Generic[T]):
"""Class to parse the output of an LLM call.
Output parsers help structure language model responses.
"""
[docs] def parse_result(self, result: List[Generation]) -> T:
return self.parse(result[0].text)
[docs] @abstractmethod
def parse(self, text: str) -> T:
"""Parse the output of an LLM call.
A method which takes in a string (assumed to be the output of a language model)
and parses it into some structure.
Args:
text: output of language model
Returns:
structured output
"""
[docs] def parse_with_prompt(self, completion: str, prompt: PromptValue) -> Any:
"""Optional method to parse the output of an LLM call with a prompt.
The prompt is largely provided in the event the OutputParser wants
to retry or fix the output in some way, and needs information from
the prompt to do so.
Args:
completion: output of language model
prompt: prompt value
Returns:
structured output
"""
return self.parse(completion)
[docs] def get_format_instructions(self) -> str:
"""Instructions on how the LLM output should be formatted."""
raise NotImplementedError
@property
def _type(self) -> str:
"""Return the type key."""
raise NotImplementedError(
f"_type property is not implemented in class {self.__class__.__name__}."
" This is required for serialization."
)
[docs] def dict(self, **kwargs: Any) -> Dict:
"""Return dictionary representation of output parser."""
output_parser_dict = super().dict()
output_parser_dict["_type"] = self._type
return output_parser_dict
[docs]class NoOpOutputParser(BaseOutputParser[str]):
"""Output parser that just returns the text as is."""
@property
def lc_serializable(self) -> bool:
return True
@property
def _type(self) -> str:
return "default"
[docs] def parse(self, text: str) -> str:
return text
[docs]class OutputParserException(ValueError):
"""Exception that output parsers should raise to signify a parsing error.
This exists to differentiate parsing errors from other code or execution
errors that may also arise inside the output parser. OutputParserExceptions
can be caught and handled in ways that fix the parsing error, while other
errors will be raised.
"""
def __init__(
self,
error: Any,
observation: str | None = None,
llm_output: str | None = None,
send_to_llm: bool = False,
):
super(OutputParserException, self).__init__(error)
if send_to_llm:
if observation is None or llm_output is None:
raise ValueError(
"Arguments 'observation' & 'llm_output'"
" are required if 'send_to_llm' is True"
)
self.observation = observation
self.llm_output = llm_output
self.send_to_llm = send_to_llm
[docs]class BaseDocumentTransformer(ABC):
"""Base interface for transforming documents."""
[docs] @abstractmethod
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Transform a list of documents."""
[docs] @abstractmethod
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Asynchronously transform a list of documents."""
Source code for langchain.input
"""Handle chained inputs."""
from typing import Dict, List, Optional, TextIO
_TEXT_COLOR_MAPPING = {
"blue": "36;1",
"yellow": "33;1",
"pink": "38;5;200",
"green": "32;1",
"red": "31;1",
}
[docs]def get_color_mapping(
items: List[str], excluded_colors: Optional[List] = None
) -> Dict[str, str]:
"""Get mapping for items to a support color."""
colors = list(_TEXT_COLOR_MAPPING.keys())
if excluded_colors is not None:
colors = [c for c in colors if c not in excluded_colors]
color_mapping = {item: colors[i % len(colors)] for i, item in enumerate(items)}
return color_mapping
[docs]def get_colored_text(text: str, color: str) -> str:
"""Get colored text."""
color_str = _TEXT_COLOR_MAPPING[color]
return f"\u001b[{color_str}m\033[1;3m{text}\u001b[0m"
[docs]def get_bolded_text(text: str) -> str:
"""Get bolded text."""
return f"\033[1m{text}\033[0m"
[docs]def print_text(
text: str, color: Optional[str] = None, end: str = "", file: Optional[TextIO] = None
) -> None:
"""Print text with highlighting and no end characters."""
text_to_print = get_colored_text(text, color) if color else text
print(text_to_print, end=end, file=file)
if file:
file.flush()  # ensure all printed content is written to the file
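A short usage sketch for this module (illustrative, not part of the source):
# Assign each label a distinct color, then print with ANSI styling.
mapping = get_color_mapping(["search", "calculator"], excluded_colors=["red"])
for item, color in mapping.items():
    print_text(get_bolded_text(item) + ": ", end="")
    print_text(color, color=color, end="\n")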
Source code for langchain.sql_database
"""SQLAlchemy wrapper around a database."""
from __future__ import annotations
import warnings
from typing import Any, Iterable, List, Optional
import sqlalchemy
from sqlalchemy import MetaData, Table, create_engine, inspect, select, text
from sqlalchemy.engine import Engine
from sqlalchemy.exc import ProgrammingError, SQLAlchemyError
from sqlalchemy.schema import CreateTable
from langchain import utils
def _format_index(index: sqlalchemy.engine.interfaces.ReflectedIndex) -> str:
return (
f'Name: {index["name"]}, Unique: {index["unique"]},'
f' Columns: {str(index["column_names"])}'
)
[docs]def truncate_word(content: Any, *, length: int, suffix: str = "...") -> str:
"""
Truncate a string to a certain number of words, based on the max string
length.
"""
if not isinstance(content, str) or length <= 0:
return content
if len(content) <= length:
return content
return content[: length - len(suffix)].rsplit(" ", 1)[0] + suffix
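For example (illustrative), the cut lands on a word boundary and the suffix is appended, so the result never exceeds the requested length:
assert truncate_word("the quick brown fox", length=15) == "the quick..."
assert truncate_word("short", length=15) == "short"  # already fits, unchanged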
class SQLDatabase:
"""SQLAlchemy wrapper around a database."""
def __init__(
self,
engine: Engine,
schema: Optional[str] = None,
metadata: Optional[MetaData] = None,
ignore_tables: Optional[List[str]] = None,
include_tables: Optional[List[str]] = None,
sample_rows_in_table_info: int = 3,
indexes_in_table_info: bool = False,
custom_table_info: Optional[dict] = None,
view_support: bool = False,
max_string_length: int = 300,
):
"""Create engine from database URI."""
self._engine = engine
self._schema = schema
if include_tables and ignore_tables:
raise ValueError("Cannot specify both include_tables and ignore_tables")
self._inspector = inspect(self._engine)
# include views as well as tables in the set of all tables when
# view_support is True
self._all_tables = set(
self._inspector.get_table_names(schema=schema)
+ (self._inspector.get_view_names(schema=schema) if view_support else [])
)
self._include_tables = set(include_tables) if include_tables else set()
if self._include_tables:
missing_tables = self._include_tables - self._all_tables
if missing_tables:
raise ValueError(
f"include_tables {missing_tables} not found in database"
)
self._ignore_tables = set(ignore_tables) if ignore_tables else set()
if self._ignore_tables:
missing_tables = self._ignore_tables - self._all_tables
if missing_tables:
raise ValueError(
f"ignore_tables {missing_tables} not found in database"
)
usable_tables = self.get_usable_table_names()
self._usable_tables = set(usable_tables) if usable_tables else self._all_tables
if not isinstance(sample_rows_in_table_info, int):
raise TypeError("sample_rows_in_table_info must be an integer")
self._sample_rows_in_table_info = sample_rows_in_table_info
self._indexes_in_table_info = indexes_in_table_info
self._custom_table_info = custom_table_info
if self._custom_table_info:
if not isinstance(self._custom_table_info, dict):
raise TypeError(
"table_info must be a dictionary with table names as keys and the "
"desired table info as values"
)
# only keep the tables that are also present in the database
intersection = set(self._custom_table_info).intersection(self._all_tables)
self._custom_table_info = dict(
(table, self._custom_table_info[table])
for table in self._custom_table_info
if table in intersection
)
self._max_string_length = max_string_length
self._metadata = metadata or MetaData()
# include views in the reflection when view_support is True
self._metadata.reflect(
views=view_support,
bind=self._engine,
only=list(self._usable_tables),
schema=self._schema,
)
@classmethod
def from_uri(
cls, database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any
) -> SQLDatabase:
"""Construct a SQLAlchemy engine from URI."""
_engine_args = engine_args or {}
return cls(create_engine(database_uri, **_engine_args), **kwargs)
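A minimal construction sketch (the SQLite file and table name are illustrative assumptions):
db = SQLDatabase.from_uri(
    "sqlite:///example.db",
    include_tables=["users"],      # restrict reflection to these tables
    sample_rows_in_table_info=2,   # append two sample rows per table
)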
@classmethod
def from_databricks(
cls,
catalog: str,
schema: str,
host: Optional[str] = None,
api_token: Optional[str] = None,
warehouse_id: Optional[str] = None,
cluster_id: Optional[str] = None,
engine_args: Optional[dict] = None,
**kwargs: Any,
) -> SQLDatabase:
"""
Class method to create an SQLDatabase instance from a Databricks connection.
This method requires the 'databricks-sql-connector' package. If not installed,
it can be added using `pip install databricks-sql-connector`.
Args:
catalog (str): The catalog name in the Databricks database.
schema (str): The schema name in the catalog.
host (Optional[str]): The Databricks workspace hostname, excluding
'https://' part. If not provided, it attempts to fetch from the
environment variable 'DATABRICKS_HOST'. If still unavailable and if
running in a Databricks notebook, it defaults to the current workspace
hostname. Defaults to None.
api_token (Optional[str]): The Databricks personal access token for
accessing the Databricks SQL warehouse or the cluster. If not provided,
it attempts to fetch from 'DATABRICKS_TOKEN'. If still unavailable
and running in a Databricks notebook, a temporary token for the current
user is generated. Defaults to None.
warehouse_id (Optional[str]): The warehouse ID in the Databricks SQL. If
provided, the method configures the connection to use this warehouse.
Cannot be used with 'cluster_id'. Defaults to None.
cluster_id (Optional[str]): The cluster ID in the Databricks Runtime. If
provided, the method configures the connection to use this cluster.
Cannot be used with 'warehouse_id'. If running in a Databricks notebook
and both 'warehouse_id' and 'cluster_id' are None, it uses the ID of the
cluster the notebook is attached to. Defaults to None.
engine_args (Optional[dict]): The arguments to be used when connecting
Databricks. Defaults to None.
**kwargs (Any): Additional keyword arguments for the `from_uri` method.
Returns:
SQLDatabase: An instance of SQLDatabase configured with the provided
Databricks connection details.
Raises:
ValueError: If 'databricks-sql-connector' is not found, or if both
'warehouse_id' and 'cluster_id' are provided, or if neither
'warehouse_id' nor 'cluster_id' are provided and it's not executing
inside a Databricks notebook.
"""
try:
from databricks import sql # noqa: F401
except ImportError:
raise ValueError(
"databricks-sql-connector package not found, please install with"
" `pip install databricks-sql-connector`"
)
context = None
try:
from dbruntime.databricks_repl_context import get_context
context = get_context()
except ImportError:
pass
default_host = context.browserHostName if context else None
if host is None:
host = utils.get_from_env("host", "DATABRICKS_HOST", default_host)
default_api_token = context.apiToken if context else None
if api_token is None:
api_token = utils.get_from_env(
"api_token", "DATABRICKS_TOKEN", default_api_token
)
if warehouse_id is None and cluster_id is None:
if context:
cluster_id = context.clusterId
else:
raise ValueError(
"Need to provide either 'warehouse_id' or 'cluster_id'."
)
if warehouse_id and cluster_id:
raise ValueError("Can't have both 'warehouse_id' or 'cluster_id'.")
if warehouse_id:
http_path = f"/sql/1.0/warehouses/{warehouse_id}"
else:
http_path = f"/sql/protocolv1/o/0/{cluster_id}"
uri = (
f"databricks://token:{api_token}@{host}?"
f"http_path={http_path}&catalog={catalog}&schema={schema}"
)
return cls.from_uri(database_uri=uri, engine_args=engine_args, **kwargs)
@property
def dialect(self) -> str:
"""Return string representation of dialect to use."""
return self._engine.dialect.name
def get_usable_table_names(self) -> Iterable[str]:
"""Get names of tables available."""
if self._include_tables:
return sorted(self._include_tables)
return sorted(self._all_tables - self._ignore_tables)
def get_table_names(self) -> Iterable[str]:
"""Get names of tables available."""
warnings.warn(
"This method is deprecated - please use `get_usable_table_names`."
)
return self.get_usable_table_names()
@property
def table_info(self) -> str:
"""Information about all tables in the database."""
return self.get_table_info()
def get_table_info(self, table_names: Optional[List[str]] = None) -> str:
"""Get information about specified tables.
Follows best practices as specified in: Rajkumar et al, 2022
(https://arxiv.org/abs/2204.00498)
If `sample_rows_in_table_info` is positive, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
"""
all_table_names = self.get_usable_table_names()
if table_names is not None:
missing_tables = set(table_names).difference(all_table_names)
if missing_tables:
raise ValueError(f"table_names {missing_tables} not found in database")
all_table_names = table_names
meta_tables = [
tbl
for tbl in self._metadata.sorted_tables
if tbl.name in set(all_table_names)
and not (self.dialect == "sqlite" and tbl.name.startswith("sqlite_"))
]
tables = []
for table in meta_tables:
if self._custom_table_info and table.name in self._custom_table_info:
tables.append(self._custom_table_info[table.name])
continue
# add create table command
create_table = str(CreateTable(table).compile(self._engine))
table_info = f"{create_table.rstrip()}"
has_extra_info = (
self._indexes_in_table_info or self._sample_rows_in_table_info
)
if has_extra_info:
table_info += "\n\n/*"
if self._indexes_in_table_info:
table_info += f"\n{self._get_table_indexes(table)}\n"
if self._sample_rows_in_table_info:
table_info += f"\n{self._get_sample_rows(table)}\n"
if has_extra_info:
table_info += "*/"
tables.append(table_info)
tables.sort()
final_str = "\n\n".join(tables)
return final_str
def _get_table_indexes(self, table: Table) -> str:
indexes = self._inspector.get_indexes(table.name)
indexes_formatted = "\n".join(map(_format_index, indexes))
return f"Table Indexes:\n{indexes_formatted}"
def _get_sample_rows(self, table: Table) -> str:
# build the select command
command = select(table).limit(self._sample_rows_in_table_info)
# save the columns in string format
columns_str = "\t".join([col.name for col in table.columns])
try:
# get the sample rows
with self._engine.connect() as connection:
sample_rows_result = connection.execute(command) # type: ignore
# shorten values in the sample rows
sample_rows = list(
map(lambda ls: [str(i)[:100] for i in ls], sample_rows_result)
)
# save the sample rows in string format
sample_rows_str = "\n".join(["\t".join(row) for row in sample_rows])
# in some dialects when there are no rows in the table a
# 'ProgrammingError' is returned
except ProgrammingError:
sample_rows_str = ""
return (
f"{self._sample_rows_in_table_info} rows from {table.name} table:\n"
f"{columns_str}\n"
f"{sample_rows_str}"
)
def run(self, command: str, fetch: str = "all") -> str:
"""Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
"""
with self._engine.begin() as connection:
if self._schema is not None:
if self.dialect == "snowflake":
connection.exec_driver_sql(
f"ALTER SESSION SET search_path='{self._schema}'"
)
elif self.dialect == "bigquery":
connection.exec_driver_sql(f"SET @@dataset_id='{self._schema}'")
else:
connection.exec_driver_sql(f"SET search_path TO {self._schema}")
cursor = connection.execute(text(command))
if cursor.returns_rows:
if fetch == "all":
result = cursor.fetchall()
elif fetch == "one":
result = cursor.fetchone() # type: ignore
else:
raise ValueError("Fetch parameter must be either 'one' or 'all'")
# Convert column values to strings to avoid issues with SQLAlchemy
# truncating text
if isinstance(result, list):
return str(
[
tuple(
truncate_word(c, length=self._max_string_length)
for c in r
)
for r in result
]
)
return str(
tuple(
truncate_word(c, length=self._max_string_length) for c in result
)
)
return ""
def get_table_info_no_throw(self, table_names: Optional[List[str]] = None) -> str:
"""Get information about specified tables.
Follows best practices as specified in: Rajkumar et al, 2022
(https://arxiv.org/abs/2204.00498)
If `sample_rows_in_table_info` is positive, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
"""
try:
return self.get_table_info(table_names)
except ValueError as e:
"""Format the error message"""
return f"Error: {e}"
def run_no_throw(self, command: str, fetch: str = "all") -> str:
"""Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
If the statement throws an error, the error message is returned.
"""
try:
return self.run(command, fetch)
except SQLAlchemyError as e:
"""Format the error message"""
return f"Error: {e}"
Source code for langchain.base_language
"""Base class for all language models."""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, List, Optional, Sequence, Set
from langchain.callbacks.manager import Callbacks
from langchain.load.serializable import Serializable
from langchain.schema import BaseMessage, LLMResult, PromptValue, get_buffer_string
def _get_token_ids_default_method(text: str) -> List[int]:
"""Encode the text into token IDs."""
# TODO: this method may not be exact.
# TODO: this method may differ based on model (eg codex).
try:
from transformers import GPT2TokenizerFast
except ImportError:
raise ValueError(
"Could not import transformers python package. "
"This is needed in order to calculate get_token_ids. "
"Please install it with `pip install transformers`."
)
# create a GPT-2 tokenizer instance
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
# tokenize the text using the GPT-2 tokenizer
return tokenizer.encode(text)
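For example (requires `pip install transformers`; the GPT-2 vocabulary is downloaded on first use):
ids = _get_token_ids_default_method("hello world")
print(ids)       # GPT-2 BPE token IDs for "hello" and " world"
print(len(ids))  # 2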
[docs]class BaseLanguageModel(Serializable, ABC):
[docs] @abstractmethod
def generate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> LLMResult:
"""Take in a list of prompt values and return an LLMResult."""
[docs] @abstractmethod
async def agenerate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> LLMResult:
"""Take in a list of prompt values and return an LLMResult."""
[docs] @abstractmethod
def predict(
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
) -> str:
"""Predict text from text."""
[docs] @abstractmethod
def predict_messages(
self,
messages: List[BaseMessage],
*,
stop: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> BaseMessage:
"""Predict message from messages."""
[docs] @abstractmethod
async def apredict(
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
) -> str:
"""Predict text from text."""
[docs] @abstractmethod
async def apredict_messages(
self,
messages: List[BaseMessage],
*,
stop: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> BaseMessage:
"""Predict message from messages."""
[docs] def get_token_ids(self, text: str) -> List[int]:
"""Get the token present in the text."""
return _get_token_ids_default_method(text)
[docs] def get_num_tokens(self, text: str) -> int:
"""Get the number of tokens present in the text."""
return len(self.get_token_ids(text))
[docs] def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
"""Get the number of tokens in the message."""
return sum([self.get_num_tokens(get_buffer_string([m])) for m in messages])
[docs] @classmethod
def all_required_field_names(cls) -> Set:
all_required_field_names = set()
for field in cls.__fields__.values():
all_required_field_names.add(field.name)
if field.has_alias:
all_required_field_names.add(field.alias)
return all_required_field_names
Source code for langchain.formatting
"""Utilities for formatting strings."""
from string import Formatter
from typing import Any, List, Mapping, Sequence, Union
[docs]class StrictFormatter(Formatter):
"""A subclass of formatter that checks for extra keys."""
[docs] def check_unused_args(
self,
used_args: Sequence[Union[int, str]],
args: Sequence,
kwargs: Mapping[str, Any],
) -> None:
"""Check to see if extra parameters are passed."""
extra = set(kwargs).difference(used_args)
if extra:
raise KeyError(extra)
[docs] def vformat(
self, format_string: str, args: Sequence, kwargs: Mapping[str, Any]
) -> str:
"""Check that no arguments are provided."""
if len(args) > 0:
raise ValueError(
"No arguments should be provided, "
"everything should be passed as keyword arguments."
)
return super().vformat(format_string, args, kwargs)
[docs] def validate_input_variables(
self, format_string: str, input_variables: List[str]
) -> None:
dummy_inputs = {input_variable: "foo" for input_variable in input_variables}
super().format(format_string, **dummy_inputs)
formatter = StrictFormatter()
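For example (illustrative), the strict formatter behaves like str.format but rejects unused keyword arguments and any positional arguments:
assert formatter.format("Hello {name}!", name="world") == "Hello world!"
formatter.validate_input_variables("Hello {name}!", ["name"])  # passes
try:
    formatter.format("Hello {name}!", name="world", extra=1)
except KeyError as err:
    print(err)  # {'extra'}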
Source code for langchain.text_splitter
"""Functionality for splitting text."""
from __future__ import annotations
import copy
import logging
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass
from enum import Enum
from typing import (
AbstractSet,
Any,
Callable,
Collection,
Dict,
Iterable,
List,
Literal,
Optional,
Sequence,
Tuple,
Type,
TypedDict,
TypeVar,
Union,
cast,
)
from langchain.docstore.document import Document
from langchain.schema import BaseDocumentTransformer
logger = logging.getLogger(__name__)
TS = TypeVar("TS", bound="TextSplitter")
def _split_text_with_regex(
text: str, separator: str, keep_separator: bool
) -> List[str]:
# Now that we have the separator, split the text
if separator:
if keep_separator:
# The parentheses in the pattern keep the delimiters in the result.
_splits = re.split(f"({separator})", text)
splits = [_splits[i] + _splits[i + 1] for i in range(1, len(_splits), 2)]
if len(_splits) % 2 == 0:
splits += _splits[-1:]
splits = [_splits[0]] + splits
else:
splits = text.split(separator)
else:
splits = list(text)
return [s for s in splits if s != ""]
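For example (illustrative), with keep_separator=True each delimiter is re-attached to the front of the piece that follows it:
text = "a\n\nb\n\nc"
print(_split_text_with_regex(text, "\n\n", keep_separator=True))
# -> ['a', '\n\nb', '\n\nc']
print(_split_text_with_regex(text, "\n\n", keep_separator=False))
# -> ['a', 'b', 'c']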
[docs]class TextSplitter(BaseDocumentTransformer, ABC):
"""Interface for splitting text into chunks."""
def __init__(
self,
chunk_size: int = 4000,
chunk_overlap: int = 200,
length_function: Callable[[str], int] = len,
keep_separator: bool = False,
add_start_index: bool = False,
) -> None:
"""Create a new TextSplitter.
Args:
chunk_size: Maximum size of chunks to return
chunk_overlap: Overlap in characters between chunks
length_function: Function that measures the length of given chunks
keep_separator: Whether to keep the separator in the chunks
add_start_index: If `True`, includes chunk's start index in metadata
"""
if chunk_overlap > chunk_size:
raise ValueError(
f"Got a larger chunk overlap ({chunk_overlap}) than chunk size "
f"({chunk_size}), should be smaller."
)
self._chunk_size = chunk_size
self._chunk_overlap = chunk_overlap
self._length_function = length_function
self._keep_separator = keep_separator
self._add_start_index = add_start_index
[docs] @abstractmethod
def split_text(self, text: str) -> List[str]:
"""Split text into multiple components."""
[docs] def create_documents(
self, texts: List[str], metadatas: Optional[List[dict]] = None
) -> List[Document]:
"""Create documents from a list of texts."""
_metadatas = metadatas or [{}] * len(texts)
documents = []
for i, text in enumerate(texts):
index = -1
for chunk in self.split_text(text):
metadata = copy.deepcopy(_metadatas[i])
if self._add_start_index:
index = text.find(chunk, index + 1)
metadata["start_index"] = index
new_doc = Document(page_content=chunk, metadata=metadata)
documents.append(new_doc)
return documents
[docs] def split_documents(self, documents: Iterable[Document]) -> List[Document]:
"""Split documents."""
texts, metadatas = [], []
for doc in documents:
texts.append(doc.page_content)
metadatas.append(doc.metadata)
return self.create_documents(texts, metadatas=metadatas)
def _join_docs(self, docs: List[str], separator: str) -> Optional[str]:
text = separator.join(docs)
text = text.strip()
if text == "":
return None
else:
return text
def _merge_splits(self, splits: Iterable[str], separator: str) -> List[str]:
# We now want to combine these smaller pieces into medium size
# chunks to send to the LLM.
separator_len = self._length_function(separator)
docs = []
current_doc: List[str] = []
total = 0
for d in splits:
_len = self._length_function(d)
if (
total + _len + (separator_len if len(current_doc) > 0 else 0)
> self._chunk_size
):
if total > self._chunk_size:
logger.warning(
f"Created a chunk of size {total}, "
f"which is longer than the specified {self._chunk_size}"
)
if len(current_doc) > 0:
doc = self._join_docs(current_doc, separator)
if doc is not None:
docs.append(doc)
# Keep popping from the front while the running total exceeds the
# chunk overlap, or while adding the next split would still push the
# chunk over the chunk size
while total > self._chunk_overlap or (
total + _len + (separator_len if len(current_doc) > 0 else 0)
> self._chunk_size
and total > 0
):
total -= self._length_function(current_doc[0]) + (
separator_len if len(current_doc) > 1 else 0
)
current_doc = current_doc[1:]
current_doc.append(d)
total += _len + (separator_len if len(current_doc) > 1 else 0)
doc = self._join_docs(current_doc, separator)
if doc is not None:
docs.append(doc)
return docs
[docs] @classmethod
def from_huggingface_tokenizer(cls, tokenizer: Any, **kwargs: Any) -> TextSplitter:
"""Text splitter that uses HuggingFace tokenizer to count length."""
try:
from transformers import PreTrainedTokenizerBase
if not isinstance(tokenizer, PreTrainedTokenizerBase):
raise ValueError(
"Tokenizer received was not an instance of PreTrainedTokenizerBase"
)
def _huggingface_tokenizer_length(text: str) -> int:
return len(tokenizer.encode(text))
except ImportError:
raise ValueError(
"Could not import transformers python package. "
"Please install it with `pip install transformers`."
)
return cls(length_function=_huggingface_tokenizer_length, **kwargs)
[docs] @classmethod
def from_tiktoken_encoder(
cls: Type[TS],
encoding_name: str = "gpt2",
model_name: Optional[str] = None,
allowed_special: Union[Literal["all"], AbstractSet[str]] = set(),
disallowed_special: Union[Literal["all"], Collection[str]] = "all",
**kwargs: Any,
) -> TS:
"""Text splitter that uses tiktoken encoder to count length."""
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"This is needed in order to calculate max_tokens_for_prompt. "
"Please install it with `pip install tiktoken`."
)
if model_name is not None:
enc = tiktoken.encoding_for_model(model_name)
else:
enc = tiktoken.get_encoding(encoding_name)
def _tiktoken_encoder(text: str) -> int:
return len(
enc.encode(
text,
allowed_special=allowed_special,
disallowed_special=disallowed_special,
)
)
if issubclass(cls, TokenTextSplitter):
extra_kwargs = {
"encoding_name": encoding_name,
"model_name": model_name,
"allowed_special": allowed_special,
"disallowed_special": disallowed_special,
}
kwargs = {**kwargs, **extra_kwargs}
return cls(length_function=_tiktoken_encoder, **kwargs)
[docs] def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Transform sequence of documents by splitting them."""
return self.split_documents(list(documents))
[docs] async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Asynchronously transform a sequence of documents by splitting them."""
raise NotImplementedError
[docs]class CharacterTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at characters."""
def __init__(self, separator: str = "\n\n", **kwargs: Any) -> None:
"""Create a new TextSplitter."""
super().__init__(**kwargs)
self._separator = separator
[docs] def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
splits = _split_text_with_regex(text, self._separator, self._keep_separator)
_separator = "" if self._keep_separator else self._separator
return self._merge_splits(splits, _separator)
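A usage sketch (illustrative sizes): split on blank lines into chunks of at most 20 characters with no overlap.
splitter = CharacterTextSplitter(separator="\n\n", chunk_size=20, chunk_overlap=0)
print(splitter.split_text("first paragraph\n\nsecond paragraph"))
# -> ['first paragraph', 'second paragraph']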
[docs]class LineType(TypedDict):
"""Line type as typed dict."""
metadata: Dict[str, str]
content: str
[docs]class HeaderType(TypedDict):
"""Header type as typed dict."""
level: int
name: str
data: str
class MarkdownHeaderTextSplitter:
"""Implementation of splitting markdown files based on specified headers."""
def __init__(
self, headers_to_split_on: List[Tuple[str, str]], return_each_line: bool = False
):
"""Create a new MarkdownHeaderTextSplitter.
Args:
headers_to_split_on: Headers we want to track
return_each_line: Return each line w/ associated headers
"""
# Output line-by-line or aggregated into chunks w/ common headers
self.return_each_line = return_each_line
# Given the headers we want to split on,
# (e.g., "#, ##, etc") order by length
self.headers_to_split_on = sorted(
headers_to_split_on, key=lambda split: len(split[0]), reverse=True
)
def aggregate_lines_to_chunks(self, lines: List[LineType]) -> List[Document]:
"""Combine lines with common metadata into chunks
Args:
lines: Line of text / associated header metadata
"""
aggregated_chunks: List[LineType] = []
for line in lines:
if (
aggregated_chunks
and aggregated_chunks[-1]["metadata"] == line["metadata"]
):
# If the last line in the aggregated list
# has the same metadata as the current line,
# append the current content to the last line's content
aggregated_chunks[-1]["content"] += " \n" + line["content"]
else:
# Otherwise, append the current line to the aggregated list
aggregated_chunks.append(line)
return [
Document(page_content=chunk["content"], metadata=chunk["metadata"])
for chunk in aggregated_chunks
]
def split_text(self, text: str) -> List[Document]:
"""Split markdown file
Args:
text: Markdown file"""
# Split the input text by newline character ("\n").
lines = text.split("\n")
# Final output
lines_with_metadata: List[LineType] = []
# Content and metadata of the chunk currently being processed
current_content: List[str] = []
current_metadata: Dict[str, str] = {}
# Keep track of the nested header structure
# header_stack: List[Dict[str, Union[int, str]]] = []
header_stack: List[HeaderType] = []
initial_metadata: Dict[str, str] = {}
for line in lines:
stripped_line = line.strip()
# Check each line against each of the header types (e.g., #, ##)
for sep, name in self.headers_to_split_on:
# Check if line starts with a header that we intend to split on
if stripped_line.startswith(sep) and (
# Header with no trailing text OR header followed by a space;
# both are valid conditions for sep being used as a header
len(stripped_line) == len(sep)
or stripped_line[len(sep)] == " "
):
# Ensure we are tracking the header as metadata
if name is not None:
# Get the current header level
current_header_level = sep.count("#")
# Pop out headers of lower or same level from the stack
while (
header_stack
and header_stack[-1]["level"] >= current_header_level
):
# We have encountered a new header
# at the same or higher level
popped_header = header_stack.pop()
# Clear the metadata for the
# popped header in initial_metadata
if popped_header["name"] in initial_metadata:
initial_metadata.pop(popped_header["name"])
# Push the current header to the stack
header: HeaderType = {
"level": current_header_level,
"name": name,
"data": stripped_line[len(sep) :].strip(),
}
header_stack.append(header)
# Update initial_metadata with the current header
initial_metadata[name] = header["data"]
# Add the previous line to the lines_with_metadata
# only if current_content is not empty
if current_content:
lines_with_metadata.append(
{
"content": "\n".join(current_content),
"metadata": current_metadata.copy(),
}
)
current_content.clear()
break
else:
if stripped_line:
current_content.append(stripped_line)
elif current_content:
lines_with_metadata.append(
{
"content": "\n".join(current_content),
"metadata": current_metadata.copy(),
}
)
current_content.clear()
current_metadata = initial_metadata.copy()
if current_content:
lines_with_metadata.append(
{"content": "\n".join(current_content), "metadata": current_metadata}
)
# lines_with_metadata has each line with associated header metadata
# aggregate these into chunks based on common metadata
if not self.return_each_line:
return self.aggregate_lines_to_chunks(lines_with_metadata)
else:
return [
Document(page_content=chunk["content"], metadata=chunk["metadata"])
for chunk in lines_with_metadata
]
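A usage sketch (the metadata keys "h1" and "h2" are arbitrary names chosen for this example):
md_splitter = MarkdownHeaderTextSplitter(
    headers_to_split_on=[("#", "h1"), ("##", "h2")]
)
docs = md_splitter.split_text("# Title\n\nintro\n\n## Part\n\nbody")
# -> Document("intro", metadata={"h1": "Title"}) and
#    Document("body", metadata={"h1": "Title", "h2": "Part"})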
# In newer Python versions (3.10+) this could be:
# @dataclass(frozen=True, kw_only=True, slots=True)
@dataclass(frozen=True)
class Tokenizer:
chunk_overlap: int
tokens_per_chunk: int
decode: Callable[[List[int]], str]
encode: Callable[[str], List[int]]
[docs]def split_text_on_tokens(*, text: str, tokenizer: Tokenizer) -> List[str]:
"""Split incoming text and return chunks."""
splits: List[str] = []
input_ids = tokenizer.encode(text)
start_idx = 0
cur_idx = min(start_idx + tokenizer.tokens_per_chunk, len(input_ids))
chunk_ids = input_ids[start_idx:cur_idx]
while start_idx < len(input_ids):
splits.append(tokenizer.decode(chunk_ids))
start_idx += tokenizer.tokens_per_chunk - tokenizer.chunk_overlap
cur_idx = min(start_idx + tokenizer.tokens_per_chunk, len(input_ids))
chunk_ids = input_ids[start_idx:cur_idx]
return splits
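To make the sliding window visible (illustrative only), a toy tokenizer that treats each whitespace-separated word as one token:
words = "a b c d e".split()
toy = Tokenizer(
    chunk_overlap=1,
    tokens_per_chunk=3,
    decode=lambda ids: " ".join(words[i] for i in ids),
    encode=lambda text: list(range(len(text.split()))),
)
print(split_text_on_tokens(text="a b c d e", tokenizer=toy))
# -> ['a b c', 'c d e', 'e']  (each chunk overlaps the previous by one token)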
[docs]class TokenTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at tokens."""
def __init__(
self,
encoding_name: str = "gpt2",
model_name: Optional[str] = None,
allowed_special: Union[Literal["all"], AbstractSet[str]] = set(),
disallowed_special: Union[Literal["all"], Collection[str]] = "all",
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(**kwargs)
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"This is needed in order to for TokenTextSplitter. "
"Please install it with `pip install tiktoken`."
)
if model_name is not None:
enc = tiktoken.encoding_for_model(model_name)
else:
enc = tiktoken.get_encoding(encoding_name)
self._tokenizer = enc
self._allowed_special = allowed_special
self._disallowed_special = disallowed_special
[docs] def split_text(self, text: str) -> List[str]:
def _encode(_text: str) -> List[int]:
return self._tokenizer.encode(
_text,
allowed_special=self._allowed_special,
disallowed_special=self._disallowed_special,
)
tokenizer = Tokenizer(
chunk_overlap=self._chunk_overlap,
tokens_per_chunk=self._chunk_size,
decode=self._tokenizer.decode,
encode=_encode,
)
return split_text_on_tokens(text=text, tokenizer=tokenizer)
[docs]class SentenceTransformersTokenTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at tokens."""
def __init__(
self,
chunk_overlap: int = 50,
model_name: str = "sentence-transformers/all-mpnet-base-v2",
tokens_per_chunk: Optional[int] = None,
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(**kwargs, chunk_overlap=chunk_overlap)
try:
from sentence_transformers import SentenceTransformer
except ImportError:
raise ImportError(
"Could not import sentence_transformer python package. "
"This is needed in order to for SentenceTransformersTokenTextSplitter. "
"Please install it with `pip install sentence-transformers`."
)
self.model_name = model_name
self._model = SentenceTransformer(self.model_name)
self.tokenizer = self._model.tokenizer
self._initialize_chunk_configuration(tokens_per_chunk=tokens_per_chunk)
def _initialize_chunk_configuration(
self, *, tokens_per_chunk: Optional[int]
) -> None:
self.maximum_tokens_per_chunk = cast(int, self._model.max_seq_length)
if tokens_per_chunk is None:
self.tokens_per_chunk = self.maximum_tokens_per_chunk
else:
self.tokens_per_chunk = tokens_per_chunk
if self.tokens_per_chunk > self.maximum_tokens_per_chunk:
raise ValueError(
f"The token limit of the models '{self.model_name}'"
f" is: {self.maximum_tokens_per_chunk}."
f" Argument tokens_per_chunk={self.tokens_per_chunk}"
f" > maximum token limit."
)
[docs] def split_text(self, text: str) -> List[str]:
def encode_strip_start_and_stop_token_ids(text: str) -> List[int]:
return self._encode(text)[1:-1]
tokenizer = Tokenizer(
chunk_overlap=self._chunk_overlap,
tokens_per_chunk=self.tokens_per_chunk,
decode=self.tokenizer.decode,
encode=encode_strip_start_and_stop_token_ids,
)
return split_text_on_tokens(text=text, tokenizer=tokenizer)
[docs] def count_tokens(self, *, text: str) -> int:
return len(self._encode(text))
_max_length_equal_32_bit_integer = 2**32
def _encode(self, text: str) -> List[int]:
token_ids_with_start_and_end_token_ids = self.tokenizer.encode(
text,
max_length=self._max_length_equal_32_bit_integer,
truncation="do_not_truncate",
)
return token_ids_with_start_and_end_token_ids
[docs]class Language(str, Enum):
CPP = "cpp"
GO = "go"
JAVA = "java"
JS = "js"
PHP = "php"
PROTO = "proto"
PYTHON = "python"
RST = "rst"
RUBY = "ruby"
RUST = "rust"
SCALA = "scala"
SWIFT = "swift"
MARKDOWN = "markdown"
LATEX = "latex"
HTML = "html"
SOL = "sol"
[docs]class RecursiveCharacterTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at characters.
Recursively tries to split by different characters to find one
that works.
"""
def __init__(
self,
separators: Optional[List[str]] = None,
keep_separator: bool = True,
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(keep_separator=keep_separator, **kwargs)
self._separators = separators or ["\n\n", "\n", " ", ""]
def _split_text(self, text: str, separators: List[str]) -> List[str]:
"""Split incoming text and return chunks."""
final_chunks = []
# Get appropriate separator to use
separator = separators[-1]
new_separators = []
for i, _s in enumerate(separators):
if _s == "":
separator = _s
break
if re.search(_s, text):
separator = _s
new_separators = separators[i + 1 :]
break
splits = _split_text_with_regex(text, separator, self._keep_separator)
# Now go merging things, recursively splitting longer texts.
_good_splits = []
_separator = "" if self._keep_separator else separator
for s in splits:
if self._length_function(s) < self._chunk_size:
_good_splits.append(s)
else:
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
_good_splits = []
if not new_separators:
final_chunks.append(s)
else:
other_info = self._split_text(s, new_separators)
final_chunks.extend(other_info)
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
return final_chunks
[docs] def split_text(self, text: str) -> List[str]:
return self._split_text(text, self._separators)
[docs] @classmethod
def from_language(
cls, language: Language, **kwargs: Any
) -> RecursiveCharacterTextSplitter:
separators = cls.get_separators_for_language(language)
return cls(separators=separators, **kwargs)
[docs] @staticmethod
def get_separators_for_language(language: Language) -> List[str]:
if language == Language.CPP:
return [
# Split along class definitions
"\nclass ",
# Split along function definitions
"\nvoid ",
"\nint ",
"\nfloat ",
"\ndouble ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.GO:
return [
# Split along function definitions
"\nfunc ",
"\nvar ",
"\nconst ",
"\ntype ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.JAVA:
return [
# Split along class definitions
"\nclass ",
# Split along method definitions
"\npublic ",
"\nprotected ",
"\nprivate ",
"\nstatic ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.JS:
return [
# Split along function definitions
"\nfunction ",
"\nconst ",
"\nlet ",
"\nvar ",
"\nclass ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
"\ndefault ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.PHP:
return [
# Split along function definitions
"\nfunction ",
# Split along class definitions
"\nclass ",
# Split along control flow statements
"\nif ",
"\nforeach ",
"\nwhile ",
"\ndo ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.PROTO:
return [
# Split along message definitions
"\nmessage ",
# Split along service definitions
"\nservice ",
# Split along enum definitions
"\nenum ",
# Split along option definitions
"\noption ",
# Split along import statements
"\nimport ",
# Split along syntax declarations
"\nsyntax ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.PYTHON:
return [
# First, try to split along class definitions
"\nclass ",
"\ndef ",
"\n\tdef ",
# Now split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.RST:
return [
# Split along section titles
"\n=+\n",
"\n-+\n",
"\n\*+\n",
# Split along directive markers
"\n\n.. *\n\n",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.RUBY:
return [
# Split along method definitions
"\ndef ",
"\nclass ",
# Split along control flow statements
"\nif ",
"\nunless ",
"\nwhile ",
"\nfor ",
"\ndo ",
"\nbegin ",
"\nrescue ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.RUST:
return [
# Split along function definitions
"\nfn ",
"\nconst ",
"\nlet ",
# Split along control flow statements
"\nif ",
"\nwhile ",
"\nfor ",
"\nloop ",
"\nmatch ",
"\nconst ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.SCALA:
return [
# Split along class definitions
"\nclass ",
"\nobject ",
# Split along method definitions
"\ndef ",
"\nval ",
"\nvar ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nmatch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.SWIFT:
return [
# Split along function definitions
"\nfunc ",
# Split along class definitions
"\nclass ",
"\nstruct ",
"\nenum ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\ndo ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.MARKDOWN:
return [
# First, try to split along Markdown headings (levels 1 through 6)
"\n#{1,6} ",
# Note the alternative syntax for headings (below) is not handled here
# Heading level 2
# ---------------
# End of code block
"```\n",
# Horizontal lines
"\n\*\*\*+\n",
"\n---+\n",
"\n___+\n",
# The patterns above match horizontal rules written as three or more
# ***, ---, or ___ characters
"\n\n",
"\n",
" ",
"",
]
elif language == Language.LATEX:
return [
# First, try to split along Latex sections
"\n\\\chapter{",
"\n\\\section{",
"\n\\\subsection{",
"\n\\\subsubsection{",
# Now split by environments
"\n\\\begin{enumerate}",
"\n\\\begin{itemize}",
"\n\\\begin{description}",
"\n\\\begin{list}",
"\n\\\begin{quote}",
"\n\\\begin{quotation}",
"\n\\\begin{verse}",
"\n\\\begin{verbatim}",
# Now split by math environments
"\n\\\begin{align}",
"$$",
"$",
# Now split by the normal type of lines
" ",
"",
]
elif language == Language.HTML:
return [
# First, try to split along HTML tags
"<body",
"<div",
"<p",
"<br",
"<li",
"<h1",
"<h2",
"<h3",
"<h4",
"<h5",
"<h6",
"<span",
"<table",
"<tr",
"<td",
"<th",
"<ul",
"<ol",
"<header",
"<footer",
"<nav",
# Head
"<head",
"<style",
"<script",
"<meta",
"<title",
"",
]
elif language == Language.SOL:
return [
# Split along compiler information definitions
"\npragma ",
"\nusing ",
# Split along contract definitions
"\ncontract ",
"\ninterface ",
"\nlibrary ",
# Split along method definitions
"\nconstructor ",
"\ntype ",
"\nfunction ",
"\nevent ",
"\nmodifier ",
"\nerror ",
"\nstruct ",
"\nenum ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\ndo while ",
"\nassembly ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
else:
raise ValueError(
f"Language {language} is not supported! "
f"Please choose from {list(Language)}"
)
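A usage sketch (illustrative sizes): build a Python-aware splitter via from_language.
py_splitter = RecursiveCharacterTextSplitter.from_language(
    Language.PYTHON, chunk_size=60, chunk_overlap=0
)
code = "def foo():\n    return 1\n\nclass Bar:\n    value = 2\n"
for chunk in py_splitter.split_text(code):
    print(repr(chunk))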
[docs]class NLTKTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at sentences using NLTK."""
def __init__(self, separator: str = "\n\n", **kwargs: Any) -> None:
"""Initialize the NLTK splitter."""
super().__init__(**kwargs)
try:
from nltk.tokenize import sent_tokenize
self._tokenizer = sent_tokenize
except ImportError:
raise ImportError(
"NLTK is not installed, please install it with `pip install nltk`."
)
self._separator = separator
[docs] def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
splits = self._tokenizer(text)
return self._merge_splits(splits, self._separator)
[docs]class SpacyTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at sentences using Spacy."""
def __init__(
self, separator: str = "\n\n", pipeline: str = "en_core_web_sm", **kwargs: Any
) -> None:
"""Initialize the spacy text splitter."""
super().__init__(**kwargs)
try:
import spacy
except ImportError:
raise ImportError(
"Spacy is not installed, please install it with `pip install spacy`."
)
self._tokenizer = spacy.load(pipeline)
self._separator = separator
[docs] def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
splits = (str(s) for s in self._tokenizer(text).sents)
return self._merge_splits(splits, self._separator)
# For backwards compatibility
[docs]class PythonCodeTextSplitter(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Python syntax."""
def __init__(self, **kwargs: Any) -> None:
"""Initialize a PythonCodeTextSplitter."""
separators = self.get_separators_for_language(Language.PYTHON)
super().__init__(separators=separators, **kwargs)
[docs]class MarkdownTextSplitter(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Markdown-formatted headings."""
def __init__(self, **kwargs: Any) -> None:
"""Initialize a MarkdownTextSplitter."""
separators = self.get_separators_for_language(Language.MARKDOWN)
super().__init__(separators=separators, **kwargs)
[docs]class LatexTextSplitter(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Latex-formatted layout elements."""
def __init__(self, **kwargs: Any) -> None:
"""Initialize a LatexTextSplitter."""
separators = self.get_separators_for_language(Language.LATEX)
super().__init__(separators=separators, **kwargs)
Source code for langchain.env
import platform
from functools import lru_cache
[docs]@lru_cache(maxsize=1)
def get_runtime_environment() -> dict:
"""Get information about the environment."""
# Lazy import to avoid circular imports
from langchain import __version__
return {
"library_version": __version__,
"library": "langchain",
"platform": platform.platform(),
"runtime": "python",
"runtime_version": platform.python_version(),
}
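For example (output values are illustrative):
env = get_runtime_environment()
print(env["library"], env["library_version"])  # e.g. langchain 0.0.x
print(env["runtime"], env["runtime_version"])  # e.g. python 3.11.x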
Source code for langchain.requests
"""Lightweight wrapper around requests library, with async support."""
from contextlib import asynccontextmanager
from typing import Any, AsyncGenerator, Dict, Optional
import aiohttp
import requests
from pydantic import BaseModel, Extra
[docs]class Requests(BaseModel):
"""Wrapper around requests to handle auth and async.
The main purpose of this wrapper is to handle authentication (by saving
headers) and enable easy async methods on the same base object.
"""
headers: Optional[Dict[str, str]] = None
aiosession: Optional[aiohttp.ClientSession] = None
[docs] class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
[docs] def get(self, url: str, **kwargs: Any) -> requests.Response:
"""GET the URL and return the text."""
return requests.get(url, headers=self.headers, **kwargs)
[docs] def post(self, url: str, data: Dict[str, Any], **kwargs: Any) -> requests.Response:
"""POST to the URL and return the text."""
return requests.post(url, json=data, headers=self.headers, **kwargs)
[docs] def patch(self, url: str, data: Dict[str, Any], **kwargs: Any) -> requests.Response:
"""PATCH the URL and return the text."""
return requests.patch(url, json=data, headers=self.headers, **kwargs)
[docs] def put(self, url: str, data: Dict[str, Any], **kwargs: Any) -> requests.Response:
"""PUT the URL and return the text."""
return requests.put(url, json=data, headers=self.headers, **kwargs)
[docs] def delete(self, url: str, **kwargs: Any) -> requests.Response:
"""DELETE the URL and return the text."""
return requests.delete(url, headers=self.headers, **kwargs)
@asynccontextmanager
async def _arequest(
self, method: str, url: str, **kwargs: Any
) -> AsyncGenerator[aiohttp.ClientResponse, None]:
"""Make an async request."""
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.request(
method, url, headers=self.headers, **kwargs
) as response:
yield response
else:
async with self.aiosession.request(
method, url, headers=self.headers, **kwargs
) as response:
yield response
[docs] @asynccontextmanager
async def aget(
self, url: str, **kwargs: Any
) -> AsyncGenerator[aiohttp.ClientResponse, None]:
"""GET the URL and return the text asynchronously."""
async with self._arequest("GET", url, **kwargs) as response:
yield response
[docs] @asynccontextmanager
async def apost(
self, url: str, data: Dict[str, Any], **kwargs: Any
) -> AsyncGenerator[aiohttp.ClientResponse, None]:
"""POST to the URL and return the text asynchronously."""
async with self._arequest("POST", url, **kwargs) as response:
yield response
[docs] @asynccontextmanager
async def apatch(
self, url: str, data: Dict[str, Any], **kwargs: Any
) -> AsyncGenerator[aiohttp.ClientResponse, None]:
"""PATCH the URL and return the text asynchronously."""
async with self._arequest("PATCH", url, **kwargs) as response:
yield response
[docs] @asynccontextmanager
async def aput(
self, url: str, data: Dict[str, Any], **kwargs: Any
) -> AsyncGenerator[aiohttp.ClientResponse, None]:
"""PUT the URL and return the text asynchronously."""
async with self._arequest("PUT", url, **kwargs) as response:
yield response
[docs] @asynccontextmanager
async def adelete(
self, url: str, **kwargs: Any
) -> AsyncGenerator[aiohttp.ClientResponse, None]:
"""DELETE the URL and return the text asynchronously."""
async with self._arequest("DELETE", url, **kwargs) as response:
yield response
[docs]class TextRequestsWrapper(BaseModel):
"""Lightweight wrapper around requests library.
The main purpose of this wrapper is to always return a text output.
"""
headers: Optional[Dict[str, str]] = None
aiosession: Optional[aiohttp.ClientSession] = None
[docs] class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def requests(self) -> Requests:
return Requests(headers=self.headers, aiosession=self.aiosession)
[docs] def get(self, url: str, **kwargs: Any) -> str:
"""GET the URL and return the text."""
return self.requests.get(url, **kwargs).text
[docs] def post(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""POST to the URL and return the text."""
return self.requests.post(url, data, **kwargs).text
[docs] def patch(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""PATCH the URL and return the text."""
return self.requests.patch(url, data, **kwargs).text
[docs] def put(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""PUT the URL and return the text."""
return self.requests.put(url, data, **kwargs).text
[docs] def delete(self, url: str, **kwargs: Any) -> str:
"""DELETE the URL and return the text."""
return self.requests.delete(url, **kwargs).text
[docs] async def aget(self, url: str, **kwargs: Any) -> str:
"""GET the URL and return the text asynchronously."""
async with self.requests.aget(url, **kwargs) as response:
return await response.text()
[docs] async def apost(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""POST to the URL and return the text asynchronously."""
async with self.requests.apost(url, data, **kwargs) as response:
return await response.text()
[docs] async def apatch(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""PATCH the URL and return the text asynchronously."""
async with self.requests.apatch(url, data, **kwargs) as response:
return await response.text()
[docs] async def aput(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""PUT the URL and return the text asynchronously."""
async with self.requests.aput(url, data, **kwargs) as response:
return await response.text()
[docs] async def adelete(self, url: str, **kwargs: Any) -> str:
"""DELETE the URL and return the text asynchronously."""
async with self.requests.adelete(url, **kwargs) as response:
return await response.text()
# For backwards compatibility
RequestsWrapper = TextRequestsWrapper
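Example usage (a minimal sketch; the URLs and header value are placeholders):

wrapper = TextRequestsWrapper(headers={"User-Agent": "langchain-demo"})
page = wrapper.get("https://example.com")  # always returns str, never a Response
created = wrapper.post("https://example.com/items", data={"name": "demo"})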
Source code for langchain.document_transformers
"""Transform documents"""
from typing import Any, Callable, List, Sequence
import numpy as np
from pydantic import BaseModel, Field
from langchain.embeddings.base import Embeddings
from langchain.math_utils import cosine_similarity
from langchain.schema import BaseDocumentTransformer, Document
class _DocumentWithState(Document):
"""Wrapper for a document that includes arbitrary state."""
state: dict = Field(default_factory=dict)
"""State associated with the document."""
def to_document(self) -> Document:
"""Convert the DocumentWithState to a Document."""
return Document(page_content=self.page_content, metadata=self.metadata)
@classmethod
def from_document(cls, doc: Document) -> "_DocumentWithState":
"""Create a DocumentWithState from a Document."""
if isinstance(doc, cls):
return doc
return cls(page_content=doc.page_content, metadata=doc.metadata)
[docs]def get_stateful_documents(
documents: Sequence[Document],
) -> Sequence[_DocumentWithState]:
"""Convert a list of documents to a list of documents with state.
Args:
documents: The documents to convert.
Returns:
A list of documents with state.
"""
return [_DocumentWithState.from_document(doc) for doc in documents]
def _filter_similar_embeddings(
embedded_documents: List[List[float]], similarity_fn: Callable, threshold: float
) -> List[int]:
"""Filter redundant documents based on the similarity of their embeddings."""
similarity = np.tril(similarity_fn(embedded_documents, embedded_documents), k=-1)
redundant = np.where(similarity > threshold)
redundant_stacked = np.column_stack(redundant)
redundant_sorted = np.argsort(similarity[redundant])[::-1]
included_idxs = set(range(len(embedded_documents)))
for first_idx, second_idx in redundant_stacked[redundant_sorted]:
if first_idx in included_idxs and second_idx in included_idxs:
# Default to dropping the second document of any highly similar pair.
included_idxs.remove(second_idx)
return list(sorted(included_idxs))
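A quick sanity check of the filter above (a sketch; the vectors are made up for illustration):

embedded = [[1.0, 0.0], [1.0, 0.0], [0.0, 1.0]]
kept = _filter_similar_embeddings(embedded, cosine_similarity, threshold=0.95)
# Docs 0 and 1 are identical (cosine similarity 1.0 > 0.95); the
# lower-indexed member of the pair is removed from the included set.
assert kept == [1, 2]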
def _get_embeddings_from_stateful_docs(
embeddings: Embeddings, documents: Sequence[_DocumentWithState]
) -> List[List[float]]:
if len(documents) and "embedded_doc" in documents[0].state:
embedded_documents = [doc.state["embedded_doc"] for doc in documents]
else:
embedded_documents = embeddings.embed_documents(
[d.page_content for d in documents]
)
for doc, embedding in zip(documents, embedded_documents):
doc.state["embedded_doc"] = embedding
return embedded_documents
[docs]class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel):
"""Filter that drops redundant documents by comparing their embeddings."""
embeddings: Embeddings
"""Embeddings to use for embedding document contents."""
similarity_fn: Callable = cosine_similarity
"""Similarity function for comparing documents. Function expected to take as input
two matrices (List[List[float]]) and return a matrix of scores where higher values
indicate greater similarity."""
similarity_threshold: float = 0.95
"""Threshold for determining when two documents are similar enough
to be considered redundant."""
[docs] class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
[docs] def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Filter down documents."""
stateful_documents = get_stateful_documents(documents)
embedded_documents = _get_embeddings_from_stateful_docs(
self.embeddings, stateful_documents
)
included_idxs = _filter_similar_embeddings(
embedded_documents, self.similarity_fn, self.similarity_threshold
)
return [stateful_documents[i] for i in sorted(included_idxs)]
[docs] async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
raise NotImplementedError
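Example usage (a sketch; assumes a configured embeddings model such as langchain.embeddings.OpenAIEmbeddings):

from langchain.embeddings import OpenAIEmbeddings

docs = [
    Document(page_content="LangChain is a framework for LLM apps."),
    Document(page_content="LangChain is a framework for LLM apps."),
    Document(page_content="Vector stores index embeddings."),
]
redundant_filter = EmbeddingsRedundantFilter(embeddings=OpenAIEmbeddings())
unique_docs = redundant_filter.transform_documents(docs)
# The duplicate is dropped; each returned document also caches its embedding
# in doc.state["embedded_doc"], so downstream transformers can reuse it.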
Source code for langchain.retrievers.milvus
"""Milvus Retriever"""
import warnings
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores.milvus import Milvus
# TODO: Update to MilvusClient + Hybrid Search when available
[docs]class MilvusRetriever(BaseRetriever):
"""Retriever that uses the Milvus API."""
def __init__(
self,
embedding_function: Embeddings,
collection_name: str = "LangChainCollection",
connection_args: Optional[Dict[str, Any]] = None,
consistency_level: str = "Session",
search_params: Optional[dict] = None,
):
self.store = Milvus(
embedding_function,
collection_name,
connection_args,
consistency_level,
)
self.retriever = self.store.as_retriever(search_kwargs={"param": search_params})
[docs] def add_texts(
self, texts: List[str], metadatas: Optional[List[dict]] = None
) -> None:
"""Add text to the Milvus store
Args:
texts (List[str]): The text
metadatas (List[dict]): Metadata dicts, must line up with existing store
"""
self.store.add_texts(texts, metadatas)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
return self.retriever.get_relevant_documents(
query, run_manager=run_manager.get_child(), **kwargs
)
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
raise NotImplementedError
[docs]def MilvusRetreiver(*args: Any, **kwargs: Any) -> MilvusRetriever:
"""Deprecated MilvusRetreiver. Please use MilvusRetriever ('i' before 'e') instead.
Args:
*args:
**kwargs:
Returns:
MilvusRetriever
"""
warnings.warn(
"MilvusRetreiver will be deprecated in the future. "
"Please use MilvusRetriever ('i' before 'e') instead.",
DeprecationWarning,
)
return MilvusRetriever(*args, **kwargs)
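Example usage (a sketch; assumes a running Milvus instance and a configured embeddings model; the host and port are placeholders):

from langchain.embeddings import OpenAIEmbeddings

retriever = MilvusRetriever(
    embedding_function=OpenAIEmbeddings(),
    collection_name="LangChainCollection",
    connection_args={"host": "localhost", "port": "19530"},
)
retriever.add_texts(["Milvus is an open-source vector database."])
docs = retriever.get_relevant_documents("What is Milvus?")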
Source code for langchain.retrievers.zilliz
"""Zilliz Retriever"""
import warnings
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores.zilliz import Zilliz
# TODO: Update to ZillizClient + Hybrid Search when available
[docs]class ZillizRetriever(BaseRetriever):
"""Retriever that uses the Zilliz API."""
def __init__(
self,
embedding_function: Embeddings,
collection_name: str = "LangChainCollection",
connection_args: Optional[Dict[str, Any]] = None,
consistency_level: str = "Session",
search_params: Optional[dict] = None,
):
self.store = Zilliz(
embedding_function,
collection_name,
connection_args,
consistency_level,
)
self.retriever = self.store.as_retriever(search_kwargs={"param": search_params})
[docs] def add_texts(
self, texts: List[str], metadatas: Optional[List[dict]] = None
) -> None:
"""Add text to the Zilliz store
Args:
texts (List[str]): The text
metadatas (List[dict]): Metadata dicts, must line up with existing store
"""
self.store.add_texts(texts, metadatas)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
return self.retriever.get_relevant_documents(
query, run_manager=run_manager.get_child(), **kwargs
)
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
raise NotImplementedError
[docs]def ZillizRetreiver(*args: Any, **kwargs: Any) -> ZillizRetriever:
"""
Deprecated ZillizRetreiver. Please use ZillizRetriever ('i' before 'e') instead.
Args:
*args:
**kwargs:
Returns:
ZillizRetriever
"""
warnings.warn(
"ZillizRetreiver will be deprecated in the future. "
"Please use ZillizRetriever ('i' before 'e') instead.",
DeprecationWarning,
)
return ZillizRetriever(*args, **kwargs)
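The misspelled factories (MilvusRetreiver, ZillizRetreiver) only warn before delegating, and DeprecationWarning is hidden by default in scripts. A minimal sketch of surfacing it (`embeddings` is a placeholder for any Embeddings instance):

import warnings

warnings.simplefilter("always", DeprecationWarning)
retriever = ZillizRetreiver(embedding_function=embeddings)  # placeholder embeddings
# -> DeprecationWarning: ZillizRetreiver will be deprecated in the future. ...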
Source code for langchain.retrievers.kendra
import re
from typing import Any, Dict, List, Literal, Optional
from pydantic import BaseModel, Extra
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.docstore.document import Document
from langchain.schema import BaseRetriever
[docs]def clean_excerpt(excerpt: str) -> str:
if not excerpt:
return excerpt
res = re.sub(r"\s+", " ", excerpt).replace("...", "")
return res
[docs]def combined_text(title: str, excerpt: str) -> str:
if not title or not excerpt:
return ""
return f"Document Title: {title} \nDocument Excerpt: \n{excerpt}\n"
[docs]class Highlight(BaseModel, extra=Extra.allow):
BeginOffset: int
EndOffset: int
TopAnswer: Optional[bool]
Type: Optional[str]
[docs]class TextWithHighLights(BaseModel, extra=Extra.allow):
Text: str
Highlights: Optional[Any]
[docs]class AdditionalResultAttributeValue(BaseModel, extra=Extra.allow):
TextWithHighlightsValue: TextWithHighLights
[docs]class AdditionalResultAttribute(BaseModel, extra=Extra.allow):
Key: str
ValueType: Literal["TEXT_WITH_HIGHLIGHTS_VALUE"]
Value: AdditionalResultAttributeValue
[docs] def get_value_text(self) -> str:
return self.Value.TextWithHighlightsValue.Text
[docs]class QueryResultItem(BaseModel, extra=Extra.allow):
DocumentId: str
DocumentTitle: TextWithHighLights
DocumentURI: Optional[str]
FeedbackToken: Optional[str]
Format: Optional[str]
Id: Optional[str]
Type: Optional[str]
AdditionalAttributes: Optional[List[AdditionalResultAttribute]] = []
DocumentExcerpt: Optional[TextWithHighLights]
[docs] def get_attribute_value(self) -> str:
if not self.AdditionalAttributes:
return ""
if not self.AdditionalAttributes[0]:
return ""
else:
return self.AdditionalAttributes[0].get_value_text()
[docs] def get_excerpt(self) -> str:
if (
self.AdditionalAttributes
and self.AdditionalAttributes[0].Key == "AnswerText"
):
excerpt = self.get_attribute_value()
elif self.DocumentExcerpt:
excerpt = self.DocumentExcerpt.Text
else:
excerpt = ""
return clean_excerpt(excerpt)
[docs] def to_doc(self) -> Document:
title = self.DocumentTitle.Text
source = self.DocumentURI
excerpt = self.get_excerpt()
type = self.Type
page_content = combined_text(title, excerpt)
metadata = {"source": source, "title": title, "excerpt": excerpt, "type": type}
return Document(page_content=page_content, metadata=metadata)
[docs]class QueryResult(BaseModel, extra=Extra.allow):
ResultItems: List[QueryResultItem]
[docs] def get_top_k_docs(self, top_n: int) -> List[Document]:
items_len = len(self.ResultItems)
count = items_len if items_len < top_n else top_n
docs = [self.ResultItems[i].to_doc() for i in range(0, count)]
return docs
[docs]class DocumentAttributeValue(BaseModel, extra=Extra.allow):
DateValue: Optional[str]
LongValue: Optional[int]
StringListValue: Optional[List[str]]
StringValue: Optional[str]
[docs]class DocumentAttribute(BaseModel, extra=Extra.allow):
Key: str
Value: DocumentAttributeValue
[docs]class RetrieveResultItem(BaseModel, extra=Extra.allow):
Content: Optional[str]
DocumentAttributes: Optional[List[DocumentAttribute]] = []
DocumentId: Optional[str]
DocumentTitle: Optional[str]
DocumentURI: Optional[str]
Id: Optional[str]
[docs] def get_excerpt(self) -> str:
if not self.Content:
return ""
return clean_excerpt(self.Content)
[docs] def to_doc(self) -> Document:
title = self.DocumentTitle if self.DocumentTitle else ""
source = self.DocumentURI
excerpt = self.get_excerpt()
page_content = combined_text(title, excerpt)
metadata = {"source": source, "title": title, "excerpt": excerpt}
return Document(page_content=page_content, metadata=metadata)
[docs]class RetrieveResult(BaseModel, extra=Extra.allow):
QueryId: str
ResultItems: List[RetrieveResultItem]
[docs] def get_top_k_docs(self, top_n: int) -> List[Document]:
items_len = len(self.ResultItems)
count = items_len if items_len < top_n else top_n
docs = [self.ResultItems[i].to_doc() for i in range(0, count)]
return docs
[docs]class AmazonKendraRetriever(BaseRetriever):
"""Retriever class to query documents from Amazon Kendra Index.
Args:
index_id: Kendra index id
region_name: The AWS region, e.g., `us-west-2`.
Falls back to the AWS_DEFAULT_REGION environment variable
or the region specified in ~/.aws/config.
credentials_profile_name: The name of the profile in the ~/.aws/credentials
or ~/.aws/config files, which has either access keys or role information
specified. If not specified, the default credential profile or, if on an
EC2 instance, credentials from IMDS will be used.
top_k: Number of results to return
attribute_filter: Additional filtering of results based on metadata.
See: https://docs.aws.amazon.com/kendra/latest/APIReference
client: boto3 client for Kendra
Example:
.. code-block:: python
retriever = AmazonKendraRetriever(
index_id="c0806df7-e76b-4bce-9b5c-d5582f6b1a03"
)
"""
def __init__(
self,
index_id: str,
region_name: Optional[str] = None,
credentials_profile_name: Optional[str] = None,
top_k: int = 3,
attribute_filter: Optional[Dict] = None,
client: Optional[Any] = None,
):
self.index_id = index_id
self.top_k = top_k
self.attribute_filter = attribute_filter
if client is not None:
self.client = client
return
try:
import boto3
if credentials_profile_name is not None:
session = boto3.Session(profile_name=credentials_profile_name)
else:
# use default credentials
session = boto3.Session()
client_params = {}
if region_name is not None:
client_params["region_name"] = region_name
self.client = session.client("kendra", **client_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
def _kendra_query(
self,
query: str,
top_k: int,
attribute_filter: Optional[Dict] = None,
) -> List[Document]:
if attribute_filter is not None:
response = self.client.retrieve(
IndexId=self.index_id,
QueryText=query.strip(),
PageSize=top_k,
AttributeFilter=attribute_filter,
)
else:
response = self.client.retrieve(
IndexId=self.index_id, QueryText=query.strip(), PageSize=top_k
)
r_result = RetrieveResult.parse_obj(response)
result_len = len(r_result.ResultItems)
if result_len == 0:
# retrieve API returned 0 results, call query API
if attribute_filter is not None:
response = self.client.query(
IndexId=self.index_id,
QueryText=query.strip(),
PageSize=top_k,
AttributeFilter=attribute_filter,
)
else:
response = self.client.query(
IndexId=self.index_id, QueryText=query.strip(), PageSize=top_k
)
q_result = QueryResult.parse_obj(response)
docs = q_result.get_top_k_docs(top_k)
else:
docs = r_result.get_top_k_docs(top_k)
return docs
def _get_relevant_documents(
self,
query: str,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
"""Run search on Kendra index and get top k documents
Example:
.. code-block:: python
docs = retriever.get_relevant_documents('This is my query')
"""
docs = self._kendra_query(query, self.top_k, self.attribute_filter)
return docs
async def _aget_relevant_documents(
self,
query: str,
run_manager: AsyncCallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
raise NotImplementedError("Async version is not implemented for Kendra yet.")
Source code for langchain.retrievers.svm
"""SMV Retriever.
Largely based on
https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb"""
from __future__ import annotations
import concurrent.futures
from typing import Any, List, Optional
import numpy as np
from pydantic import BaseModel
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
[docs]def create_index(contexts: List[str], embeddings: Embeddings) -> np.ndarray:
"""
Create an index of embeddings for a list of contexts.
Args:
contexts: List of contexts to embed.
embeddings: Embeddings model to use.
Returns:
Index of embeddings.
"""
with concurrent.futures.ThreadPoolExecutor() as executor:
return np.array(list(executor.map(embeddings.embed_query, contexts)))
[docs]class SVMRetriever(BaseRetriever, BaseModel):
"""SVM Retriever."""
embeddings: Embeddings
index: Any
texts: List[str]
k: int = 4
relevancy_threshold: Optional[float] = None
[docs] class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
[docs] @classmethod
def from_texts(
cls, texts: List[str], embeddings: Embeddings, **kwargs: Any
) -> SVMRetriever:
index = create_index(texts, embeddings)
return cls(embeddings=embeddings, index=index, texts=texts, **kwargs)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
from sklearn import svm
query_embeds = np.array(self.embeddings.embed_query(query))
x = np.concatenate([query_embeds[None, ...], self.index])
y = np.zeros(x.shape[0])
y[0] = 1
clf = svm.LinearSVC(
class_weight="balanced", verbose=False, max_iter=10000, tol=1e-6, C=0.1
)
clf.fit(x, y)
similarities = clf.decision_function(x)
sorted_ix = np.argsort(-similarities)
# svm.LinearSVC in scikit-learn is non-deterministic.
# if a text is the same as a query, there is no guarantee
# the query will be in the first index.
# this performs a simple swap, this works because anything
# left of the 0 should be equivalent.
zero_index = np.where(sorted_ix == 0)[0][0]
if zero_index != 0:
sorted_ix[0], sorted_ix[zero_index] = sorted_ix[zero_index], sorted_ix[0]
denominator = np.max(similarities) - np.min(similarities) + 1e-6
normalized_similarities = (similarities - np.min(similarities)) / denominator
top_k_results = []
for row in sorted_ix[1 : self.k + 1]:
if (
self.relevancy_threshold is None
or normalized_similarities[row] >= self.relevancy_threshold
):
top_k_results.append(Document(page_content=self.texts[row - 1]))
return top_k_results
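Example usage (a sketch; assumes a configured embeddings model, and scikit-learn must be installed since the SVM is fit at query time):

from langchain.embeddings import OpenAIEmbeddings

retriever = SVMRetriever.from_texts(
    ["foo", "bar", "world hello foo bar"],
    OpenAIEmbeddings(),
    k=2,
)
docs = retriever.get_relevant_documents("foo")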