3e17c05b1921-21
        try:
            from nltk.tokenize import sent_tokenize

            self._tokenizer = sent_tokenize
        except ImportError:
            raise ImportError(
                "NLTK is not installed, please install it with `pip install nltk`."
            )
        self._separator = separator

    def split_text(self, text: str) -> List[str]:
        """Split incoming text and return chunks."""
        # First we naively split the large input into a bunch of smaller ones.
        splits = self._tokenizer(text)
        return self._merge_splits(splits, self._separator)


class SpacyTextSplitter(TextSplitter):
    """Splitting text using Spacy package.

    Per default, Spacy's `en_core_web_sm` model is used. For a faster, but
    potentially less accurate splitting, you can use `pipeline='sentencizer'`.
    """

    def __init__(
        self, separator: str = "\n\n", pipeline: str = "en_core_web_sm", **kwargs: Any
    ) -> None:
        """Initialize the spacy text splitter."""
        super().__init__(**kwargs)
        self._tokenizer = _make_spacy_pipeline_for_splitting(pipeline)
        self._separator = separator

    def split_text(self, text: str) -> List[str]:
        """Split incoming text and return chunks."""
        splits = (s.text for s in self._tokenizer(text).sents)
        return self._merge_splits(splits, self._separator)


# For backwards compatibility
class PythonCodeTextSplitter(RecursiveCharacterTextSplitter):
    """Attempts to split the text along Python syntax."""
https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html
3e17c05b1921-22
    def __init__(self, **kwargs: Any) -> None:
        """Initialize a PythonCodeTextSplitter."""
        separators = self.get_separators_for_language(Language.PYTHON)
        super().__init__(separators=separators, **kwargs)


class MarkdownTextSplitter(RecursiveCharacterTextSplitter):
    """Attempts to split the text along Markdown-formatted headings."""

    def __init__(self, **kwargs: Any) -> None:
        """Initialize a MarkdownTextSplitter."""
        separators = self.get_separators_for_language(Language.MARKDOWN)
        super().__init__(separators=separators, **kwargs)


class LatexTextSplitter(RecursiveCharacterTextSplitter):
    """Attempts to split the text along Latex-formatted layout elements."""

    def __init__(self, **kwargs: Any) -> None:
        """Initialize a LatexTextSplitter."""
        separators = self.get_separators_for_language(Language.LATEX)
        super().__init__(separators=separators, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/text_splitter.html
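A minimal usage sketch for the splitters above (assumes `langchain` is installed; the sample text and chunk sizes are illustrative):

from langchain.text_splitter import MarkdownTextSplitter

markdown_text = "# Title\n\nSome intro text.\n\n## Section\n\nMore details here."
splitter = MarkdownTextSplitter(chunk_size=40, chunk_overlap=0)
for chunk in splitter.split_text(markdown_text):
    print(repr(chunk))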
8bda427c070e-0
Source code for langchain.cache

"""
.. warning::
  Beta Feature!

**Cache** provides an optional caching layer for LLMs.

Cache is useful for two reasons:

- It can save you money by reducing the number of API calls you make to the LLM
  provider if you're often requesting the same completion multiple times.
- It can speed up your application by reducing the number of API calls you make
  to the LLM provider.

Cache directly competes with Memory. See documentation for Pros and Cons.

**Class hierarchy:**

.. code-block::

    BaseCache --> <name>Cache  # Examples: InMemoryCache, RedisCache, GPTCache
"""
from __future__ import annotations

import hashlib
import inspect
import json
import logging
import warnings
from abc import ABC, abstractmethod
from datetime import timedelta
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Dict,
    Optional,
    Sequence,
    Tuple,
    Type,
    Union,
    cast,
)

from sqlalchemy import Column, Integer, String, create_engine, select
from sqlalchemy.engine.base import Engine
from sqlalchemy.orm import Session

from langchain.utils import get_from_env

try:
    from sqlalchemy.orm import declarative_base
except ImportError:
    from sqlalchemy.ext.declarative import declarative_base

from langchain.embeddings.base import Embeddings
from langchain.load.dump import dumps
from langchain.load.load import loads
from langchain.schema import ChatGeneration, Generation
from langchain.vectorstores.redis import Redis as RedisVectorstore

logger = logging.getLogger(__file__)

if TYPE_CHECKING:
    import momento

RETURN_VAL_TYPE = Sequence[Generation]


def _hash(_input: str) -> str:
    """Use a deterministic hashing approach."""
https://api.python.langchain.com/en/latest/_modules/langchain/cache.html
8bda427c070e-1
"""Use a deterministic hashing approach.""" return hashlib.md5(_input.encode()).hexdigest() def _dump_generations_to_json(generations: RETURN_VAL_TYPE) -> str: """Dump generations to json. Args: generations (RETURN_VAL_TYPE): A list of language model generations. Returns: str: Json representing a list of generations. """ return json.dumps([generation.dict() for generation in generations]) def _load_generations_from_json(generations_json: str) -> RETURN_VAL_TYPE: """Load generations from json. Args: generations_json (str): A string of json representing a list of generations. Raises: ValueError: Could not decode json string to list of generations. Returns: RETURN_VAL_TYPE: A list of generations. """ try: results = json.loads(generations_json) return [Generation(**generation_dict) for generation_dict in results] except json.JSONDecodeError: raise ValueError( f"Could not decode json to list of generations: {generations_json}" ) [docs]class BaseCache(ABC): """Base interface for cache.""" [docs] @abstractmethod def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" [docs] @abstractmethod def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" [docs] @abstractmethod def clear(self, **kwargs: Any) -> None: """Clear cache that can take additional keyword arguments.""" [docs]class InMemoryCache(BaseCache):
https://api.python.langchain.com/en/latest/_modules/langchain/cache.html
8bda427c070e-2
class InMemoryCache(BaseCache):
    """Cache that stores things in memory."""

    def __init__(self) -> None:
        """Initialize with empty cache."""
        self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {}

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""
        return self._cache.get((prompt, llm_string), None)

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache based on prompt and llm_string."""
        self._cache[(prompt, llm_string)] = return_val

    def clear(self, **kwargs: Any) -> None:
        """Clear cache."""
        self._cache = {}


Base = declarative_base()


class FullLLMCache(Base):  # type: ignore
    """SQLite table for full LLM Cache (all generations)."""

    __tablename__ = "full_llm_cache"
    prompt = Column(String, primary_key=True)
    llm = Column(String, primary_key=True)
    idx = Column(Integer, primary_key=True)
    response = Column(String)


class SQLAlchemyCache(BaseCache):
    """Cache that uses SQLAlchemy as a backend."""

    def __init__(self, engine: Engine, cache_schema: Type[FullLLMCache] = FullLLMCache):
        """Initialize by creating all tables."""
        self.engine = engine
        self.cache_schema = cache_schema
        self.cache_schema.metadata.create_all(self.engine)
https://api.python.langchain.com/en/latest/_modules/langchain/cache.html
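A usage sketch for InMemoryCache (assumes an OpenAI key in `OPENAI_API_KEY`; the cache is wired in via the module-level `langchain.llm_cache` hook shown in the RedisSemanticCache docstring below):

import langchain
from langchain.cache import InMemoryCache
from langchain.llms import OpenAI

langchain.llm_cache = InMemoryCache()

llm = OpenAI(model_name="text-davinci-003")
llm("Tell me a joke")  # first call hits the API
llm("Tell me a joke")  # identical prompt + llm_string is served from the cache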
8bda427c070e-3
    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""
        stmt = (
            select(self.cache_schema.response)
            .where(self.cache_schema.prompt == prompt)  # type: ignore
            .where(self.cache_schema.llm == llm_string)
            .order_by(self.cache_schema.idx)
        )
        with Session(self.engine) as session:
            rows = session.execute(stmt).fetchall()
            if rows:
                try:
                    return [loads(row[0]) for row in rows]
                except Exception:
                    logger.warning(
                        "Retrieving a cache value that could not be deserialized "
                        "properly. This is likely due to the cache being in an "
                        "older format. Please recreate your cache to avoid this "
                        "error."
                    )
                    # In a previous life we stored the raw text directly
                    # in the table, so assume it's in that format.
                    return [Generation(text=row[0]) for row in rows]
        return None

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update based on prompt and llm_string."""
        items = [
            self.cache_schema(prompt=prompt, llm=llm_string, response=dumps(gen), idx=i)
            for i, gen in enumerate(return_val)
        ]
        with Session(self.engine) as session, session.begin():
            for item in items:
                session.merge(item)
https://api.python.langchain.com/en/latest/_modules/langchain/cache.html
8bda427c070e-4
    def clear(self, **kwargs: Any) -> None:
        """Clear cache."""
        with Session(self.engine) as session:
            session.query(self.cache_schema).delete()
            session.commit()


class SQLiteCache(SQLAlchemyCache):
    """Cache that uses SQLite as a backend."""

    def __init__(self, database_path: str = ".langchain.db"):
        """Initialize by creating the engine and all tables."""
        engine = create_engine(f"sqlite:///{database_path}")
        super().__init__(engine)


class RedisCache(BaseCache):
    """Cache that uses Redis as a backend."""

    # TODO - implement a TTL policy in Redis

    def __init__(self, redis_: Any):
        """Initialize by passing in Redis instance."""
        try:
            from redis import Redis
        except ImportError:
            raise ValueError(
                "Could not import redis python package. "
                "Please install it with `pip install redis`."
            )
        if not isinstance(redis_, Redis):
            raise ValueError("Please pass in Redis object.")
        self.redis = redis_

    def _key(self, prompt: str, llm_string: str) -> str:
        """Compute key from prompt and llm_string."""
        return _hash(prompt + llm_string)

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""
        generations = []
        # Read from a Redis HASH
        results = self.redis.hgetall(self._key(prompt, llm_string))
        if results:
            for _, text in results.items():
                generations.append(Generation(text=text))
        return generations if generations else None
https://api.python.langchain.com/en/latest/_modules/langchain/cache.html
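Usage sketches for the SQLite and Redis backends just defined (the database path is the default; the Redis connection parameters are assumptions for a local server):

import langchain
from langchain.cache import SQLiteCache

langchain.llm_cache = SQLiteCache(database_path=".langchain.db")

# or, with a running Redis server:
from redis import Redis
from langchain.cache import RedisCache

langchain.llm_cache = RedisCache(redis_=Redis(host="localhost", port=6379))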
8bda427c070e-5
    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache based on prompt and llm_string."""
        for gen in return_val:
            if not isinstance(gen, Generation):
                raise ValueError(
                    "RedisCache only supports caching of normal LLM generations, "
                    f"got {type(gen)}"
                )
            if isinstance(gen, ChatGeneration):
                warnings.warn(
                    "NOTE: Generation has not been cached. RedisCache does not"
                    " support caching ChatModel outputs."
                )
                return
        # Write to a Redis HASH
        key = self._key(prompt, llm_string)
        self.redis.hset(
            key,
            mapping={
                str(idx): generation.text for idx, generation in enumerate(return_val)
            },
        )

    def clear(self, **kwargs: Any) -> None:
        """Clear cache. If `asynchronous` is True, flush asynchronously."""
        asynchronous = kwargs.get("asynchronous", False)
        self.redis.flushdb(asynchronous=asynchronous, **kwargs)


class RedisSemanticCache(BaseCache):
    """Cache that uses Redis as a vector-store backend."""

    # TODO - implement a TTL policy in Redis

    def __init__(
        self, redis_url: str, embedding: Embeddings, score_threshold: float = 0.2
    ):
        """Initialize the semantic cache with a Redis URL and an embedding provider.

        Args:
            redis_url (str): URL to connect to Redis.
            embedding (Embedding): Embedding provider for semantic encoding and search.
https://api.python.langchain.com/en/latest/_modules/langchain/cache.html
8bda427c070e-6
            score_threshold (float, defaults to 0.2): Similarity score threshold
                used when searching for semantically similar cached prompts.

        Example:
            .. code-block:: python

                import langchain
                from langchain.cache import RedisSemanticCache
                from langchain.embeddings import OpenAIEmbeddings

                langchain.llm_cache = RedisSemanticCache(
                    redis_url="redis://localhost:6379",
                    embedding=OpenAIEmbeddings()
                )
        """
        self._cache_dict: Dict[str, RedisVectorstore] = {}
        self.redis_url = redis_url
        self.embedding = embedding
        self.score_threshold = score_threshold

    def _index_name(self, llm_string: str) -> str:
        hashed_index = _hash(llm_string)
        return f"cache:{hashed_index}"

    def _get_llm_cache(self, llm_string: str) -> RedisVectorstore:
        index_name = self._index_name(llm_string)

        # return vectorstore client for the specific llm string
        if index_name in self._cache_dict:
            return self._cache_dict[index_name]

        # create new vectorstore client for the specific llm string
        try:
            self._cache_dict[index_name] = RedisVectorstore.from_existing_index(
                embedding=self.embedding,
                index_name=index_name,
                redis_url=self.redis_url,
            )
        except ValueError:
            redis = RedisVectorstore(
                embedding_function=self.embedding.embed_query,
                index_name=index_name,
                redis_url=self.redis_url,
            )
            _embedding = self.embedding.embed_query(text="test")
            redis._create_index(dim=len(_embedding))
            self._cache_dict[index_name] = redis

        return self._cache_dict[index_name]
https://api.python.langchain.com/en/latest/_modules/langchain/cache.html
8bda427c070e-7
    def clear(self, **kwargs: Any) -> None:
        """Clear semantic cache for a given llm_string."""
        index_name = self._index_name(kwargs["llm_string"])
        if index_name in self._cache_dict:
            self._cache_dict[index_name].drop_index(
                index_name=index_name, delete_documents=True, redis_url=self.redis_url
            )
            del self._cache_dict[index_name]

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""
        llm_cache = self._get_llm_cache(llm_string)
        generations = []
        # Read from a Hash
        results = llm_cache.similarity_search_limit_score(
            query=prompt,
            k=1,
            score_threshold=self.score_threshold,
        )
        if results:
            for document in results:
                for text in document.metadata["return_val"]:
                    generations.append(Generation(text=text))
        return generations if generations else None

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache based on prompt and llm_string."""
        for gen in return_val:
            if not isinstance(gen, Generation):
                raise ValueError(
                    "RedisSemanticCache only supports caching of "
                    f"normal LLM generations, got {type(gen)}"
                )
            if isinstance(gen, ChatGeneration):
                warnings.warn(
                    "NOTE: Generation has not been cached. RedisSemanticCache does not"
                    " support caching ChatModel outputs."
                )
                return
https://api.python.langchain.com/en/latest/_modules/langchain/cache.html
8bda427c070e-8
" support caching ChatModel outputs." ) return llm_cache = self._get_llm_cache(llm_string) # Write to vectorstore metadata = { "llm_string": llm_string, "prompt": prompt, "return_val": [generation.text for generation in return_val], } llm_cache.add_texts(texts=[prompt], metadatas=[metadata]) [docs]class GPTCache(BaseCache): """Cache that uses GPTCache as a backend.""" [docs] def __init__( self, init_func: Union[ Callable[[Any, str], None], Callable[[Any], None], None ] = None, ): """Initialize by passing in init function (default: `None`). Args: init_func (Optional[Callable[[Any], None]]): init `GPTCache` function (default: `None`) Example: .. code-block:: python # Initialize GPTCache with a custom init function import gptcache from gptcache.processor.pre import get_prompt from gptcache.manager.factory import get_data_manager # Avoid multiple caches using the same file, causing different llm model caches to affect each other def init_gptcache(cache_obj: gptcache.Cache, llm str): cache_obj.init( pre_embedding_func=get_prompt, data_manager=manager_factory( manager="map", data_dir=f"map_cache_{llm}" ), ) langchain.llm_cache = GPTCache(init_gptcache) """ try: import gptcache # noqa: F401 except ImportError: raise ImportError(
https://api.python.langchain.com/en/latest/_modules/langchain/cache.html
8bda427c070e-9
                "Could not import gptcache python package. "
                "Please install it with `pip install gptcache`."
            )

        self.init_gptcache_func: Union[
            Callable[[Any, str], None], Callable[[Any], None], None
        ] = init_func
        self.gptcache_dict: Dict[str, Any] = {}

    def _new_gptcache(self, llm_string: str) -> Any:
        """New gptcache object."""
        from gptcache import Cache
        from gptcache.manager.factory import get_data_manager
        from gptcache.processor.pre import get_prompt

        _gptcache = Cache()
        if self.init_gptcache_func is not None:
            sig = inspect.signature(self.init_gptcache_func)
            if len(sig.parameters) == 2:
                self.init_gptcache_func(_gptcache, llm_string)  # type: ignore[call-arg]
            else:
                self.init_gptcache_func(_gptcache)  # type: ignore[call-arg]
        else:
            _gptcache.init(
                pre_embedding_func=get_prompt,
                data_manager=get_data_manager(data_path=llm_string),
            )

        self.gptcache_dict[llm_string] = _gptcache
        return _gptcache

    def _get_gptcache(self, llm_string: str) -> Any:
        """Get a cache object.

        When the corresponding llm model cache does not exist, it will be created.
        """
        _gptcache = self.gptcache_dict.get(llm_string, None)
        if not _gptcache:
            _gptcache = self._new_gptcache(llm_string)
https://api.python.langchain.com/en/latest/_modules/langchain/cache.html
8bda427c070e-10
        return _gptcache

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up the cache data.

        First, retrieve the corresponding cache object using the `llm_string`
        parameter, and then retrieve the data from the cache based on the `prompt`.
        """
        from gptcache.adapter.api import get

        _gptcache = self._get_gptcache(llm_string)
        res = get(prompt, cache_obj=_gptcache)
        if res:
            return [
                Generation(**generation_dict) for generation_dict in json.loads(res)
            ]
        return None

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache.

        First, retrieve the corresponding cache object using the `llm_string`
        parameter, and then store the `prompt` and `return_val` in the cache object.
        """
        for gen in return_val:
            if not isinstance(gen, Generation):
                raise ValueError(
                    "GPTCache only supports caching of normal LLM generations, "
                    f"got {type(gen)}"
                )
        from gptcache.adapter.api import put

        _gptcache = self._get_gptcache(llm_string)
        handled_data = json.dumps([generation.dict() for generation in return_val])
        put(prompt, handled_data, cache_obj=_gptcache)
        return None

    def clear(self, **kwargs: Any) -> None:
        """Clear cache."""
        from gptcache import Cache
https://api.python.langchain.com/en/latest/_modules/langchain/cache.html
8bda427c070e-11
"""Clear cache.""" from gptcache import Cache for gptcache_instance in self.gptcache_dict.values(): gptcache_instance = cast(Cache, gptcache_instance) gptcache_instance.flush() self.gptcache_dict.clear() def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str) -> None: """Create cache if it doesn't exist. Raises: SdkException: Momento service or network error Exception: Unexpected response """ from momento.responses import CreateCache create_cache_response = cache_client.create_cache(cache_name) if isinstance(create_cache_response, CreateCache.Success) or isinstance( create_cache_response, CreateCache.CacheAlreadyExists ): return None elif isinstance(create_cache_response, CreateCache.Error): raise create_cache_response.inner_exception else: raise Exception(f"Unexpected response cache creation: {create_cache_response}") def _validate_ttl(ttl: Optional[timedelta]) -> None: if ttl is not None and ttl <= timedelta(seconds=0): raise ValueError(f"ttl must be positive but was {ttl}.") [docs]class MomentoCache(BaseCache): """Cache that uses Momento as a backend. See https://gomomento.com/""" [docs] def __init__( self, cache_client: momento.CacheClient, cache_name: str, *, ttl: Optional[timedelta] = None, ensure_cache_exists: bool = True, ): """Instantiate a prompt cache using Momento as a backend. Note: to instantiate the cache client passed to MomentoCache, you must have a Momento account. See https://gomomento.com/. Args:
https://api.python.langchain.com/en/latest/_modules/langchain/cache.html
8bda427c070e-12
            cache_client (CacheClient): The Momento cache client.
            cache_name (str): The name of the cache to use to store the data.
            ttl (Optional[timedelta], optional): The time to live for the cache items.
                Defaults to None, ie use the client default TTL.
            ensure_cache_exists (bool, optional): Create the cache if it doesn't
                exist. Defaults to True.

        Raises:
            ImportError: Momento python package is not installed.
            TypeError: cache_client is not of type momento.CacheClient
            ValueError: ttl is non-null and non-positive
        """
        try:
            from momento import CacheClient
        except ImportError:
            raise ImportError(
                "Could not import momento python package. "
                "Please install it with `pip install momento`."
            )
        if not isinstance(cache_client, CacheClient):
            raise TypeError("cache_client must be a momento.CacheClient object.")
        _validate_ttl(ttl)
        if ensure_cache_exists:
            _ensure_cache_exists(cache_client, cache_name)

        self.cache_client = cache_client
        self.cache_name = cache_name
        self.ttl = ttl

    @classmethod
    def from_client_params(
        cls,
        cache_name: str,
        ttl: timedelta,
        *,
        configuration: Optional[momento.config.Configuration] = None,
        auth_token: Optional[str] = None,
        **kwargs: Any,
    ) -> MomentoCache:
        """Construct cache from CacheClient parameters."""
        try:
            from momento import CacheClient, Configurations, CredentialProvider
        except ImportError:
            raise ImportError(
                "Could not import momento python package. "
                "Please install it with `pip install momento`."
https://api.python.langchain.com/en/latest/_modules/langchain/cache.html
8bda427c070e-13
"Please install it with `pip install momento`." ) if configuration is None: configuration = Configurations.Laptop.v1() auth_token = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN") credentials = CredentialProvider.from_string(auth_token) cache_client = CacheClient(configuration, credentials, default_ttl=ttl) return cls(cache_client, cache_name, ttl=ttl, **kwargs) def __key(self, prompt: str, llm_string: str) -> str: """Compute cache key from prompt and associated model and settings. Args: prompt (str): The prompt run through the language model. llm_string (str): The language model version and settings. Returns: str: The cache key. """ return _hash(prompt + llm_string) [docs] def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Lookup llm generations in cache by prompt and associated model and settings. Args: prompt (str): The prompt run through the language model. llm_string (str): The language model version and settings. Raises: SdkException: Momento service or network error Returns: Optional[RETURN_VAL_TYPE]: A list of language model generations. """ from momento.responses import CacheGet generations: RETURN_VAL_TYPE = [] get_response = self.cache_client.get( self.cache_name, self.__key(prompt, llm_string) ) if isinstance(get_response, CacheGet.Hit): value = get_response.value_string generations = _load_generations_from_json(value) elif isinstance(get_response, CacheGet.Miss): pass
https://api.python.langchain.com/en/latest/_modules/langchain/cache.html
8bda427c070e-14
        elif isinstance(get_response, CacheGet.Error):
            raise get_response.inner_exception
        return generations if generations else None

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Store llm generations in cache.

        Args:
            prompt (str): The prompt run through the language model.
            llm_string (str): The language model string.
            return_val (RETURN_VAL_TYPE): A list of language model generations.

        Raises:
            SdkException: Momento service or network error
            Exception: Unexpected response
        """
        for gen in return_val:
            if not isinstance(gen, Generation):
                raise ValueError(
                    "Momento only supports caching of normal LLM generations, "
                    f"got {type(gen)}"
                )
        key = self.__key(prompt, llm_string)
        value = _dump_generations_to_json(return_val)
        set_response = self.cache_client.set(self.cache_name, key, value, self.ttl)
        from momento.responses import CacheSet

        if isinstance(set_response, CacheSet.Success):
            pass
        elif isinstance(set_response, CacheSet.Error):
            raise set_response.inner_exception
        else:
            raise Exception(f"Unexpected response: {set_response}")

    def clear(self, **kwargs: Any) -> None:
        """Clear the cache.

        Raises:
            SdkException: Momento service or network error
        """
        from momento.responses import CacheFlush

        flush_response = self.cache_client.flush_cache(self.cache_name)
        if isinstance(flush_response, CacheFlush.Success):
            pass
        elif isinstance(flush_response, CacheFlush.Error):
https://api.python.langchain.com/en/latest/_modules/langchain/cache.html
8bda427c070e-15
            raise flush_response.inner_exception
https://api.python.langchain.com/en/latest/_modules/langchain/cache.html
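Usage sketches for the GPTCache and Momento backends above (assume `gptcache` and `momento` are installed and, for Momento, that `MOMENTO_AUTH_TOKEN` is set; the cache name is illustrative):

import langchain
from datetime import timedelta
from langchain.cache import GPTCache, MomentoCache

# GPTCache with no init_func: one on-disk data manager per llm_string.
langchain.llm_cache = GPTCache()

# Or Momento, constructed from client parameters:
langchain.llm_cache = MomentoCache.from_client_params(
    cache_name="langchain", ttl=timedelta(days=1)
)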
fc35b1b80207-0
Source code for langchain.model_laboratory

"""Experiment with different models."""
from __future__ import annotations

from typing import List, Optional, Sequence

from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.llms.base import BaseLLM
from langchain.prompts.prompt import PromptTemplate
from langchain.utils.input import get_color_mapping, print_text


class ModelLaboratory:
    """Experiment with different models."""

    def __init__(self, chains: Sequence[Chain], names: Optional[List[str]] = None):
        """Initialize with chains to experiment with.

        Args:
            chains: list of chains to experiment with.
        """
        for chain in chains:
            if not isinstance(chain, Chain):
                raise ValueError(
                    "ModelLaboratory should now be initialized with Chains. "
                    "If you want to initialize with LLMs, use the `from_llms` method "
                    "instead (`ModelLaboratory.from_llms(...)`)"
                )
            if len(chain.input_keys) != 1:
                raise ValueError(
                    "Currently only supports chains with one input variable, "
                    f"got {chain.input_keys}"
                )
            if len(chain.output_keys) != 1:
                raise ValueError(
                    "Currently only supports chains with one output variable, "
                    f"got {chain.output_keys}"
                )
        if names is not None:
            if len(names) != len(chains):
                raise ValueError("Length of chains does not match length of names.")
        self.chains = chains
        chain_range = [str(i) for i in range(len(self.chains))]
        self.chain_colors = get_color_mapping(chain_range)
        self.names = names
https://api.python.langchain.com/en/latest/_modules/langchain/model_laboratory.html
fc35b1b80207-1
    @classmethod
    def from_llms(
        cls, llms: List[BaseLLM], prompt: Optional[PromptTemplate] = None
    ) -> ModelLaboratory:
        """Initialize with LLMs to experiment with and optional prompt.

        Args:
            llms: list of LLMs to experiment with
            prompt: Optional prompt to use to prompt the LLMs. Defaults to None.
                If a prompt was provided, it should only have one input variable.
        """
        if prompt is None:
            prompt = PromptTemplate(input_variables=["_input"], template="{_input}")
        chains = [LLMChain(llm=llm, prompt=prompt) for llm in llms]
        names = [str(llm) for llm in llms]
        return cls(chains, names=names)

    def compare(self, text: str) -> None:
        """Compare model outputs on an input text.

        If a prompt was provided when starting the laboratory, then this text will be
        fed into the prompt. If no prompt was provided, then the input text is the
        entire prompt.

        Args:
            text: input text to run all models on.
        """
        print(f"\033[1mInput:\033[0m\n{text}\n")
        for i, chain in enumerate(self.chains):
            if self.names is not None:
                name = self.names[i]
            else:
                name = str(chain)
            print_text(name, end="\n")
            output = chain.run(text)
            print_text(output, color=self.chain_colors[str(i)], end="\n\n")
https://api.python.langchain.com/en/latest/_modules/langchain/model_laboratory.html
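A usage sketch for ModelLaboratory (assumes OpenAI and Cohere API keys are set in the environment):

from langchain.llms import Cohere, OpenAI
from langchain.model_laboratory import ModelLaboratory

lab = ModelLaboratory.from_llms([OpenAI(temperature=0), Cohere(temperature=0)])
lab.compare("What color is a ripe banana?")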
1d31bc0683c0-0
Source code for langchain.llms.openlm

from typing import Any, Dict

from pydantic import root_validator

from langchain.llms.openai import BaseOpenAI


class OpenLM(BaseOpenAI):
    """OpenLM models."""

    @property
    def _invocation_params(self) -> Dict[str, Any]:
        return {**{"model": self.model_name}, **super()._invocation_params}

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        try:
            import openlm

            values["client"] = openlm.Completion
        except ImportError:
            raise ImportError(
                "Could not import openlm python package. "
                "Please install it with `pip install openlm`."
            )
        if values["streaming"]:
            raise ValueError("Streaming not supported with openlm")
        return values
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openlm.html
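A usage sketch for OpenLM (assumes the `openlm` package is installed and `OPENAI_API_KEY` is set; since OpenLM subclasses BaseOpenAI, the usual OpenAI-style parameters apply):

from langchain.llms import OpenLM

llm = OpenLM(model_name="text-davinci-003", temperature=0)
print(llm("Say hello in French."))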
8ba3f0233054-0
Source code for langchain.llms.edenai

"""Wrapper around EdenAI's Generation API."""
import logging
from typing import Any, Dict, List, Literal, Optional

from aiohttp import ClientSession
from pydantic import Extra, Field, root_validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.requests import Requests
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class EdenAI(LLM):
    """Wrapper around edenai models.

    To use, you should have the environment variable ``EDENAI_API_KEY`` set
    with your API token. You can find your token here:
    https://app.edenai.run/admin/account/settings

    `feature` and `subfeature` are required, but any other model parameters
    can also be passed in with the format params={model_param: value, ...}

    For API reference, check the EdenAI documentation: http://docs.edenai.co.
    """

    base_url = "https://api.edenai.run/v2"

    edenai_api_key: Optional[str] = None

    feature: Literal["text", "image"] = "text"
    """Which generative feature to use, use text by default"""

    subfeature: Literal["generation"] = "generation"
    """Subfeature of above feature, use generation by default"""

    provider: str
    """Generative provider to use (eg: openai, stabilityai, cohere, google etc.)"""

    params: Dict[str, Any]
    """
    Parameters to pass to above subfeature (excluding 'providers' & 'text')
https://api.python.langchain.com/en/latest/_modules/langchain/llms/edenai.html
8ba3f0233054-1
    ref text: https://docs.edenai.co/reference/text_generation_create
    ref image: https://docs.edenai.co/reference/text_generation_create
    """

    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """extra parameters"""

    stop_sequences: Optional[List[str]] = None
    """Stop sequences to use."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key exists in environment."""
        values["edenai_api_key"] = get_from_dict_or_env(
            values, "edenai_api_key", "EDENAI_API_KEY"
        )
        return values

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = {field.alias for field in cls.__fields__.values()}

        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name not in all_required_field_names:
                if field_name in extra:
                    raise ValueError(f"Found {field_name} supplied twice.")
                logger.warning(
                    f"""{field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
                extra[field_name] = values.pop(field_name)
        values["model_kwargs"] = extra
        return values

    @property
    def _llm_type(self) -> str:
        """Return type of model."""
        return "edenai"
https://api.python.langchain.com/en/latest/_modules/langchain/llms/edenai.html
8ba3f0233054-2
"""Return type of model.""" return "edenai" def _format_output(self, output: dict) -> str: if self.feature == "text": return output[self.provider]["generated_text"] else: return output[self.provider]["items"][0]["image"] def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to EdenAI's text generation endpoint. Args: prompt: The prompt to pass into the model. Returns: json formatted str response. """ stops = None if self.stop_sequences is not None and stop is not None: raise ValueError( "stop sequences found in both the input and default params." ) elif self.stop_sequences is not None: stops = self.stop_sequences else: stops = stop url = f"{self.base_url}/{self.feature}/{self.subfeature}" headers = {"Authorization": f"Bearer {self.edenai_api_key}"} payload = { **self.params, "providers": self.provider, "num_images": 1, # always limit to 1 (ignored for text) "text": prompt, **kwargs, } request = Requests(headers=headers) response = request.post(url=url, data=payload) if response.status_code >= 500: raise Exception(f"EdenAI Server: Error {response.status_code}") elif response.status_code >= 400:
https://api.python.langchain.com/en/latest/_modules/langchain/llms/edenai.html
8ba3f0233054-3
            raise ValueError(f"EdenAI received an invalid payload: {response.text}")
        elif response.status_code != 200:
            raise Exception(
                f"EdenAI returned an unexpected response with status "
                f"{response.status_code}: {response.text}"
            )

        output = self._format_output(response.json())

        if stops is not None:
            output = enforce_stop_tokens(output, stops)

        return output

    async def _acall(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call EdenAI model to get predictions based on the prompt.

        Args:
            prompt: The prompt to pass into the model.
            stop: A list of stop words (optional).
            run_manager: A callback manager for async interaction with LLMs.

        Returns:
            The string generated by the model.
        """
        stops = None
        if self.stop_sequences is not None and stop is not None:
            raise ValueError(
                "stop sequences found in both the input and default params."
            )
        elif self.stop_sequences is not None:
            stops = self.stop_sequences
        else:
            stops = stop

        url = f"{self.base_url}/{self.feature}/{self.subfeature}"
        headers = {"Authorization": f"Bearer {self.edenai_api_key}"}
        payload = {
            **self.params,
            "providers": self.provider,
            "num_images": 1,  # always limit to 1 (ignored for text)
https://api.python.langchain.com/en/latest/_modules/langchain/llms/edenai.html
8ba3f0233054-4
"text": prompt, **kwargs, } async with ClientSession() as session: print("Requesting") async with session.post(url, json=payload, headers=headers) as response: if response.status >= 500: raise Exception(f"EdenAI Server: Error {response.status}") elif response.status >= 400: raise ValueError( f"EdenAI received an invalid payload: {response.text}" ) elif response.status != 200: raise Exception( f"EdenAI returned an unexpected response with status " f"{response.status}: {response.text}" ) response_json = await response.json() output = self._format_output(response_json) if stops is not None: output = enforce_stop_tokens(output, stops) return output
https://api.python.langchain.com/en/latest/_modules/langchain/llms/edenai.html
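A usage sketch for EdenAI text generation (assumes `EDENAI_API_KEY` is set; the provider and params are illustrative and forwarded to the EdenAI endpoint):

from langchain.llms import EdenAI

llm = EdenAI(
    provider="openai",
    params={"temperature": 0.2, "max_tokens": 250},
)
print(llm("Explain what a cache is in one sentence."))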
bc6a4e68db83-0
Source code for langchain.llms.huggingface_hub

from typing import Any, Dict, List, Mapping, Optional

from pydantic import Extra, root_validator

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env

DEFAULT_REPO_ID = "gpt2"
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")


class HuggingFaceHub(LLM):
    """HuggingFaceHub models.

    To use, you should have the ``huggingface_hub`` python package installed, and the
    environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
    it as a named parameter to the constructor.

    Only supports `text-generation`, `text2text-generation` and `summarization`
    for now.

    Example:
        .. code-block:: python

            from langchain.llms import HuggingFaceHub
            hf = HuggingFaceHub(repo_id="gpt2", huggingfacehub_api_token="my-api-key")
    """

    client: Any  #: :meta private:
    repo_id: str = DEFAULT_REPO_ID
    """Model name to use."""
    task: Optional[str] = None
    """Task to call the model with.
    Should be a task that returns `generated_text` or `summary_text`."""
    model_kwargs: Optional[dict] = None
    """Key word arguments to pass to the model."""

    huggingfacehub_api_token: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_hub.html
bc6a4e68db83-1
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        huggingfacehub_api_token = get_from_dict_or_env(
            values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
        )
        try:
            from huggingface_hub.inference_api import InferenceApi

            repo_id = values["repo_id"]
            client = InferenceApi(
                repo_id=repo_id,
                token=huggingfacehub_api_token,
                task=values.get("task"),
            )
            if client.task not in VALID_TASKS:
                raise ValueError(
                    f"Got invalid task {client.task}, "
                    f"currently only {VALID_TASKS} are supported"
                )
            values["client"] = client
        except ImportError:
            raise ValueError(
                "Could not import huggingface_hub python package. "
                "Please install it with `pip install huggingface_hub`."
            )
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        _model_kwargs = self.model_kwargs or {}
        return {
            **{"repo_id": self.repo_id, "task": self.task},
            **{"model_kwargs": _model_kwargs},
        }

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "huggingface_hub"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_hub.html
bc6a4e68db83-2
        **kwargs: Any,
    ) -> str:
        """Call out to HuggingFace Hub's inference endpoint.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = hf("Tell me a joke.")
        """
        _model_kwargs = self.model_kwargs or {}
        params = {**_model_kwargs, **kwargs}
        response = self.client(inputs=prompt, params=params)
        if "error" in response:
            raise ValueError(f"Error raised by inference API: {response['error']}")
        if self.client.task == "text-generation":
            # Text generation return includes the starter text.
            text = response[0]["generated_text"][len(prompt) :]
        elif self.client.task == "text2text-generation":
            text = response[0]["generated_text"]
        elif self.client.task == "summarization":
            text = response[0]["summary_text"]
        else:
            raise ValueError(
                f"Got invalid task {self.client.task}, "
                f"currently only {VALID_TASKS} are supported"
            )
        if stop is not None:
            # This is a bit hacky, but I can't figure out a better way to enforce
            # stop tokens when making calls to huggingface_hub.
            text = enforce_stop_tokens(text, stop)
        return text
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_hub.html
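A usage sketch for HuggingFaceHub (assumes `HUGGINGFACEHUB_API_TOKEN` is set; the repo id is a public text2text-generation model and the kwargs are illustrative):

from langchain.llms import HuggingFaceHub

hf = HuggingFaceHub(
    repo_id="google/flan-t5-base",
    model_kwargs={"temperature": 0.5, "max_length": 64},
)
print(hf("Translate to German: How are you?"))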
d5ce6d0f793a-0
Source code for langchain.llms.replicate

import logging
from typing import Any, Dict, List, Mapping, Optional

from pydantic import Extra, Field, root_validator

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class Replicate(LLM):
    """Replicate models.

    To use, you should have the ``replicate`` python package installed,
    and the environment variable ``REPLICATE_API_TOKEN`` set with your API token.
    You can find your token here: https://replicate.com/account

    The model param is required, but any other model parameters can also
    be passed in with the format input={model_param: value, ...}

    Example:
        .. code-block:: python

            from langchain.llms import Replicate
            replicate = Replicate(
                model=(
                    "stability-ai/stable-diffusion:"
                    "27b93a2413e7f36cd83da926f3656280b2931564ff050bf9575f1fdf9bcd7478"
                ),
                input={"image_dimensions": "512x512"},
            )
    """

    model: str
    input: Dict[str, Any] = Field(default_factory=dict)
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    replicate_api_token: Optional[str] = None

    streaming: bool = Field(default=False)
    """Whether to stream the results."""

    stop: Optional[List[str]] = Field(default=[])
    """Stop sequences to early-terminate generation."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator(pre=True)
https://api.python.langchain.com/en/latest/_modules/langchain/llms/replicate.html
d5ce6d0f793a-1
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = {field.alias for field in cls.__fields__.values()}

        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name not in all_required_field_names:
                if field_name in extra:
                    raise ValueError(f"Found {field_name} supplied twice.")
                logger.warning(
                    f"""{field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
                extra[field_name] = values.pop(field_name)
        values["model_kwargs"] = extra
        return values

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        replicate_api_token = get_from_dict_or_env(
            values, "replicate_api_token", "REPLICATE_API_TOKEN"
        )
        values["replicate_api_token"] = replicate_api_token
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            "model": self.model,
            **{"model_kwargs": self.model_kwargs},
        }

    @property
    def _llm_type(self) -> str:
        """Return type of model."""
        return "replicate"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
https://api.python.langchain.com/en/latest/_modules/langchain/llms/replicate.html
d5ce6d0f793a-2
    ) -> str:
        """Call to replicate endpoint."""
        try:
            import replicate as replicate_python
        except ImportError:
            raise ImportError(
                "Could not import replicate python package. "
                "Please install it with `pip install replicate`."
            )

        # get the model and version
        model_str, version_str = self.model.split(":")
        model = replicate_python.models.get(model_str)
        version = model.versions.get(version_str)

        # sort through the openapi schema to get the name of the first input
        input_properties = sorted(
            version.openapi_schema["components"]["schemas"]["Input"][
                "properties"
            ].items(),
            key=lambda item: item[1].get("x-order", 0),
        )
        first_input_name = input_properties[0][0]
        inputs = {first_input_name: prompt, **self.input}

        prediction = replicate_python.predictions.create(
            version=version, input={**inputs, **kwargs}
        )
        current_completion: str = ""
        stop_condition_reached = False
        for output in prediction.output_iterator():
            current_completion += output

            # test for stop conditions, if specified
            if stop:
                for s in stop:
                    if s in current_completion:
                        prediction.cancel()
                        stop_index = current_completion.find(s)
                        current_completion = current_completion[:stop_index]
                        stop_condition_reached = True
                        break

            if stop_condition_reached:
                break

            if self.streaming and run_manager:
                run_manager.on_llm_new_token(output)
        return current_completion
https://api.python.langchain.com/en/latest/_modules/langchain/llms/replicate.html
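A usage sketch for Replicate (assumes `REPLICATE_API_TOKEN` is set; the model string must be a real `owner/name:version-hash` pair, so the version below is a placeholder to fill in):

from langchain.llms import Replicate

llm = Replicate(model="replicate/vicuna-13b:<version-hash>")
print(llm("What is the capital of France?"))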
0771e70d15b1-0
Source code for langchain.llms.predictionguard

import logging
from typing import Any, Dict, List, Optional

from pydantic import Extra, root_validator

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class PredictionGuard(LLM):
    """Prediction Guard large language models.

    To use, you should have the ``predictionguard`` python package installed, and the
    environment variable ``PREDICTIONGUARD_TOKEN`` set with your access token, or pass
    it as a named parameter to the constructor. To use Prediction Guard's API along
    with OpenAI models, set the environment variable ``OPENAI_API_KEY`` with your
    OpenAI API key as well.

    Example:
        .. code-block:: python

            pgllm = PredictionGuard(
                model="MPT-7B-Instruct",
                token="my-access-token",
                output={"type": "boolean"},
            )
    """

    client: Any  #: :meta private:
    model: Optional[str] = "MPT-7B-Instruct"
    """Model name to use."""

    output: Optional[Dict[str, Any]] = None
    """The output type or structure for controlling the LLM output."""

    max_tokens: int = 256
    """Denotes the number of tokens to predict per generation."""

    temperature: float = 0.75
    """A non-negative float that tunes the degree of randomness in generation."""

    token: Optional[str] = None
    """Your Prediction Guard access token."""

    stop: Optional[List[str]] = None

    class Config:
https://api.python.langchain.com/en/latest/_modules/langchain/llms/predictionguard.html
0771e70d15b1-1
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the access token and python package exists in environment."""
        token = get_from_dict_or_env(values, "token", "PREDICTIONGUARD_TOKEN")
        try:
            import predictionguard as pg

            values["client"] = pg.Client(token=token)
        except ImportError:
            raise ImportError(
                "Could not import predictionguard python package. "
                "Please install it with `pip install predictionguard`."
            )
        return values

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling the Prediction Guard API."""
        return {
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
        }

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {**{"model": self.model}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "predictionguard"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call out to Prediction Guard's model API.

        Args:
            prompt: The prompt to pass into the model.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python
https://api.python.langchain.com/en/latest/_modules/langchain/llms/predictionguard.html
0771e70d15b1-2
                response = pgllm("Tell me a joke.")
        """
        import predictionguard as pg

        params = self._default_params
        if self.stop is not None and stop is not None:
            raise ValueError("`stop` found in both the input and default params.")
        elif self.stop is not None:
            params["stop_sequences"] = self.stop
        else:
            params["stop_sequences"] = stop

        response = pg.Completion.create(
            model=self.model,
            prompt=prompt,
            output=self.output,
            temperature=params["temperature"],
            max_tokens=params["max_tokens"],
            **kwargs,
        )
        text = response["choices"][0]["text"]

        # If stop tokens are provided, Prediction Guard's endpoint returns them.
        # In order to make this consistent with other endpoints, we strip them.
        if stop is not None or self.stop is not None:
            text = enforce_stop_tokens(text, params["stop_sequences"])

        return text
https://api.python.langchain.com/en/latest/_modules/langchain/llms/predictionguard.html
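A usage sketch for PredictionGuard, mirroring the class docstring (assumes `PREDICTIONGUARD_TOKEN` is set; the `output` constraint is optional):

from langchain.llms import PredictionGuard

pgllm = PredictionGuard(model="MPT-7B-Instruct", output={"type": "boolean"})
print(pgllm("Is the sky blue?"))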
ae4a218ccf7c-0
Source code for langchain.llms.stochasticai

import logging
import time
from typing import Any, Dict, List, Mapping, Optional

import requests
from pydantic import Extra, Field, root_validator

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class StochasticAI(LLM):
    """StochasticAI large language models.

    To use, you should have the environment variable ``STOCHASTICAI_API_KEY``
    set with your API key.

    Example:
        .. code-block:: python

            from langchain.llms import StochasticAI
            stochasticai = StochasticAI(api_url="")
    """

    api_url: str = ""
    """Model API URL to use."""

    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not
    explicitly specified."""

    stochasticai_api_key: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = {field.alias for field in cls.__fields__.values()}

        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name not in all_required_field_names:
                if field_name in extra:
                    raise ValueError(f"Found {field_name} supplied twice.")
                logger.warning(
https://api.python.langchain.com/en/latest/_modules/langchain/llms/stochasticai.html
ae4a218ccf7c-1
raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" stochasticai_api_key = get_from_dict_or_env( values, "stochasticai_api_key", "STOCHASTICAI_API_KEY" ) values["stochasticai_api_key"] = stochasticai_api_key return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"endpoint_url": self.api_url}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "stochasticai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to StochasticAI's complete endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = StochasticAI("Tell me a joke.") """ params = self.model_kwargs or {}
https://api.python.langchain.com/en/latest/_modules/langchain/llms/stochasticai.html
ae4a218ccf7c-2
""" params = self.model_kwargs or {} params = {**params, **kwargs} response_post = requests.post( url=self.api_url, json={"prompt": prompt, "params": params}, headers={ "apiKey": f"{self.stochasticai_api_key}", "Accept": "application/json", "Content-Type": "application/json", }, ) response_post.raise_for_status() response_post_json = response_post.json() completed = False while not completed: response_get = requests.get( url=response_post_json["data"]["responseUrl"], headers={ "apiKey": f"{self.stochasticai_api_key}", "Accept": "application/json", "Content-Type": "application/json", }, ) response_get.raise_for_status() response_get_json = response_get.json()["data"] text = response_get_json.get("completion") completed = text is not None time.sleep(0.5) text = text[0] if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text
https://api.python.langchain.com/en/latest/_modules/langchain/llms/stochasticai.html
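A usage sketch for StochasticAI (assumes `STOCHASTICAI_API_KEY` is set; the `api_url` is a placeholder for your own deployed model's submit endpoint):

from langchain.llms import StochasticAI

llm = StochasticAI(api_url="https://api.stochastic.ai/v1/modelApi/submit/<your-model>")
print(llm("Tell me a joke."))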
40ec21de69c3-0
Source code for langchain.llms.chatglm

import logging
from typing import Any, List, Mapping, Optional

import requests

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens

logger = logging.getLogger(__name__)


class ChatGLM(LLM):
    """ChatGLM LLM service.

    Example:
        .. code-block:: python

            from langchain.llms import ChatGLM
            endpoint_url = "http://127.0.0.1:8000"
            ChatGLM_llm = ChatGLM(endpoint_url=endpoint_url)
    """

    endpoint_url: str = "http://127.0.0.1:8000/"
    """Endpoint URL to use."""
    model_kwargs: Optional[dict] = None
    """Key word arguments to pass to the model."""
    max_token: int = 20000
    """Max token allowed to pass to the model."""
    temperature: float = 0.1
    """LLM model temperature from 0 to 10."""
    history: List[List] = []
    """History of the conversation"""
    top_p: float = 0.7
    """Top P for nucleus sampling from 0 to 1"""
    with_history: bool = False
    """Whether to use history or not"""

    @property
    def _llm_type(self) -> str:
        return "chat_glm"

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        _model_kwargs = self.model_kwargs or {}
        return {
            **{"endpoint_url": self.endpoint_url},
https://api.python.langchain.com/en/latest/_modules/langchain/llms/chatglm.html
40ec21de69c3-1
return { **{"endpoint_url": self.endpoint_url}, **{"model_kwargs": _model_kwargs}, } def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to a ChatGLM LLM inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = chatglm_llm("Who are you?") """ _model_kwargs = self.model_kwargs or {} # HTTP headers for authorization headers = {"Content-Type": "application/json"} payload = { "prompt": prompt, "temperature": self.temperature, "history": self.history, "max_length": self.max_token, "top_p": self.top_p, } payload.update(_model_kwargs) payload.update(kwargs) logger.debug(f"ChatGLM payload: {payload}") # call api try: response = requests.post(self.endpoint_url, headers=headers, json=payload) except requests.exceptions.RequestException as e: raise ValueError(f"Error raised by inference endpoint: {e}") logger.debug(f"ChatGLM response: {response}") if response.status_code != 200: raise ValueError(f"Failed with response: {response}") try: parsed_response = response.json() # Check if response content does exists if isinstance(parsed_response, dict):
https://api.python.langchain.com/en/latest/_modules/langchain/llms/chatglm.html
40ec21de69c3-2
                content_keys = "response"
                if content_keys in parsed_response:
                    text = parsed_response[content_keys]
                else:
                    raise ValueError(f"No content in response: {parsed_response}")
            else:
                raise ValueError(f"Unexpected response type: {parsed_response}")

        except requests.exceptions.JSONDecodeError as e:
            raise ValueError(
                f"Error raised during decoding response from inference endpoint: {e}."
                f"\nResponse: {response.text}"
            )

        if stop is not None:
            text = enforce_stop_tokens(text, stop)
        if self.with_history:
            self.history = self.history + [[None, parsed_response["response"]]]
        return text
https://api.python.langchain.com/en/latest/_modules/langchain/llms/chatglm.html
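A usage sketch for ChatGLM (assumes a ChatGLM API server is running locally on port 8000, as in the class docstring):

from langchain.llms import ChatGLM

llm = ChatGLM(endpoint_url="http://127.0.0.1:8000", top_p=0.9, with_history=False)
print(llm("What is the capital of China?"))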
bf0f3ad0423d-0
Source code for langchain.llms.forefrontai

from typing import Any, Dict, List, Mapping, Optional

import requests
from pydantic import Extra, root_validator

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env


class ForefrontAI(LLM):
    """ForefrontAI large language models.

    To use, you should have the environment variable ``FOREFRONTAI_API_KEY``
    set with your API key.

    Example:
        .. code-block:: python

            from langchain.llms import ForefrontAI
            forefrontai = ForefrontAI(endpoint_url="")
    """

    endpoint_url: str = ""
    """Model endpoint URL to use."""

    temperature: float = 0.7
    """What sampling temperature to use."""

    length: int = 256
    """The maximum number of tokens to generate in the completion."""

    top_p: float = 1.0
    """Total probability mass of tokens to consider at each step."""

    top_k: int = 40
    """The number of highest probability vocabulary tokens to
    keep for top-k-filtering."""

    repetition_penalty: int = 1
    """Penalizes repeated tokens according to frequency."""

    forefrontai_api_key: Optional[str] = None

    base_url: Optional[str] = None
    """Base url to use, if None decides based on model name."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key exists in environment."""
https://api.python.langchain.com/en/latest/_modules/langchain/llms/forefrontai.html
bf0f3ad0423d-1
"""Validate that api key exists in environment.""" forefrontai_api_key = get_from_dict_or_env( values, "forefrontai_api_key", "FOREFRONTAI_API_KEY" ) values["forefrontai_api_key"] = forefrontai_api_key return values @property def _default_params(self) -> Mapping[str, Any]: """Get the default parameters for calling ForefrontAI API.""" return { "temperature": self.temperature, "length": self.length, "top_p": self.top_p, "top_k": self.top_k, "repetition_penalty": self.repetition_penalty, } @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"endpoint_url": self.endpoint_url}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "forefrontai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to ForefrontAI's complete endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = ForefrontAI("Tell me a joke.") """ response = requests.post( url=self.endpoint_url, headers={
https://api.python.langchain.com/en/latest/_modules/langchain/llms/forefrontai.html
bf0f3ad0423d-2
response = requests.post( url=self.endpoint_url, headers={ "Authorization": f"Bearer {self.forefrontai_api_key}", "Content-Type": "application/json", }, json={"text": prompt, **self._default_params, **kwargs}, ) response_json = response.json() text = response_json["result"][0]["completion"] if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text
https://api.python.langchain.com/en/latest/_modules/langchain/llms/forefrontai.html
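A brief usage sketch for the class above; the endpoint URL and API key are placeholders, and stop sequences are trimmed client-side exactly as `_call` does:

.. code-block:: python

    from langchain.llms import ForefrontAI

    llm = ForefrontAI(
        endpoint_url="https://your-forefront-endpoint.example/",  # placeholder URL
        forefrontai_api_key="YOUR_API_KEY",  # or set FOREFRONTAI_API_KEY in the environment
        temperature=0.7,
        length=256,
    )

    # The instance is callable; the completion comes from result[0]["completion"]
    # and any stop sequences are enforced with enforce_stop_tokens.
    print(llm("Tell me a joke.", stop=["\n\n"]))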
9d4454505c76-0
Source code for langchain.llms.base """Base interface for large language models to expose.""" from __future__ import annotations import asyncio import functools import inspect import json import logging import warnings from abc import ABC, abstractmethod from functools import partial from pathlib import Path from typing import ( Any, AsyncIterator, Callable, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple, Type, Union, cast, ) import yaml from pydantic import Field, root_validator, validator from tenacity import ( RetryCallState, before_sleep_log, retry, retry_base, retry_if_exception_type, stop_after_attempt, wait_exponential, ) import langchain from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks, ) from langchain.load.dump import dumpd from langchain.prompts.base import StringPromptValue from langchain.prompts.chat import ChatPromptValue from langchain.schema import ( Generation, LLMResult, PromptValue, RunInfo, ) from langchain.schema.language_model import BaseLanguageModel, LanguageModelInput from langchain.schema.messages import AIMessage, BaseMessage, get_buffer_string from langchain.schema.output import GenerationChunk from langchain.schema.runnable import RunnableConfig logger = logging.getLogger(__name__) def _get_verbosity() -> bool: return langchain.verbose @functools.lru_cache def _log_error_once(msg: str) -> None:
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
9d4454505c76-1
def _log_error_once(msg: str) -> None: """Log an error once.""" logger.error(msg) [docs]def create_base_retry_decorator( error_types: List[Type[BaseException]], max_retries: int = 1, run_manager: Optional[ Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun] ] = None, ) -> Callable[[Any], Any]: """Create a retry decorator for a given LLM and provided list of error types.""" _logging = before_sleep_log(logger, logging.WARNING) def _before_sleep(retry_state: RetryCallState) -> None: _logging(retry_state) if run_manager: if isinstance(run_manager, AsyncCallbackManagerForLLMRun): coro = run_manager.on_retry(retry_state) try: loop = asyncio.get_event_loop() if loop.is_running(): loop.create_task(coro) else: asyncio.run(coro) except Exception as e: _log_error_once(f"Error in on_retry: {e}") else: run_manager.on_retry(retry_state) return None min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards retry_instance: "retry_base" = retry_if_exception_type(error_types[0]) for error in error_types[1:]: retry_instance = retry_instance | retry_if_exception_type(error) return retry( reraise=True, stop=stop_after_attempt(max_retries),
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
9d4454505c76-2
reraise=True, stop=stop_after_attempt(max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=retry_instance, before_sleep=_before_sleep, ) [docs]def get_prompts( params: Dict[str, Any], prompts: List[str] ) -> Tuple[Dict[int, List], str, List[int], List[str]]: """Get prompts that are already cached.""" llm_string = str(sorted([(k, v) for k, v in params.items()])) missing_prompts = [] missing_prompt_idxs = [] existing_prompts = {} for i, prompt in enumerate(prompts): if langchain.llm_cache is not None: cache_val = langchain.llm_cache.lookup(prompt, llm_string) if isinstance(cache_val, list): existing_prompts[i] = cache_val else: missing_prompts.append(prompt) missing_prompt_idxs.append(i) return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts [docs]def update_cache( existing_prompts: Dict[int, List], llm_string: str, missing_prompt_idxs: List[int], new_results: LLMResult, prompts: List[str], ) -> Optional[dict]: """Update the cache and get the LLM output.""" for i, result in enumerate(new_results.generations): existing_prompts[missing_prompt_idxs[i]] = result prompt = prompts[missing_prompt_idxs[i]] if langchain.llm_cache is not None: langchain.llm_cache.update(prompt, llm_string, result) llm_output = new_results.llm_output
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
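``create_base_retry_decorator`` returns a plain tenacity decorator, so a provider integration can wrap its own network call with it. A minimal sketch, assuming a hypothetical HTTP endpoint and using ``requests`` exception types as the retryable errors:

.. code-block:: python

    import requests

    from langchain.llms.base import create_base_retry_decorator

    retry_decorator = create_base_retry_decorator(
        error_types=[requests.exceptions.ConnectionError, requests.exceptions.Timeout],
        max_retries=3,
    )


    @retry_decorator
    def flaky_completion_call(prompt: str) -> str:
        # Hypothetical provider call; retried with exponential backoff (4s growing to 10s)
        # whenever one of the listed exception types is raised, up to 3 attempts.
        resp = requests.post(
            "https://example.invalid/generate", json={"prompt": prompt}, timeout=10
        )
        resp.raise_for_status()
        return resp.json()["text"]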
9d4454505c76-3
llm_output = new_results.llm_output return llm_output [docs]class BaseLLM(BaseLanguageModel[str], ABC): """Base LLM abstract interface. It should take in a prompt and return a string.""" cache: Optional[bool] = None verbose: bool = Field(default_factory=_get_verbosity) """Whether to print out response text.""" callbacks: Callbacks = Field(default=None, exclude=True) callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) tags: Optional[List[str]] = Field(default=None, exclude=True) """Tags to add to the run trace.""" metadata: Optional[Dict[str, Any]] = Field(default=None, exclude=True) """Metadata to add to the run trace.""" class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: """Raise deprecation warning if callback_manager is used.""" if values.get("callback_manager") is not None: warnings.warn( "callback_manager is deprecated. Please use callbacks instead.", DeprecationWarning, ) values["callbacks"] = values.pop("callback_manager", None) return values @validator("verbose", pre=True, always=True) def set_verbose(cls, verbose: Optional[bool]) -> bool: """If verbose is None, set it. This allows users to pass in None as verbose to access the global setting. """ if verbose is None: return _get_verbosity() else: return verbose # --- Runnable methods ---
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
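``get_prompts`` and ``update_cache`` are the read and write halves of prompt-level caching: ``generate`` only runs the model for prompts that miss the cache and writes the new generations back. A small sketch of the observable behaviour, using the in-memory cache and the ``FakeListLLM`` test model that appear elsewhere in this collection of modules:

.. code-block:: python

    import langchain
    from langchain.cache import InMemoryCache
    from langchain.llms.fake import FakeListLLM

    langchain.llm_cache = InMemoryCache()
    llm = FakeListLLM(responses=["4", "never returned while the cache is warm"])

    first = llm("What is 2 + 2?")   # cache miss: _generate runs and the result is stored
    second = llm("What is 2 + 2?")  # cache hit: served by get_prompts, _generate is skipped
    assert first == second == "4"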
9d4454505c76-4
else: return verbose # --- Runnable methods --- def _convert_input(self, input: LanguageModelInput) -> PromptValue: if isinstance(input, PromptValue): return input elif isinstance(input, str): return StringPromptValue(text=input) elif isinstance(input, list): return ChatPromptValue(messages=input) else: raise ValueError( f"Invalid input type {type(input)}. " "Must be a PromptValue, str, or list of BaseMessages." ) [docs] def invoke( self, input: LanguageModelInput, config: Optional[RunnableConfig] = None, *, stop: Optional[List[str]] = None, **kwargs: Any, ) -> str: return ( self.generate_prompt( [self._convert_input(input)], stop=stop, **(config or {}), **kwargs ) .generations[0][0] .text ) [docs] async def ainvoke( self, input: LanguageModelInput, config: Optional[RunnableConfig] = None, *, stop: Optional[List[str]] = None, **kwargs: Any, ) -> str: if type(self)._agenerate == BaseLLM._agenerate: # model doesn't implement async invoke, so use default implementation return await asyncio.get_running_loop().run_in_executor( None, partial(self.invoke, input, config, stop=stop, **kwargs) ) llm_result = await self.agenerate_prompt( [self._convert_input(input)], stop=stop, **(config or {}), **kwargs )
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
9d4454505c76-5
) return llm_result.generations[0][0].text [docs] def batch( self, inputs: List[LanguageModelInput], config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None, max_concurrency: Optional[int] = None, **kwargs: Any, ) -> List[str]: config = self._get_config_list(config, len(inputs)) if max_concurrency is None: llm_result = self.generate_prompt( [self._convert_input(input) for input in inputs], callbacks=[c.get("callbacks") for c in config], tags=[c.get("tags") for c in config], metadata=[c.get("metadata") for c in config], **kwargs, ) return [g[0].text for g in llm_result.generations] else: batches = [ inputs[i : i + max_concurrency] for i in range(0, len(inputs), max_concurrency) ] return [ output for batch in batches for output in self.batch(batch, config=config, **kwargs) ] [docs] async def abatch( self, inputs: List[LanguageModelInput], config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None, max_concurrency: Optional[int] = None, **kwargs: Any, ) -> List[str]: if type(self)._agenerate == BaseLLM._agenerate: # model doesn't implement async batch, so use default implementation return await asyncio.get_running_loop().run_in_executor( None, self.batch, inputs, config, max_concurrency )
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
9d4454505c76-6
None, self.batch, inputs, config, max_concurrency ) config = self._get_config_list(config, len(inputs)) if max_concurrency is None: llm_result = await self.agenerate_prompt( [self._convert_input(input) for input in inputs], callbacks=[c.get("callbacks") for c in config], tags=[c.get("tags") for c in config], metadata=[c.get("metadata") for c in config], **kwargs, ) return [g[0].text for g in llm_result.generations] else: batches = [ inputs[i : i + max_concurrency] for i in range(0, len(inputs), max_concurrency) ] return [ output for batch in batches for output in await self.abatch(batch, config=config, **kwargs) ] [docs] def stream( self, input: LanguageModelInput, config: Optional[RunnableConfig] = None, *, stop: Optional[List[str]] = None, **kwargs: Any, ) -> Iterator[str]: if type(self)._stream == BaseLLM._stream: # model doesn't implement streaming, so use default implementation yield self.invoke(input, config=config, stop=stop, **kwargs) else: prompt = self._convert_input(input).to_string() config = config or {} params = self.dict() params["stop"] = stop params = {**params, **kwargs} options = {"stop": stop} callback_manager = CallbackManager.configure( config.get("callbacks"), self.callbacks, self.verbose,
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
9d4454505c76-7
config.get("callbacks"), self.callbacks, self.verbose, config.get("tags"), self.tags, config.get("metadata"), self.metadata, ) (run_manager,) = callback_manager.on_llm_start( dumpd(self), [prompt], invocation_params=params, options=options ) try: generation: Optional[GenerationChunk] = None for chunk in self._stream( prompt, stop=stop, run_manager=run_manager, **kwargs ): yield chunk.text if generation is None: generation = chunk else: generation += chunk assert generation is not None except (KeyboardInterrupt, Exception) as e: run_manager.on_llm_error(e) raise e else: run_manager.on_llm_end(LLMResult(generations=[[generation]])) [docs] async def astream( self, input: LanguageModelInput, config: Optional[RunnableConfig] = None, *, stop: Optional[List[str]] = None, **kwargs: Any, ) -> AsyncIterator[str]: if type(self)._astream == BaseLLM._astream: # model doesn't implement streaming, so use default implementation yield await self.ainvoke(input, config=config, stop=stop, **kwargs) else: prompt = self._convert_input(input).to_string() config = config or {} params = self.dict() params["stop"] = stop params = {**params, **kwargs} options = {"stop": stop} callback_manager = AsyncCallbackManager.configure( config.get("callbacks"), self.callbacks, self.verbose,
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
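``stream`` falls back to a single blocking invocation when a model does not implement ``_stream``; otherwise it yields text chunks as they arrive and reports the accumulated generation to ``on_llm_end``. A usage sketch (the model class and server URL are illustrative; any LLM subclass that implements ``_stream`` behaves the same way):

.. code-block:: python

    from langchain.llms import HuggingFaceTextGenInference

    llm = HuggingFaceTextGenInference(inference_server_url="http://localhost:8010/")

    for chunk in llm.stream("What is Deep Learning?", stop=["\n\n"]):
        # Each chunk is a plain string; printing without a newline gives a live feed.
        print(chunk, end="", flush=True)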
9d4454505c76-8
config.get("callbacks"), self.callbacks, self.verbose, config.get("tags"), self.tags, config.get("metadata"), self.metadata, ) (run_manager,) = await callback_manager.on_llm_start( dumpd(self), [prompt], invocation_params=params, options=options ) try: generation: Optional[GenerationChunk] = None async for chunk in self._astream( prompt, stop=stop, run_manager=run_manager, **kwargs ): yield chunk.text if generation is None: generation = chunk else: generation += chunk assert generation is not None except (KeyboardInterrupt, Exception) as e: await run_manager.on_llm_error(e) raise e else: await run_manager.on_llm_end(LLMResult(generations=[[generation]])) # --- Custom methods --- @abstractmethod def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompts.""" async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompts.""" raise NotImplementedError() def _stream( self, prompt: str, stop: Optional[List[str]] = None,
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
9d4454505c76-9
prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: raise NotImplementedError() def _astream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[GenerationChunk]: raise NotImplementedError() [docs] def generate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[Callbacks, List[Callbacks]]] = None, **kwargs: Any, ) -> LLMResult: prompt_strings = [p.to_string() for p in prompts] return self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs) [docs] async def agenerate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[Callbacks, List[Callbacks]]] = None, **kwargs: Any, ) -> LLMResult: prompt_strings = [p.to_string() for p in prompts] return await self.agenerate( prompt_strings, stop=stop, callbacks=callbacks, **kwargs ) def _generate_helper( self, prompts: List[str], stop: Optional[List[str]], run_managers: List[CallbackManagerForLLMRun], new_arg_supported: bool, **kwargs: Any, ) -> LLMResult: try:
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
9d4454505c76-10
**kwargs: Any, ) -> LLMResult: try: output = ( self._generate( prompts, stop=stop, # TODO: support multiple run managers run_manager=run_managers[0] if run_managers else None, **kwargs, ) if new_arg_supported else self._generate(prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: for run_manager in run_managers: run_manager.on_llm_error(e) raise e flattened_outputs = output.flatten() for manager, flattened_output in zip(run_managers, flattened_outputs): manager.on_llm_end(flattened_output) if run_managers: output.run = [ RunInfo(run_id=run_manager.run_id) for run_manager in run_managers ] return output [docs] def generate( self, prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[Callbacks, List[Callbacks]]] = None, *, tags: Optional[Union[List[str], List[List[str]]]] = None, metadata: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" if not isinstance(prompts, list): raise ValueError( "Argument 'prompts' is expected to be of type List[str], received" f" argument of type {type(prompts)}." ) # Create callback managers if isinstance(callbacks, list) and (
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
9d4454505c76-11
) # Create callback managers if isinstance(callbacks, list) and ( isinstance(callbacks[0], (list, BaseCallbackManager)) or callbacks[0] is None ): # We've received a list of callbacks args to apply to each input assert len(callbacks) == len(prompts) assert tags is None or ( isinstance(tags, list) and len(tags) == len(prompts) ) assert metadata is None or ( isinstance(metadata, list) and len(metadata) == len(prompts) ) callbacks = cast(List[Callbacks], callbacks) tags_list = cast(List[Optional[List[str]]], tags or ([None] * len(prompts))) metadata_list = cast( List[Optional[Dict[str, Any]]], metadata or ([{}] * len(prompts)) ) callback_managers = [ CallbackManager.configure( callback, self.callbacks, self.verbose, tag, self.tags, meta, self.metadata, ) for callback, tag, meta in zip(callbacks, tags_list, metadata_list) ] else: # We've received a single callbacks arg to apply to all inputs callback_managers = [ CallbackManager.configure( cast(Callbacks, callbacks), self.callbacks, self.verbose, cast(List[str], tags), self.tags, cast(Dict[str, Any], metadata), self.metadata, ) ] * len(prompts) params = self.dict() params["stop"] = stop options = {"stop": stop} ( existing_prompts, llm_string, missing_prompt_idxs,
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
9d4454505c76-12
existing_prompts, llm_string, missing_prompt_idxs, missing_prompts, ) = get_prompts(params, prompts) disregard_cache = self.cache is not None and not self.cache new_arg_supported = inspect.signature(self._generate).parameters.get( "run_manager" ) if langchain.llm_cache is None or disregard_cache: if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." ) run_managers = [ callback_manager.on_llm_start( dumpd(self), [prompt], invocation_params=params, options=options )[0] for callback_manager, prompt in zip(callback_managers, prompts) ] output = self._generate_helper( prompts, stop, run_managers, bool(new_arg_supported), **kwargs ) return output if len(missing_prompts) > 0: run_managers = [ callback_managers[idx].on_llm_start( dumpd(self), [prompts[idx]], invocation_params=params, options=options, )[0] for idx in missing_prompt_idxs ] new_results = self._generate_helper( missing_prompts, stop, run_managers, bool(new_arg_supported), **kwargs ) llm_output = update_cache( existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts ) run_info = ( [RunInfo(run_id=run_manager.run_id) for run_manager in run_managers] if run_managers else None ) else: llm_output = {}
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
9d4454505c76-13
else None ) else: llm_output = {} run_info = None generations = [existing_prompts[i] for i in range(len(prompts))] return LLMResult(generations=generations, llm_output=llm_output, run=run_info) async def _agenerate_helper( self, prompts: List[str], stop: Optional[List[str]], run_managers: List[AsyncCallbackManagerForLLMRun], new_arg_supported: bool, **kwargs: Any, ) -> LLMResult: try: output = ( await self._agenerate( prompts, stop=stop, run_manager=run_managers[0] if run_managers else None, **kwargs, ) if new_arg_supported else await self._agenerate(prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: await asyncio.gather( *[run_manager.on_llm_error(e) for run_manager in run_managers] ) raise e flattened_outputs = output.flatten() await asyncio.gather( *[ run_manager.on_llm_end(flattened_output) for run_manager, flattened_output in zip( run_managers, flattened_outputs ) ] ) if run_managers: output.run = [ RunInfo(run_id=run_manager.run_id) for run_manager in run_managers ] return output [docs] async def agenerate( self, prompts: List[str], stop: Optional[List[str]] = None,
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
9d4454505c76-14
prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[Callbacks, List[Callbacks]]] = None, *, tags: Optional[Union[List[str], List[List[str]]]] = None, metadata: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" # Create callback managers if isinstance(callbacks, list) and ( isinstance(callbacks[0], (list, BaseCallbackManager)) or callbacks[0] is None ): # We've received a list of callbacks args to apply to each input assert len(callbacks) == len(prompts) assert tags is None or ( isinstance(tags, list) and len(tags) == len(prompts) ) assert metadata is None or ( isinstance(metadata, list) and len(metadata) == len(prompts) ) callbacks = cast(List[Callbacks], callbacks) tags_list = cast(List[Optional[List[str]]], tags or ([None] * len(prompts))) metadata_list = cast( List[Optional[Dict[str, Any]]], metadata or ([{}] * len(prompts)) ) callback_managers = [ AsyncCallbackManager.configure( callback, self.callbacks, self.verbose, tag, self.tags, meta, self.metadata, ) for callback, tag, meta in zip(callbacks, tags_list, metadata_list) ] else: # We've received a single callbacks arg to apply to all inputs callback_managers = [
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
9d4454505c76-15
callback_managers = [ AsyncCallbackManager.configure( cast(Callbacks, callbacks), self.callbacks, self.verbose, cast(List[str], tags), self.tags, cast(Dict[str, Any], metadata), self.metadata, ) ] * len(prompts) params = self.dict() params["stop"] = stop options = {"stop": stop} ( existing_prompts, llm_string, missing_prompt_idxs, missing_prompts, ) = get_prompts(params, prompts) disregard_cache = self.cache is not None and not self.cache new_arg_supported = inspect.signature(self._agenerate).parameters.get( "run_manager" ) if langchain.llm_cache is None or disregard_cache: if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." ) run_managers = await asyncio.gather( *[ callback_manager.on_llm_start( dumpd(self), [prompt], invocation_params=params, options=options ) for callback_manager, prompt in zip(callback_managers, prompts) ] ) run_managers = [r[0] for r in run_managers] output = await self._agenerate_helper( prompts, stop, run_managers, bool(new_arg_supported), **kwargs ) return output if len(missing_prompts) > 0: run_managers = await asyncio.gather( *[ callback_managers[idx].on_llm_start( dumpd(self), [prompts[idx]], invocation_params=params,
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
9d4454505c76-16
dumpd(self), [prompts[idx]], invocation_params=params, options=options, ) for idx in missing_prompt_idxs ] ) run_managers = [r[0] for r in run_managers] new_results = await self._agenerate_helper( missing_prompts, stop, run_managers, bool(new_arg_supported), **kwargs ) llm_output = update_cache( existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts ) run_info = ( [RunInfo(run_id=run_manager.run_id) for run_manager in run_managers] if run_managers else None ) else: llm_output = {} run_info = None generations = [existing_prompts[i] for i in range(len(prompts))] return LLMResult(generations=generations, llm_output=llm_output, run=run_info) [docs] def __call__( self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> str: """Check Cache and run the LLM on the given prompt and input.""" if not isinstance(prompt, str): raise ValueError( "Argument `prompt` is expected to be a string. Instead found " f"{type(prompt)}. If you want to run the LLM on multiple prompts, use " "`generate` instead." ) return ( self.generate( [prompt],
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
9d4454505c76-17
) return ( self.generate( [prompt], stop=stop, callbacks=callbacks, tags=tags, metadata=metadata, **kwargs, ) .generations[0][0] .text ) async def _call_async( self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> str: """Check Cache and run the LLM on the given prompt and input.""" result = await self.agenerate( [prompt], stop=stop, callbacks=callbacks, tags=tags, metadata=metadata, **kwargs, ) return result.generations[0][0].text [docs] def predict( self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any ) -> str: if stop is None: _stop = None else: _stop = list(stop) return self(text, stop=_stop, **kwargs) [docs] def predict_messages( self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any, ) -> BaseMessage: text = get_buffer_string(messages) if stop is None: _stop = None else: _stop = list(stop) content = self(text, stop=_stop, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
9d4454505c76-18
content = self(text, stop=_stop, **kwargs) return AIMessage(content=content) [docs] async def apredict( self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any ) -> str: if stop is None: _stop = None else: _stop = list(stop) return await self._call_async(text, stop=_stop, **kwargs) [docs] async def apredict_messages( self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any, ) -> BaseMessage: text = get_buffer_string(messages) if stop is None: _stop = None else: _stop = list(stop) content = await self._call_async(text, stop=_stop, **kwargs) return AIMessage(content=content) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {} def __str__(self) -> str: """Get a string representation of the object for printing.""" cls_name = f"\033[1m{self.__class__.__name__}\033[0m" return f"{cls_name}\nParams: {self._identifying_params}" @property @abstractmethod def _llm_type(self) -> str: """Return type of llm.""" [docs] def dict(self, **kwargs: Any) -> Dict: """Return a dictionary of the LLM.""" starter_dict = dict(self._identifying_params) starter_dict["_type"] = self._llm_type
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
9d4454505c76-19
starter_dict["_type"] = self._llm_type return starter_dict [docs] def save(self, file_path: Union[Path, str]) -> None: """Save the LLM. Args: file_path: Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path="path/llm.yaml") """ # Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) # Fetch dictionary to save prompt_dict = self.dict() if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(prompt_dict, f, indent=4) elif save_path.suffix == ".yaml": with open(file_path, "w") as f: yaml.dump(prompt_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml") [docs]class LLM(BaseLLM): """Base LLM abstract class. The purpose of this class is to expose a simpler interface for working with LLMs, rather than expect the user to implement the full _generate method. """ @abstractmethod def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Run the LLM on the given prompt and input."""
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
9d4454505c76-20
) -> str: """Run the LLM on the given prompt and input.""" async def _acall( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Run the LLM on the given prompt and input.""" raise NotImplementedError() def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" # TODO: add caching here. generations = [] new_arg_supported = inspect.signature(self._call).parameters.get("run_manager") for prompt in prompts: text = ( self._call(prompt, stop=stop, run_manager=run_manager, **kwargs) if new_arg_supported else self._call(prompt, stop=stop, **kwargs) ) generations.append([Generation(text=text)]) return LLMResult(generations=generations) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: if type(self)._acall == LLM._acall: # model doesn't implement async call, so use default implementation return await asyncio.get_running_loop().run_in_executor(
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
9d4454505c76-21
return await asyncio.get_running_loop().run_in_executor( None, partial(self._generate, prompts, stop, run_manager, **kwargs) ) """Run the LLM on the given prompt and input.""" generations = [] new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager") for prompt in prompts: text = ( await self._acall(prompt, stop=stop, run_manager=run_manager, **kwargs) if new_arg_supported else await self._acall(prompt, stop=stop, **kwargs) ) generations.append([Generation(text=text)]) return LLMResult(generations=generations)
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
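As the ``LLM`` docstring says, a provider integration usually only needs to implement ``_call`` (and optionally ``_acall``); ``_generate`` then wraps each completion in a ``Generation``. A minimal custom subclass with a made-up echoing backend, for illustration only:

.. code-block:: python

    from typing import Any, List, Optional

    from langchain.callbacks.manager import CallbackManagerForLLMRun
    from langchain.llms.base import LLM
    from langchain.llms.utils import enforce_stop_tokens


    class EchoLLM(LLM):
        """Toy LLM that echoes the prompt back; stands in for a real provider call."""

        prefix: str = "echo: "

        @property
        def _llm_type(self) -> str:
            return "echo"

        def _call(
            self,
            prompt: str,
            stop: Optional[List[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
            **kwargs: Any,
        ) -> str:
            text = self.prefix + prompt
            if stop is not None:
                text = enforce_stop_tokens(text, stop)
            return text


    print(EchoLLM()("hello"))  # -> "echo: hello"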
eb09e2d28d52-0
Source code for langchain.llms.xinference from typing import TYPE_CHECKING, Any, Generator, List, Mapping, Optional, Union from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM if TYPE_CHECKING: from xinference.client import RESTfulChatModelHandle, RESTfulGenerateModelHandle from xinference.model.llm.core import LlamaCppGenerateConfig [docs]class Xinference(LLM): """Wrapper for accessing Xinference's large-scale model inference service. To use, you should have the xinference library installed: .. code-block:: bash pip install "xinference[all]" Check out: https://github.com/xorbitsai/inference To run, you need to start a Xinference supervisor on one server and Xinference workers on the other servers Example: To start a local instance of Xinference, run .. code-block:: bash $ xinference You can also deploy Xinference in a distributed cluster. Here are the steps: Starting the supervisor: .. code-block:: bash $ xinference-supervisor Starting the worker: .. code-block:: bash $ xinference-worker Then, launch a model using command line interface (CLI). Example: .. code-block:: bash $ xinference launch -n orca -s 3 -q q4_0 It will return a model UID. Then, you can use Xinference with LangChain. Example: .. code-block:: python from langchain.llms import Xinference llm = Xinference( server_url="http://0.0.0.0:9997",
https://api.python.langchain.com/en/latest/_modules/langchain/llms/xinference.html
eb09e2d28d52-1
server_url="http://0.0.0.0:9997", model_uid = {model_uid} # replace model_uid with the model UID return from launching the model ) llm( prompt="Q: where can we visit in the capital of France? A:", generate_config={"max_tokens": 1024, "stream": True}, ) To view all the supported builtin models, run: .. code-block:: bash $ xinference list --all """ # noqa: E501 client: Any server_url: Optional[str] """URL of the xinference server""" model_uid: Optional[str] """UID of the launched model""" def __init__( self, server_url: Optional[str] = None, model_uid: Optional[str] = None ): try: from xinference.client import RESTfulClient except ImportError as e: raise ImportError( "Could not import RESTfulClient from xinference. Please install it" " with `pip install xinference`." ) from e super().__init__( **{ "server_url": server_url, "model_uid": model_uid, } ) if self.server_url is None: raise ValueError("Please provide server URL") if self.model_uid is None: raise ValueError("Please provide the model UID") self.client = RESTfulClient(server_url) @property def _llm_type(self) -> str: """Return type of llm.""" return "xinference" @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters."""
https://api.python.langchain.com/en/latest/_modules/langchain/llms/xinference.html
eb09e2d28d52-2
"""Get the identifying parameters.""" return { **{"server_url": self.server_url}, **{"model_uid": self.model_uid}, } def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call the xinference model and return the output. Args: prompt: The prompt to use for generation. stop: Optional list of stop words to use when generating. generate_config: Optional dictionary for the configuration used for generation. Returns: The generated string by the model. """ model = self.client.get_model(self.model_uid) generate_config: "LlamaCppGenerateConfig" = kwargs.get("generate_config", {}) if stop: generate_config["stop"] = stop if generate_config and generate_config.get("stream"): combined_text_output = "" for token in self._stream_generate( model=model, prompt=prompt, run_manager=run_manager, generate_config=generate_config, ): combined_text_output += token return combined_text_output else: completion = model.generate(prompt=prompt, generate_config=generate_config) return completion["choices"][0]["text"] def _stream_generate( self, model: Union["RESTfulGenerateModelHandle", "RESTfulChatModelHandle"], prompt: str, run_manager: Optional[CallbackManagerForLLMRun] = None, generate_config: Optional["LlamaCppGenerateConfig"] = None, ) -> Generator[str, None, None]:
https://api.python.langchain.com/en/latest/_modules/langchain/llms/xinference.html
eb09e2d28d52-3
) -> Generator[str, None, None]: """ Args: prompt: The prompt to use for generation. model: The model used for generation. stop: Optional list of stop words to use when generating. generate_config: Optional dictionary for the configuration used for generation. Yields: A string token. """ streaming_response = model.generate( prompt=prompt, generate_config=generate_config ) for chunk in streaming_response: if isinstance(chunk, dict): choices = chunk.get("choices", []) if choices: choice = choices[0] if isinstance(choice, dict): token = choice.get("text", "") log_probs = choice.get("logprobs") if run_manager: run_manager.on_llm_new_token( token=token, verbose=self.verbose, log_probs=log_probs ) yield token
https://api.python.langchain.com/en/latest/_modules/langchain/llms/xinference.html
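When ``generate_config`` sets ``"stream": True``, ``_call`` accumulates the tokens yielded by ``_stream_generate`` into a single string while still reporting each token through ``on_llm_new_token``. A sketch with placeholder server URL and model UID:

.. code-block:: python

    from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
    from langchain.llms import Xinference

    llm = Xinference(
        server_url="http://0.0.0.0:9997",    # placeholder
        model_uid="REPLACE_WITH_MODEL_UID",  # UID returned by `xinference launch`
    )

    # Tokens are echoed by the callback as they arrive; the call still returns
    # the full completion once streaming finishes.
    answer = llm(
        "Q: What can we visit in the capital of France? A:",
        generate_config={"max_tokens": 256, "stream": True},
        callbacks=[StreamingStdOutCallbackHandler()],
    )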
d9788b77dadb-0
Source code for langchain.llms.mlflow_ai_gateway from __future__ import annotations from typing import Any, Dict, List, Mapping, Optional from pydantic import BaseModel, Extra from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM [docs]class Params(BaseModel, extra=Extra.allow): """Parameters for the MLflow AI Gateway LLM.""" temperature: float = 0.0 candidate_count: int = 1 """The number of candidates to return.""" stop: Optional[List[str]] = None max_tokens: Optional[int] = None [docs]class MlflowAIGateway(LLM): """ Wrapper around completions LLMs in the MLflow AI Gateway. To use, you should have the ``mlflow[gateway]`` python package installed. For more information, see https://mlflow.org/docs/latest/gateway/index.html. Example: .. code-block:: python from langchain.llms import MlflowAIGateway completions = MlflowAIGateway( gateway_uri="<your-mlflow-ai-gateway-uri>", route="<your-mlflow-ai-gateway-completions-route>", params={ "temperature": 0.1 } ) """ route: str gateway_uri: Optional[str] = None params: Optional[Params] = None def __init__(self, **kwargs: Any): try: import mlflow.gateway except ImportError as e: raise ImportError( "Could not import `mlflow.gateway` module. " "Please install it with `pip install mlflow[gateway]`." ) from e
https://api.python.langchain.com/en/latest/_modules/langchain/llms/mlflow_ai_gateway.html
d9788b77dadb-1
) from e super().__init__(**kwargs) if self.gateway_uri: mlflow.gateway.set_gateway_uri(self.gateway_uri) @property def _default_params(self) -> Dict[str, Any]: params: Dict[str, Any] = { "gateway_uri": self.gateway_uri, "route": self.route, **(self.params.dict() if self.params else {}), } return params @property def _identifying_params(self) -> Mapping[str, Any]: return self._default_params def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: try: import mlflow.gateway except ImportError as e: raise ImportError( "Could not import `mlflow.gateway` module. " "Please install it with `pip install mlflow[gateway]`." ) from e data: Dict[str, Any] = { "prompt": prompt, **(self.params.dict() if self.params else {}), } if s := (stop or (self.params.stop if self.params else None)): data["stop"] = s resp = mlflow.gateway.query(self.route, data=data) return resp["candidates"][0]["text"] @property def _llm_type(self) -> str: return "mlflow-ai-gateway"
https://api.python.langchain.com/en/latest/_modules/langchain/llms/mlflow_ai_gateway.html
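``Params`` is merged into every query payload, and a ``stop`` list passed at call time takes precedence over ``params.stop``. A sketch with placeholder gateway URI and route name:

.. code-block:: python

    from langchain.llms.mlflow_ai_gateway import MlflowAIGateway, Params

    llm = MlflowAIGateway(
        gateway_uri="http://127.0.0.1:5000",  # placeholder gateway URI
        route="completions",                  # placeholder route name
        params=Params(temperature=0.1, max_tokens=200),
    )

    # The prompt plus the Params fields are sent via mlflow.gateway.query();
    # the text of the first candidate is returned.
    print(llm("Summarize the MLflow AI Gateway in one sentence.", stop=["\n"]))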
f2144de73fa0-0
Source code for langchain.llms.huggingface_text_gen_inference from typing import Any, AsyncIterator, Dict, Iterator, List, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.llms.base import LLM from langchain.schema.output import GenerationChunk [docs]class HuggingFaceTextGenInference(LLM): """ HuggingFace text generation API. It generates text from a given prompt. Attributes: - max_new_tokens: The maximum number of tokens to generate. - top_k: The number of top-k tokens to consider when generating text. - top_p: The cumulative probability threshold for generating text. - typical_p: The typical probability threshold for generating text. - temperature: The temperature to use when generating text. - repetition_penalty: The repetition penalty to use when generating text. - truncate: truncate inputs tokens to the given size - stop_sequences: A list of stop sequences to use when generating text. - seed: The seed to use when generating text. - inference_server_url: The URL of the inference server to use. - timeout: The timeout value in seconds to use while connecting to inference server. - server_kwargs: The keyword arguments to pass to the inference server. - client: The client object used to communicate with the inference server. - async_client: The async client object used to communicate with the server. Methods: - _call: Generates text based on a given prompt and stop sequences. - _acall: Async generates text based on a given prompt and stop sequences. - _llm_type: Returns the type of LLM.
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_text_gen_inference.html
f2144de73fa0-1
- _llm_type: Returns the type of LLM. - _default_params: Returns the default parameters for calling text generation inference API. """ """ Example: .. code-block:: python # Basic Example (no streaming) llm = HuggingFaceTextGenInference( inference_server_url = "http://localhost:8010/", max_new_tokens = 512, top_k = 10, top_p = 0.95, typical_p = 0.95, temperature = 0.01, repetition_penalty = 1.03, ) print(llm("What is Deep Learning?")) # Streaming response example from langchain.callbacks import streaming_stdout callbacks = [streaming_stdout.StreamingStdOutCallbackHandler()] llm = HuggingFaceTextGenInference( inference_server_url = "http://localhost:8010/", max_new_tokens = 512, top_k = 10, top_p = 0.95, typical_p = 0.95, temperature = 0.01, repetition_penalty = 1.03, callbacks = callbacks, streaming = True ) print(llm("What is Deep Learning?")) """ max_new_tokens: int = 512 top_k: Optional[int] = None top_p: Optional[float] = 0.95 typical_p: Optional[float] = 0.95 temperature: float = 0.8 repetition_penalty: Optional[float] = None truncate: Optional[int] = None stop_sequences: List[str] = Field(default_factory=list)
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_text_gen_inference.html
f2144de73fa0-2
stop_sequences: List[str] = Field(default_factory=list) seed: Optional[int] = None inference_server_url: str = "" timeout: int = 120 server_kwargs: Dict[str, Any] = Field(default_factory=dict) streaming: bool = False client: Any async_client: Any class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that python package exists in environment.""" try: import text_generation values["client"] = text_generation.Client( values["inference_server_url"], timeout=values["timeout"], **values["server_kwargs"], ) values["async_client"] = text_generation.AsyncClient( values["inference_server_url"], timeout=values["timeout"], **values["server_kwargs"], ) except ImportError: raise ImportError( "Could not import text_generation python package. " "Please install it with `pip install text_generation`." ) return values @property def _llm_type(self) -> str: """Return type of llm.""" return "huggingface_textgen_inference" @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling text generation inference API.""" return { "max_new_tokens": self.max_new_tokens, "top_k": self.top_k, "top_p": self.top_p, "typical_p": self.typical_p, "temperature": self.temperature, "repetition_penalty": self.repetition_penalty,
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_text_gen_inference.html
f2144de73fa0-3
"temperature": self.temperature, "repetition_penalty": self.repetition_penalty, "truncate": self.truncate, "stop_sequences": self.stop_sequences, "seed": self.seed, } def _invocation_params( self, runtime_stop: Optional[List[str]], **kwargs: Any ) -> Dict[str, Any]: params = {**self._default_params, **kwargs} params["stop_sequences"] = params["stop_sequences"] + (runtime_stop or []) return params def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: if self.streaming: completion = "" for chunk in self._stream(prompt, stop, run_manager, **kwargs): completion += chunk.text return completion invocation_params = self._invocation_params(stop, **kwargs) res = self.client.generate(prompt, **invocation_params) # remove stop sequences from the end of the generated text for stop_seq in invocation_params["stop_sequences"]: if stop_seq in res.generated_text: res.generated_text = res.generated_text[ : res.generated_text.index(stop_seq) ] return res.generated_text async def _acall( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: if self.streaming: completion = "" async for chunk in self._astream(prompt, stop, run_manager, **kwargs):
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_text_gen_inference.html
f2144de73fa0-4
async for chunk in self._astream(prompt, stop, run_manager, **kwargs): completion += chunk.text return completion invocation_params = self._invocation_params(stop, **kwargs) res = await self.async_client.generate(prompt, **invocation_params) # remove stop sequences from the end of the generated text for stop_seq in invocation_params["stop_sequences"]: if stop_seq in res.generated_text: res.generated_text = res.generated_text[ : res.generated_text.index(stop_seq) ] return res.generated_text def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: invocation_params = self._invocation_params(stop, **kwargs) for res in self.client.generate_stream(prompt, **invocation_params): # identify stop sequence in generated text, if any stop_seq_found: Optional[str] = None for stop_seq in invocation_params["stop_sequences"]: if stop_seq in res.token.text: stop_seq_found = stop_seq # identify text to yield text: Optional[str] = None if res.token.special: text = None elif stop_seq_found: text = res.token.text[: res.token.text.index(stop_seq_found)] else: text = res.token.text # yield text, if any if text: chunk = GenerationChunk(text=text) yield chunk if run_manager: run_manager.on_llm_new_token(chunk.text) # break if stop sequence found if stop_seq_found: break
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_text_gen_inference.html
f2144de73fa0-5
# break if stop sequence found if stop_seq_found: break async def _astream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[GenerationChunk]: invocation_params = self._invocation_params(stop, **kwargs) async for res in self.async_client.generate_stream(prompt, **invocation_params): # identify stop sequence in generated text, if any stop_seq_found: Optional[str] = None for stop_seq in invocation_params["stop_sequences"]: if stop_seq in res.token.text: stop_seq_found = stop_seq # identify text to yield text: Optional[str] = None if res.token.special: text = None elif stop_seq_found: text = res.token.text[: res.token.text.index(stop_seq_found)] else: text = res.token.text # yield text, if any if text: chunk = GenerationChunk(text=text) yield chunk if run_manager: await run_manager.on_llm_new_token(chunk.text) # break if stop sequence found if stop_seq_found: break
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_text_gen_inference.html
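Stop sequences configured on the instance and those passed at call time are concatenated by ``_invocation_params``, and the generated text is trimmed back to the first stop sequence found. A brief sketch (the local server URL is illustrative):

.. code-block:: python

    from langchain.llms import HuggingFaceTextGenInference

    llm = HuggingFaceTextGenInference(
        inference_server_url="http://localhost:8010/",  # illustrative
        max_new_tokens=128,
        stop_sequences=["</s>"],  # always applied
    )

    # "\nQ:" is added to the configured stop sequences for this call only, and the
    # completion is cut at the first occurrence of either sequence.
    text = llm("Q: What is Deep Learning?\nA:", stop=["\nQ:"])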
58c5601664c8-0
Source code for langchain.llms.fake from typing import Any, List, Mapping, Optional from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.llms.base import LLM [docs]class FakeListLLM(LLM): """Fake LLM for testing purposes.""" responses: List i: int = 0 @property def _llm_type(self) -> str: """Return type of llm.""" return "fake-list" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Return next response""" response = self.responses[self.i] if self.i < len(self.responses) - 1: self.i += 1 else: self.i = 0 return response async def _acall( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Return next response""" response = self.responses[self.i] if self.i < len(self.responses) - 1: self.i += 1 else: self.i = 0 return response @property def _identifying_params(self) -> Mapping[str, Any]: return {"responses": self.responses}
https://api.python.langchain.com/en/latest/_modules/langchain/llms/fake.html
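``FakeListLLM`` is convenient in unit tests because it returns the canned responses in order and wraps around when the list is exhausted:

.. code-block:: python

    from langchain.llms.fake import FakeListLLM

    llm = FakeListLLM(responses=["first", "second"])

    assert llm("any prompt") == "first"
    assert llm("any prompt") == "second"
    # The index wraps back to the start of the list.
    assert llm("any prompt") == "first"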
b575da701af0-0
Source code for langchain.llms.pipelineai import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import BaseModel, Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class PipelineAI(LLM, BaseModel): """PipelineAI large language models. To use, you should have the ``pipeline-ai`` python package installed, and the environment variable ``PIPELINE_API_KEY`` set with your API key. Any parameters that are valid to be passed to the call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain import PipelineAI pipeline = PipelineAI(pipeline_key="") """ pipeline_key: str = "" """The id or tag of the target pipeline""" pipeline_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any pipeline parameters valid for `create` call not explicitly specified.""" pipeline_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("pipeline_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names:
https://api.python.langchain.com/en/latest/_modules/langchain/llms/pipelineai.html
b575da701af0-1
if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transferred to pipeline_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["pipeline_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" pipeline_api_key = get_from_dict_or_env( values, "pipeline_api_key", "PIPELINE_API_KEY" ) values["pipeline_api_key"] = pipeline_api_key return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"pipeline_key": self.pipeline_key}, **{"pipeline_kwargs": self.pipeline_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "pipeline_ai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call to Pipeline Cloud endpoint.""" try: from pipeline import PipelineCloud except ImportError: raise ValueError( "Could not import pipeline-ai python package. " "Please install it with `pip install pipeline-ai`." ) client = PipelineCloud(token=self.pipeline_api_key)
https://api.python.langchain.com/en/latest/_modules/langchain/llms/pipelineai.html
b575da701af0-2
) client = PipelineCloud(token=self.pipeline_api_key) params = self.pipeline_kwargs or {} params = {**params, **kwargs} run = client.run_pipeline(self.pipeline_key, [prompt, params]) try: text = run.result_preview[0][0] except AttributeError: raise AttributeError( f"A pipeline run should have a `result_preview` attribute. " f"Run was: {run}" ) if stop is not None: # I believe this is required since the stop tokens # are not enforced by the pipeline parameters text = enforce_stop_tokens(text, stop) return text
https://api.python.langchain.com/en/latest/_modules/langchain/llms/pipelineai.html
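A usage sketch for the class above; the pipeline key is a placeholder, and any extra keyword arguments are routed into ``pipeline_kwargs`` by the ``build_extra`` validator:

.. code-block:: python

    from langchain import PipelineAI

    llm = PipelineAI(
        pipeline_key="public/gpt-j:base",  # placeholder pipeline id or tag
        pipeline_api_key="YOUR_API_KEY",   # or set PIPELINE_API_KEY in the environment
    )

    # The prompt and pipeline_kwargs are submitted through PipelineCloud.run_pipeline;
    # the first element of result_preview is returned, then stop sequences are trimmed.
    print(llm("Tell me a joke.", stop=["\n\n"]))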
7346f532c40e-0
Source code for langchain.llms.huggingface_endpoint from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env VALID_TASKS = ("text2text-generation", "text-generation", "summarization") [docs]class HuggingFaceEndpoint(LLM): """HuggingFace Endpoint models. To use, you should have the ``huggingface_hub`` python package installed, and the environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Only supports `text-generation` and `text2text-generation` for now. Example: .. code-block:: python from langchain.llms import HuggingFaceEndpoint endpoint_url = ( "https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud" ) hf = HuggingFaceEndpoint( endpoint_url=endpoint_url, huggingfacehub_api_token="my-api-key" ) """ endpoint_url: str = "" """Endpoint URL to use.""" task: Optional[str] = None """Task to call the model with. Should be a task that returns `generated_text` or `summary_text`.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" huggingfacehub_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator()
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_endpoint.html
7346f532c40e-1
extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" huggingfacehub_api_token = get_from_dict_or_env( values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN" ) try: from huggingface_hub.hf_api import HfApi try: HfApi( endpoint="https://huggingface.co", # Can be a Private Hub endpoint. token=huggingfacehub_api_token, ).whoami() except Exception as e: raise ValueError( "Could not authenticate with huggingface_hub. " "Please check your API token." ) from e except ImportError: raise ValueError( "Could not import huggingface_hub python package. " "Please install it with `pip install huggingface_hub`." ) values["huggingfacehub_api_token"] = huggingfacehub_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"endpoint_url": self.endpoint_url, "task": self.task}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "huggingface_endpoint" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None,
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_endpoint.html
7346f532c40e-2
run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to HuggingFace Hub's inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = hf("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} # payload samples params = {**_model_kwargs, **kwargs} parameter_payload = {"inputs": prompt, "parameters": params} # HTTP headers for authorization headers = { "Authorization": f"Bearer {self.huggingfacehub_api_token}", "Content-Type": "application/json", } # send request try: response = requests.post( self.endpoint_url, headers=headers, json=parameter_payload ) except requests.exceptions.RequestException as e: # This is the correct syntax raise ValueError(f"Error raised by inference endpoint: {e}") generated_text = response.json() if "error" in generated_text: raise ValueError( f"Error raised by inference API: {generated_text['error']}" ) if self.task == "text-generation": text = generated_text[0]["generated_text"] # Remove prompt if included in generated text. if text.startswith(prompt): text = text[len(prompt) :] elif self.task == "text2text-generation": text = generated_text[0]["generated_text"] elif self.task == "summarization":
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_endpoint.html
7346f532c40e-3
elif self.task == "summarization": text = generated_text[0]["summary_text"] else: raise ValueError( f"Got invalid task {self.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_endpoint.html
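Because the endpoint's JSON shape depends on the task, the ``task`` field tells the wrapper which key to read back (``generated_text`` or ``summary_text``). A sketch reusing the placeholder endpoint URL from the class docstring:

.. code-block:: python

    from langchain.llms import HuggingFaceEndpoint

    llm = HuggingFaceEndpoint(
        endpoint_url="https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud",  # placeholder
        task="text-generation",  # or "text2text-generation" / "summarization"
        model_kwargs={"max_new_tokens": 128, "temperature": 0.7},
        huggingfacehub_api_token="hf_...",  # or set HUGGINGFACEHUB_API_TOKEN
    )

    # For text-generation the prompt echoed at the start of the output is stripped,
    # then stop sequences are enforced client-side.
    print(llm("Tell me a joke.", stop=["\n\n"]))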
b85d3f6d0b3f-0
Source code for langchain.llms.rwkv """RWKV models. Based on https://github.com/saharNooby/rwkv.cpp/blob/master/rwkv/chat_with_bot.py https://github.com/BlinkDL/ChatRWKV/blob/main/v2/chat.py """ from typing import Any, Dict, List, Mapping, Optional, Set from pydantic import BaseModel, Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens [docs]class RWKV(LLM, BaseModel): """RWKV language models. To use, you should have the ``rwkv`` python package installed, the pre-trained model file, and the model's config information. Example: .. code-block:: python from langchain.llms import RWKV model = RWKV(model="./models/rwkv-3b-fp16.bin", strategy="cpu fp32") # Simplest invocation response = model("Once upon a time, ") """ model: str """Path to the pre-trained RWKV model file.""" tokens_path: str """Path to the RWKV tokens file.""" strategy: str = "cpu fp32" """Strategy used to load the model (device and precision), e.g. "cpu fp32".""" rwkv_verbose: bool = True """Print debug information.""" temperature: float = 1.0 """The temperature to use for sampling.""" top_p: float = 0.5 """The top-p value to use for sampling.""" penalty_alpha_frequency: float = 0.4 """Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same
https://api.python.langchain.com/en/latest/_modules/langchain/llms/rwkv.html
b85d3f6d0b3f-1
in the text so far, decreasing the model's likelihood to repeat the same line verbatim.""" penalty_alpha_presence: float = 0.4 """Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.""" CHUNK_LEN: int = 256 """Batch size for prompt processing.""" max_tokens_per_generation: int = 256 """Maximum number of tokens to generate.""" client: Any = None #: :meta private: tokenizer: Any = None #: :meta private: pipeline: Any = None #: :meta private: model_tokens: Any = None #: :meta private: model_state: Any = None #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters.""" return { "verbose": self.verbose, "top_p": self.top_p, "temperature": self.temperature, "penalty_alpha_frequency": self.penalty_alpha_frequency, "penalty_alpha_presence": self.penalty_alpha_presence, "CHUNK_LEN": self.CHUNK_LEN, "max_tokens_per_generation": self.max_tokens_per_generation, } @staticmethod def _rwkv_param_names() -> Set[str]: """Get the RWKV model parameter names.""" return { "verbose", } @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in the environment.""" try: import tokenizers except ImportError:
        try:
            import tokenizers
        except ImportError:
            raise ImportError(
                "Could not import tokenizers python package. "
                "Please install it with `pip install tokenizers`."
            )
        try:
            from rwkv.model import RWKV as RWKVMODEL
            from rwkv.utils import PIPELINE

            values["tokenizer"] = tokenizers.Tokenizer.from_file(values["tokens_path"])

            rwkv_keys = cls._rwkv_param_names()
            model_kwargs = {k: v for k, v in values.items() if k in rwkv_keys}
            model_kwargs["verbose"] = values["rwkv_verbose"]
            values["client"] = RWKVMODEL(
                values["model"], strategy=values["strategy"], **model_kwargs
            )
            values["pipeline"] = PIPELINE(values["client"], values["tokens_path"])

        except ImportError:
            raise ImportError(
                "Could not import rwkv python package. "
                "Please install it with `pip install rwkv`."
            )
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            "model": self.model,
            **self._default_params,
            **{k: v for k, v in self.__dict__.items() if k in RWKV._rwkv_param_names()},
        }

    @property
    def _llm_type(self) -> str:
        """Return the type of llm."""
        return "rwkv"

    [docs] def run_rnn(self, _tokens: List[str], newline_adj: int = 0) -> Any:
        AVOID_REPEAT_TOKENS = []
        AVOID_REPEAT = ",:?!"
        for i in AVOID_REPEAT:
            dd = self.pipeline.encode(i)
            assert len(dd) == 1
            AVOID_REPEAT_TOKENS += dd

        tokens = [int(x) for x in _tokens]
        self.model_tokens += tokens

        out: Any = None

        while len(tokens) > 0:
            out, self.model_state = self.client.forward(
                tokens[: self.CHUNK_LEN], self.model_state
            )
            tokens = tokens[self.CHUNK_LEN :]
        END_OF_LINE = 187
        out[END_OF_LINE] += newline_adj  # adjust \n probability
        if self.model_tokens[-1] in AVOID_REPEAT_TOKENS:
            out[self.model_tokens[-1]] = -999999999
        return out

    [docs] def rwkv_generate(self, prompt: str) -> str:
        self.model_state = None
        self.model_tokens = []
        logits = self.run_rnn(self.tokenizer.encode(prompt).ids)
        begin = len(self.model_tokens)
        out_last = begin

        occurrence: Dict = {}

        decoded = ""
        for i in range(self.max_tokens_per_generation):
            for n in occurrence:
                logits[n] -= (
                    self.penalty_alpha_presence
                    + occurrence[n] * self.penalty_alpha_frequency
                )
            token = self.pipeline.sample_logits(
                logits, temperature=self.temperature, top_p=self.top_p
            )
            END_OF_TEXT = 0
            if token == END_OF_TEXT:
                break
            if token not in occurrence:
                occurrence[token] = 1
            else:
                occurrence[token] += 1

            logits = self.run_rnn([token])
            piece = self.tokenizer.decode(self.model_tokens[out_last:])
            if "\ufffd" not in piece:  # avoid utf-8 display issues
                decoded += piece
                out_last = begin + i + 1
                if i >= self.max_tokens_per_generation - 100:
                    break

        return decoded

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        r"""RWKV generation.

        Args:
            prompt: The prompt to pass into the model.
            stop: A list of strings to stop generation when encountered.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                prompt = "Once upon a time, "
                response = model(prompt)
        """
        text = self.rwkv_generate(prompt)

        if stop is not None:
            text = enforce_stop_tokens(text, stop)
        return text
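The repetition penalty inside ``rwkv_generate`` lowers the logit of every token that has already been sampled. A worked example of that adjustment with hypothetical numbers:

.. code-block:: python

    # A token's logit is reduced by a flat presence term plus a frequency
    # term scaled by how many times the token has been sampled so far.
    penalty_alpha_presence = 0.4
    penalty_alpha_frequency = 0.4
    occurrence = {42: 3}  # token id 42 has been sampled three times

    logit = 1.25
    logit -= penalty_alpha_presence + occurrence[42] * penalty_alpha_frequency
    round(logit, 2)
    # -> -0.35  (1.25 - (0.4 + 3 * 0.4))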
Source code for langchain.llms.google_palm from __future__ import annotations import logging from typing import Any, Callable, Dict, List, Optional from pydantic import BaseModel, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms import BaseLLM from langchain.schema import Generation, LLMResult from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator() -> Callable[[Any], Any]: """Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions""" try: import google.api_core.exceptions except ImportError: raise ImportError( "Could not import google-api-core python package. " "Please install it with `pip install google-api-core`." ) multiplier = 2 min_seconds = 1 max_seconds = 60 max_retries = 10 return retry( reraise=True, stop=stop_after_attempt(max_retries), wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(google.api_core.exceptions.ResourceExhausted) | retry_if_exception_type(google.api_core.exceptions.ServiceUnavailable) | retry_if_exception_type(google.api_core.exceptions.GoogleAPIError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) [docs]def generate_with_retry(llm: GooglePalm, **kwargs: Any) -> Any: """Use tenacity to retry the completion call."""
"""Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator() @retry_decorator def _generate_with_retry(**kwargs: Any) -> Any: return llm.client.generate_text(**kwargs) return _generate_with_retry(**kwargs) def _strip_erroneous_leading_spaces(text: str) -> str: """Strip erroneous leading spaces from text. The PaLM API will sometimes erroneously return a single leading space in all lines > 1. This function strips that space. """ has_leading_space = all(not line or line[0] == " " for line in text.split("\n")[1:]) if has_leading_space: return text.replace("\n ", "\n") else: return text [docs]class GooglePalm(BaseLLM, BaseModel): """Google PaLM models.""" client: Any #: :meta private: google_api_key: Optional[str] model_name: str = "models/text-bison-001" """Model name to use.""" temperature: float = 0.7 """Run inference with this temperature. Must by in the closed interval [0.0, 1.0].""" top_p: Optional[float] = None """Decode using nucleus sampling: consider the smallest set of tokens whose probability sum is at least top_p. Must be in the closed interval [0.0, 1.0].""" top_k: Optional[int] = None """Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.""" max_output_tokens: Optional[int] = None """Maximum number of tokens to include in a candidate. Must be greater than zero.
"""Maximum number of tokens to include in a candidate. Must be greater than zero. If unset, will default to 64.""" n: int = 1 """Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.""" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate api key, python package exists.""" google_api_key = get_from_dict_or_env( values, "google_api_key", "GOOGLE_API_KEY" ) try: import google.generativeai as genai genai.configure(api_key=google_api_key) except ImportError: raise ImportError( "Could not import google-generativeai python package. " "Please install it with `pip install google-generativeai`." ) values["client"] = genai if values["temperature"] is not None and not 0 <= values["temperature"] <= 1: raise ValueError("temperature must be in the range [0.0, 1.0]") if values["top_p"] is not None and not 0 <= values["top_p"] <= 1: raise ValueError("top_p must be in the range [0.0, 1.0]") if values["top_k"] is not None and values["top_k"] <= 0: raise ValueError("top_k must be positive") if values["max_output_tokens"] is not None and values["max_output_tokens"] <= 0: raise ValueError("max_output_tokens must be greater than zero") return values def _generate( self, prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        generations = []
        for prompt in prompts:
            completion = generate_with_retry(
                self,
                model=self.model_name,
                prompt=prompt,
                stop_sequences=stop,
                temperature=self.temperature,
                top_p=self.top_p,
                top_k=self.top_k,
                max_output_tokens=self.max_output_tokens,
                candidate_count=self.n,
                **kwargs,
            )
            prompt_generations = []
            for candidate in completion.candidates:
                raw_text = candidate["output"]
                stripped_text = _strip_erroneous_leading_spaces(raw_text)
                prompt_generations.append(Generation(text=stripped_text))
            generations.append(prompt_generations)

        return LLMResult(generations=generations)

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "google_palm"
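A quick demonstration of the ``_strip_erroneous_leading_spaces`` helper defined above, using a made-up completion where every line after the first carries the stray space:

.. code-block:: python

    raw = "Line one\n Line two\n Line three"
    lines = raw.split("\n")[1:]          # [' Line two', ' Line three']
    assert all(not ln or ln[0] == " " for ln in lines)
    raw.replace("\n ", "\n")
    # -> 'Line one\nLine two\nLine three'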
Source code for langchain.llms.vllm
from typing import Any, Dict, List, Optional

from pydantic import root_validator

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import BaseLLM
from langchain.schema.output import Generation, LLMResult


[docs]class VLLM(BaseLLM):
    """vLLM language model."""

    model: str = ""
    """The name or path of a HuggingFace Transformers model."""

    tensor_parallel_size: Optional[int] = 1
    """The number of GPUs to use for distributed execution with tensor parallelism."""

    trust_remote_code: Optional[bool] = False
    """Trust remote code (e.g., from HuggingFace) when downloading the model
    and tokenizer."""

    n: int = 1
    """Number of output sequences to return for the given prompt."""

    best_of: Optional[int] = None
    """Number of output sequences generated from the prompt, from which the
    best ``n`` are returned."""

    presence_penalty: float = 0.0
    """Float that penalizes new tokens based on whether they appear in the
    generated text so far."""

    frequency_penalty: float = 0.0
    """Float that penalizes new tokens based on their frequency in the
    generated text so far."""

    temperature: float = 1.0
    """Float that controls the randomness of the sampling."""

    top_p: float = 1.0
    """Float that controls the cumulative probability of the top tokens to consider."""

    top_k: int = -1
    """Integer that controls the number of top tokens to consider."""

    use_beam_search: bool = False
    """Whether to use beam search instead of sampling."""

    stop: Optional[List[str]] = None
    """List of strings that stop the generation when they are generated."""
"""List of strings that stop the generation when they are generated.""" ignore_eos: bool = False """Whether to ignore the EOS token and continue generating tokens after the EOS token is generated.""" max_new_tokens: int = 512 """Maximum number of tokens to generate per output sequence.""" client: Any #: :meta private: @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that python package exists in environment.""" try: from vllm import LLM as VLLModel except ImportError: raise ImportError( "Could not import vllm python package. " "Please install it with `pip install vllm`." ) values["client"] = VLLModel( model=values["model"], tensor_parallel_size=values["tensor_parallel_size"], trust_remote_code=values["trust_remote_code"], ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling vllm.""" return { "n": self.n, "best_of": self.best_of, "max_tokens": self.max_new_tokens, "top_k": self.top_k, "top_p": self.top_p, "temperature": self.temperature, "presence_penalty": self.presence_penalty, "frequency_penalty": self.frequency_penalty, "stop": self.stop, "ignore_eos": self.ignore_eos, "use_beam_search": self.use_beam_search, } def _generate( self, prompts: List[str], stop: Optional[List[str]] = None,
prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" from vllm import SamplingParams # build sampling parameters params = {**self._default_params, **kwargs, "stop": stop} sampling_params = SamplingParams(**params) # call the model outputs = self.client.generate(prompts, sampling_params) generations = [] for output in outputs: text = output.outputs[0].text generations.append([Generation(text=text)]) return LLMResult(generations=generations) @property def _llm_type(self) -> str: """Return type of llm.""" return "vllm"
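A minimal usage sketch for the wrapper above. The model name is a placeholder; any HuggingFace Transformers checkpoint supported by vLLM should work, and a CUDA-capable machine with the ``vllm`` package installed is assumed:

.. code-block:: python

    from langchain.llms import VLLM

    llm = VLLM(
        model="mosaicml/mpt-7b",   # placeholder checkpoint
        trust_remote_code=True,    # needed by some remote-code models
        max_new_tokens=128,
        temperature=0.8,
    )
    print(llm("What is the capital of France?"))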
Source code for langchain.llms.amazon_api_gateway
from typing import Any, Dict, List, Mapping, Optional

import requests
from pydantic import Extra

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens


[docs]class ContentHandlerAmazonAPIGateway:
    """Adapter that prepares LangChain inputs in the format the LLM endpoint
    expects, and provides a helper function to extract the generated text
    from the model response."""

    [docs] @classmethod
    def transform_input(
        cls, prompt: str, model_kwargs: Dict[str, Any]
    ) -> Dict[str, Any]:
        return {"inputs": prompt, "parameters": model_kwargs}

    [docs] @classmethod
    def transform_output(cls, response: Any) -> str:
        return response.json()[0]["generated_text"]


[docs]class AmazonAPIGateway(LLM):
    """Amazon API Gateway to access LLM models hosted on AWS."""

    api_url: str
    """API Gateway URL"""

    headers: Optional[Dict] = None
    """API Gateway HTTP Headers to send, e.g. for authentication"""

    model_kwargs: Optional[Dict] = None
    """Keyword arguments to pass to the model."""

    content_handler: ContentHandlerAmazonAPIGateway = ContentHandlerAmazonAPIGateway()
    """The content handler class that provides input and output transform
    functions to handle formats between the LLM and the endpoint.
    """

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
"""Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"api_url": self.api_url, "headers": self.headers}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "amazon_api_gateway" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to Amazon API Gateway model. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = se("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} payload = self.content_handler.transform_input(prompt, _model_kwargs) try: response = requests.post( self.api_url, headers=self.headers, json=payload, ) text = self.content_handler.transform_output(response) except Exception as error: raise ValueError(f"Error raised by the service: {error}") if stop is not None: text = enforce_stop_tokens(text, stop) return text