id
stringlengths
14
15
text
stringlengths
35
2.51k
source
stringlengths
61
154
9f6cfcdc65e7-0
Source code for langchain.llms.replicate """Wrapper around Replicate API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils im...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/replicate.html
9f6cfcdc65e7-1
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/replicate.html
9f6cfcdc65e7-2
**kwargs: Any, ) -> str: """Call to replicate endpoint.""" try: import replicate as replicate_python except ImportError: raise ImportError( "Could not import replicate python package. " "Please install it with `pip install replicate`." ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/replicate.html
2f2251f98c5f-0
Source code for langchain.llms.google_palm """Wrapper around Google's PaLM Text APIs.""" from __future__ import annotations import logging from typing import Any, Callable, Dict, List, Optional from pydantic import BaseModel, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/google_palm.html
2f2251f98c5f-1
), before_sleep=before_sleep_log(logger, logging.WARNING), ) [docs]def generate_with_retry(llm: GooglePalm, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator() @retry_decorator def _generate_with_retry(**kwargs: Any) -> Any: ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/google_palm.html
2f2251f98c5f-2
"""Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.""" max_output_tokens: Optional[int] = None """Maximum number of tokens to include in a candidate. Must be greater than zero. If unset, will default to 64.""" n: int = 1 """Number of chat compl...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/google_palm.html
2f2251f98c5f-3
raise ValueError("max_output_tokens must be greater than zero") return values def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: generations = []...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/google_palm.html
f0d04eaecca2-0
Source code for langchain.llms.beam """Wrapper around Beam API.""" import base64 import json import logging import subprocess import textwrap import time from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import Callba...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/beam.html
f0d04eaecca2-1
max_length=50) llm._deploy() call_result = llm._call(input) """ model_name: str = "" name: str = "" cpu: str = "" memory: str = "" gpu: str = "" python_version: str = "" python_packages: List[str] = [] max_length: str = "" url: str = "" """model endpoi...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/beam.html
f0d04eaecca2-2
def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" beam_client_id = get_from_dict_or_env( values, "beam_client_id", "BEAM_CLIENT_ID" ) beam_client_secret = get_from_dict_or_env( values, "beam_...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/beam.html
f0d04eaecca2-3
inputs={{"prompt": beam.Types.String(), "max_length": beam.Types.String()}}, outputs={{"text": beam.Types.String()}}, handler="run.py:beam_langchain", ) """ ) script_name = "app.py" with open(script_name, "w") as file: file.write( ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/beam.html
f0d04eaecca2-4
"""Call to Beam.""" try: import beam # type: ignore if beam.__path__ == "": raise ImportError except ImportError: raise ImportError( "Could not import beam python package. " "Please install it with `curl " ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/beam.html
f0d04eaecca2-5
**kwargs: Any, ) -> str: """Call to Beam.""" url = "https://apps.beam.cloud/" + self.app_id if self.app_id else self.url payload = {"prompt": prompt, "max_length": self.max_length} payload.update(kwargs) headers = { "Accept": "*/*", "Accept-Encoding": ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/beam.html
4ec5083658a3-0
Source code for langchain.llms.aleph_alpha """Wrapper around Aleph Alpha APIs.""" from typing import Any, Dict, List, Optional, Sequence from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforc...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/aleph_alpha.html
4ec5083658a3-1
"""Total probability mass of tokens to consider at each step.""" presence_penalty: float = 0.0 """Penalizes repeated tokens.""" frequency_penalty: float = 0.0 """Penalizes repeated tokens according to frequency.""" repetition_penalties_include_prompt: Optional[bool] = False """Flag deciding whet...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/aleph_alpha.html
4ec5083658a3-2
echo: bool = False """Echo the prompt in the completion.""" use_multiplicative_frequency_penalty: bool = False sequence_penalty: float = 0.0 sequence_penalty_min_length: int = 2 use_multiplicative_sequence_penalty: bool = False completion_bias_inclusion: Optional[Sequence[str]] = None comple...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/aleph_alpha.html
4ec5083658a3-3
def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" aleph_alpha_api_key = get_from_dict_or_env( values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY" ) try: import aleph_alpha_client ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/aleph_alpha.html
4ec5083658a3-4
"disable_optimizations": self.disable_optimizations, "minimum_tokens": self.minimum_tokens, "echo": self.echo, "use_multiplicative_frequency_penalty": self.use_multiplicative_frequency_penalty, # noqa: E501 "sequence_penalty": self.sequence_penalty, "sequence...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/aleph_alpha.html
4ec5083658a3-5
"""Call out to Aleph Alpha's completion endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/aleph_alpha.html
4aa96d57ed11-0
Source code for langchain.llms.textgen """Wrapper around text-generation-webui.""" import logging from typing import Any, Dict, List, Optional import requests from pydantic import Field from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM logger = logging.getLogger(__name...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/textgen.html
4aa96d57ed11-1
number. Higher value = higher range of possible random results.""" typical_p: Optional[float] = 1 """If not set to 1, select only tokens that are at least this much more likely to appear than random tokens, given the prior text.""" epsilon_cutoff: Optional[float] = 0 # In units of 1e-4 """Epsilon c...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/textgen.html
4aa96d57ed11-2
"""Seed (-1 for random)""" add_bos_token: bool = Field(True, alias="add_bos_token") """Add the bos_token to the beginning of prompts. Disabling this can make the replies more creative.""" truncation_length: Optional[int] = 2048 """Truncate the prompt up to this length. The leftmost tokens are remove...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/textgen.html
4aa96d57ed11-3
"num_beams": self.num_beams, "penalty_alpha": self.penalty_alpha, "length_penalty": self.length_penalty, "early_stopping": self.early_stopping, "seed": self.seed, "add_bos_token": self.add_bos_token, "truncation_length": self.truncation_length, ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/textgen.html
4aa96d57ed11-4
return params def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call the textgen web API and return the output. Args: prompt: The prompt to use fo...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/textgen.html
e1cf133d3fe7-0
Source code for langchain.llms.octoai_endpoint """Wrapper around OctoAI APIs.""" from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/octoai_endpoint.html
e1cf133d3fe7-1
"""OCTOAI API Token""" [docs] class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] @root_validator(allow_reuse=True) def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" oct...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/octoai_endpoint.html
e1cf133d3fe7-2
_model_kwargs = self.model_kwargs or {} # Prepare the payload JSON parameter_payload = {"inputs": prompt, "parameters": _model_kwargs} try: # Initialize the OctoAI client from octoai import client octoai_client = client.Client(token=self.octoai_api_token) ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/octoai_endpoint.html
b28e95e7b76d-0
Source code for langchain.llms.huggingface_pipeline """Wrapper around HuggingFace Pipeline APIs.""" import importlib.util import logging from typing import Any, List, Mapping, Optional from pydantic import Extra from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from la...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_pipeline.html
b28e95e7b76d-1
""" pipeline: Any #: :meta private: model_id: str = DEFAULT_MODEL_ID """Model name to use.""" model_kwargs: Optional[dict] = None """Key word arguments passed to the model.""" pipeline_kwargs: Optional[dict] = None """Key word arguments passed to the pipeline.""" [docs] class Config: ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_pipeline.html
b28e95e7b76d-2
else: raise ValueError( f"Got invalid task {task}, " f"currently only {VALID_TASKS} are supported" ) except ImportError as e: raise ValueError( f"Could not load the {task} model due to missing dependencies." ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_pipeline.html
b28e95e7b76d-3
) return cls( pipeline=pipeline, model_id=model_id, model_kwargs=_model_kwargs, pipeline_kwargs=_pipeline_kwargs, **kwargs, ) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_pipeline.html
4f60d15c2512-0
Source code for langchain.llms.forefrontai """Wrapper around ForefrontAI APIs.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.util...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/forefrontai.html
4f60d15c2512-1
def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" forefrontai_api_key = get_from_dict_or_env( values, "forefrontai_api_key", "FOREFRONTAI_API_KEY" ) values["forefrontai_api_key"] = forefrontai_api_key return values...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/forefrontai.html
4f60d15c2512-2
""" response = requests.post( url=self.endpoint_url, headers={ "Authorization": f"Bearer {self.forefrontai_api_key}", "Content-Type": "application/json", }, json={"text": prompt, **self._default_params, **kwargs}, ) ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/forefrontai.html
44e08aca2d59-0
Source code for langchain.llms.self_hosted """Run model inference on self-hosted remote hardware.""" import importlib.util import logging import pickle from typing import Any, Callable, List, Mapping, Optional from pydantic import Extra from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llm...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/self_hosted.html
44e08aca2d59-1
) if device < 0 and cuda_device_count > 0: logger.warning( "Device has %d GPUs available. " "Provide device={deviceId} to `from_model_id` to use available" "GPUs for execution. deviceId is -1 for CPU and " "can be a positive integer ass...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/self_hosted.html
44e08aca2d59-2
llm = SelfHostedPipeline( model_load_fn=load_pipeline, hardware=gpu, model_reqs=model_reqs, inference_fn=inference_fn ) Example for <2GB model (can be serialized and sent directly to the server): .. code-block:: python from langchain.ll...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/self_hosted.html
44e08aca2d59-3
load_fn_kwargs: Optional[dict] = None """Key word arguments to pass to the model load function.""" model_reqs: List[str] = ["./", "torch"] """Requirements to install on hardware to inference the model.""" [docs] class Config: """Configuration for this pydantic object.""" extra = Extra.for...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/self_hosted.html
44e08aca2d59-4
if not isinstance(pipeline, str): logger.warning( "Serializing pipeline to send to remote hardware. " "Note, it can be quite slow" "to serialize and send large models with each execution. " "Consider sending the pipeline" "to th...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/self_hosted.html
1e1bd291aa97-0
Source code for langchain.llms.manifest """Wrapper around HazyResearch's Manifest library.""" from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM [docs]class ManifestWrapper(...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/manifest.html
1e1bd291aa97-1
if stop is not None and len(stop) != 1: raise NotImplementedError( f"Manifest currently only supports a single stop token, got {stop}" ) params = self.llm_kwargs or {} params = {**params, **kwargs} if stop is not None: params["stop_token"] = st...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/manifest.html
89c54fb9a69c-0
Source code for langchain.llms.anthropic """Wrapper around Anthropic APIs.""" import re import warnings from typing import Any, Callable, Dict, Generator, List, Mapping, Optional, Tuple, Union from pydantic import BaseModel, root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/anthropic.html
89c54fb9a69c-1
"""Validate that api key and python package exists in environment.""" anthropic_api_key = get_from_dict_or_env( values, "anthropic_api_key", "ANTHROPIC_API_KEY" ) """Get custom api url from environment.""" anthropic_api_url = get_from_dict_or_env( values, ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/anthropic.html
89c54fb9a69c-2
@property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{}, **self._default_params} def _get_anthropic_stop(self, stop: Optional[List[str]] = None) -> List[str]: if not self.HUMAN_PROMPT or not self.AI_PROMPT: raise NameEr...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/anthropic.html
89c54fb9a69c-3
response = model(prompt) """ [docs] @root_validator() def raise_warning(cls, values: Dict) -> Dict: """Raise warning that this class is deprecated.""" warnings.warn( "This Anthropic LLM is deprecated. " "Please use `from langchain.chat_models import ChatAnthropic` inst...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/anthropic.html
89c54fb9a69c-4
Returns: The string generated by the model. Example: .. code-block:: python prompt = "What are the biggest risks facing humanity?" prompt = f"\n\nHuman: {prompt}\n\nAssistant:" response = model(prompt) """ stop = self._get_a...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/anthropic.html
89c54fb9a69c-5
) current_completion = "" async for data in stream_resp: delta = data["completion"][len(current_completion) :] current_completion = data["completion"] if run_manager: await run_manager.on_llm_new_token(delta, **data) ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/anthropic.html
0bd0815bca5c-0
Source code for langchain.llms.llamacpp """Wrapper around llama.cpp.""" import logging from typing import Any, Dict, Generator, List, Optional from pydantic import Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM logger = logging.getLogger(__name...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
0bd0815bca5c-1
f16_kv: bool = Field(True, alias="f16_kv") """Use half-precision for key/value cache.""" logits_all: bool = Field(False, alias="logits_all") """Return logits for all tokens, not just the last token.""" vocab_only: bool = Field(False, alias="vocab_only") """Only load the vocabulary, no weights.""" ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
0bd0815bca5c-2
"""Whether to echo the prompt.""" stop: Optional[List[str]] = [] """A list of strings to stop generation when encountered.""" repeat_penalty: Optional[float] = 1.1 """The penalty to apply to repeated tokens.""" top_k: Optional[int] = 40 """The top-k value to use for sampling.""" last_n_token...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
0bd0815bca5c-3
except ImportError: raise ModuleNotFoundError( "Could not import llama-cpp-python library. " "Please install the llama-cpp-python library to " "use this embedding model: pip install llama-cpp-python" ) except Exception as e: rai...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
0bd0815bca5c-4
Returns: Dictionary containing the combined parameters. """ # Raise error if stop sequences are in both input and default params if self.stop and stop is not None: raise ValueError("`stop` found in both the input and default params.") params = self._default_params...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
0bd0815bca5c-5
return combined_text_output else: params = self._get_parameters(stop) params = {**params, **kwargs} result = self.client(prompt=prompt, **params) return result["choices"][0]["text"] [docs] def stream( self, prompt: str, stop: Optional[Li...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
0bd0815bca5c-6
""" params = self._get_parameters(stop) result = self.client(prompt=prompt, stream=True, **params) for chunk in result: token = chunk["choices"][0]["text"] log_probs = chunk["choices"][0].get("logprobs", None) if run_manager: run_manager.on_llm...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
a0f77b83ed5d-0
Source code for langchain.llms.petals """Wrapper around Petals API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils imp...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/petals.html
a0f77b83ed5d-1
"""Whether or not to use sampling; use greedy decoding otherwise.""" max_length: Optional[int] = None """The maximum length of the sequence to be generated.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/petals.html
a0f77b83ed5d-2
) try: from petals import DistributedBloomForCausalLM from transformers import BloomTokenizerFast model_name = values["model_name"] values["tokenizer"] = BloomTokenizerFast.from_pretrained(model_name) values["client"] = DistributedBloomForCausalLM.from...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/petals.html
a0f77b83ed5d-3
"""Call the Petals API.""" params = self._default_params params = {**params, **kwargs} inputs = self.tokenizer(prompt, return_tensors="pt")["input_ids"] outputs = self.client.generate(inputs, **params) text = self.tokenizer.decode(outputs[0]) if stop is not None: ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/petals.html
077e5cda6d64-0
Source code for langchain.llms.predictionguard """Wrapper around Prediction Guard APIs.""" import logging from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/predictionguard.html
077e5cda6d64-1
"""Your Prediction Guard access token.""" stop: Optional[List[str]] = None [docs] class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the access token and pyt...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/predictionguard.html
077e5cda6d64-2
Args: prompt: The prompt to pass into the model. Returns: The string generated by the model. Example: .. code-block:: python response = pgllm("Tell me a joke.") """ import predictionguard as pg params = self._default_params ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/predictionguard.html
5ecfd85c6eeb-0
Source code for langchain.llms.sagemaker_endpoint """Wrapper around Sagemaker InvokeEndpoint API.""" from abc import abstractmethod from typing import Any, Dict, Generic, List, Mapping, Optional, TypeVar, Union from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun f...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/sagemaker_endpoint.html
5ecfd85c6eeb-1
[docs] @abstractmethod def transform_input(self, prompt: INPUT_TYPE, model_kwargs: Dict) -> bytes: """Transforms the input to a format that model can accept as the request Body. Should return bytes or seekable file like object in the format specified in the content_type request he...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/sagemaker_endpoint.html
5ecfd85c6eeb-2
"default" ) se = SagemakerEndpoint( endpoint_name=endpoint_name, region_name=region_name, credentials_profile_name=credentials_profile_name ) """ client: Any #: :meta private: endpoint_name: str = "" """The name of the ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/sagemaker_endpoint.html
5ecfd85c6eeb-3
response_json = json.loads(output.read().decode("utf-8")) return response_json[0]["generated_text"] """ model_kwargs: Optional[Dict] = None """Key word arguments to pass to the model.""" endpoint_kwargs: Optional[Dict] = None """Optional attributes passed to the invoke_endpoint ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/sagemaker_endpoint.html
5ecfd85c6eeb-4
"""Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"endpoint_name": self.endpoint_name}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "sagemaker_e...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/sagemaker_endpoint.html
5ecfd85c6eeb-5
if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to the sagemaker endpoint. text = enforce_stop_tokens(text, stop) return text
https://api.python.langchain.com/en/latest/_modules/langchain/llms/sagemaker_endpoint.html
01df9c1c8b1a-0
Source code for langchain.llms.openllm """Wrapper around OpenLLM APIs.""" from __future__ import annotations import copy import json import logging from typing import ( TYPE_CHECKING, Any, Dict, List, Literal, Optional, TypedDict, Union, overload, ) from pydantic import PrivateAttr f...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openllm.html
01df9c1c8b1a-1
) llm("What is the difference between a duck and a goose?") For all available supported models, you can run 'openllm models'. If you have a OpenLLM server running, you can also use it remotely: .. code-block:: python from langchain.llms import OpenLLM llm = OpenLLM(se...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openllm.html
01df9c1c8b1a-2
@overload def __init__( self, model_name: Optional[str] = ..., *, model_id: Optional[str] = ..., embedded: Literal[True, False] = ..., **llm_kwargs: Any, ) -> None: ... @overload def __init__( self, *, server_url: str = ...,...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openllm.html
01df9c1c8b1a-3
super().__init__( **{ "server_url": server_url, "server_type": server_type, "llm_kwargs": llm_kwargs, } ) self._runner = None # type: ignore self._client = client else: as...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openllm.html
01df9c1c8b1a-4
model_id='google/flan-t5-large', embedded=False, ) tools = load_tools(["serpapi", "llm-math"], llm=llm) agent = initialize_agent( tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION ) svc = bentoml.Service("langchain-openllm...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openllm.html
01df9c1c8b1a-5
return "openllm_client" if self._client else "openllm" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: CallbackManagerForLLMRun | None = None, **kwargs: Any, ) -> str: try: import openllm except ImportError as e: ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openllm.html
01df9c1c8b1a-6
) if self._client: return await self._client.acall( "generate", prompt, **config.model_dump(flatten=True) ) else: assert self._runner is not None ( prompt, generate_kwargs, postprocess_kwargs,...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openllm.html
37b11832c544-0
Source code for langchain.llms.gooseai """Wrapper around GooseAI API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/gooseai.html
37b11832c544-1
presence_penalty: float = 0 """Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" logit_bias: Optional[Dict[...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/gooseai.html
37b11832c544-2
gooseai_api_key = get_from_dict_or_env( values, "gooseai_api_key", "GOOSEAI_API_KEY" ) try: import openai openai.api_key = gooseai_api_key openai.api_base = "https://api.goose.ai/v1" values["client"] = openai.Completion except ImportErr...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/gooseai.html
37b11832c544-3
**kwargs: Any, ) -> str: """Call the GooseAI API.""" params = self._default_params if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop params = {**params, **kwar...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/gooseai.html
80b0141a0d4b-0
Source code for langchain.llms.modal """Wrapper around Modal API.""" import logging from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain....
https://api.python.langchain.com/en/latest/_modules/langchain/llms/modal.html
80b0141a0d4b-1
raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/modal.html
d27d2a58fef1-0
Source code for langchain.llms.rwkv """Wrapper for the RWKV model. Based on https://github.com/saharNooby/rwkv.cpp/blob/master/rwkv/chat_with_bot.py https://github.com/BlinkDL/ChatRWKV/blob/main/v2/chat.py """ from typing import Any, Dict, List, Mapping, Optional, Set from pydantic import BaseModel, Extra, roo...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/rwkv.html
d27d2a58fef1-1
"""Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.""" penalty_alpha_presence: float = 0.4 """Positive values penalize new tokens based on whether they appear in the text so far, increasing ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/rwkv.html
d27d2a58fef1-2
def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in the environment.""" try: import tokenizers except ImportError: raise ImportError( "Could not import tokenizers python package. " "Please inst...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/rwkv.html
d27d2a58fef1-3
"""Return the type of llm.""" return "rwkv" [docs] def run_rnn(self, _tokens: List[str], newline_adj: int = 0) -> Any: AVOID_REPEAT_TOKENS = [] AVOID_REPEAT = ",:?!" for i in AVOID_REPEAT: dd = self.pipeline.encode(i) assert len(dd) == 1 AVOID_REPEA...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/rwkv.html
d27d2a58fef1-4
) END_OF_TEXT = 0 if token == END_OF_TEXT: break if token not in occurrence: occurrence[token] = 1 else: occurrence[token] += 1 logits = self.run_rnn([token]) xxx = self.tokenizer.decode(self.model_to...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/rwkv.html
3ac2ee076bb4-0
Source code for langchain.llms.huggingface_hub """Wrapper around HuggingFace APIs.""" from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enf...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_hub.html
3ac2ee076bb4-1
"""Configuration for this pydantic object.""" extra = Extra.forbid [docs] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" huggingfacehub_api_token = get_from_dict_or_env( values, "hugg...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_hub.html
3ac2ee076bb4-2
prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to HuggingFace Hub's inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_hub.html
2e70c5ca2066-0
Source code for langchain.llms.cohere """Wrapper around Cohere APIs.""" from __future__ import annotations import logging from typing import Any, Callable, Dict, List, Optional from pydantic import Extra, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/cohere.html
2e70c5ca2066-1
"""Wrapper around Cohere large language models. To use, you should have the ``cohere`` python package installed, and the environment variable ``COHERE_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.l...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/cohere.html
2e70c5ca2066-2
"""Configuration for this pydantic object.""" extra = Extra.forbid [docs] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" cohere_api_key = get_from_dict_or_env( values, "cohere_api_key...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/cohere.html
2e70c5ca2066-3
**kwargs: Any, ) -> str: """Call out to Cohere's generate endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/cohere.html
46eee3f07fb0-0
Source code for langchain.llms.bananadev """Wrapper around Banana API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/bananadev.html
46eee3f07fb0-1
if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transfered to model_kwargs. Please confirm that {field_name} is ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/bananadev.html
46eee3f07fb0-2
) params = self.model_kwargs or {} params = {**params, **kwargs} api_key = self.banana_api_key model_key = self.model_key model_inputs = { # a json specific to your model. "prompt": prompt, **params, } response = banana.run(api_...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/bananadev.html
0484d5e74466-0
Source code for langchain.llms.pipelineai """Wrapper around Pipeline Cloud API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import BaseModel, Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from l...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/pipelineai.html
0484d5e74466-1
extra = values.get("pipeline_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/pipelineai.html
0484d5e74466-2
raise ValueError( "Could not import pipeline-ai python package. " "Please install it with `pip install pipeline-ai`." ) client = PipelineCloud(token=self.pipeline_api_key) params = self.pipeline_kwargs or {} params = {**params, **kwargs} run = ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/pipelineai.html
319d42b5bcda-0
Source code for langchain.llms.human from typing import Any, Callable, List, Mapping, Optional from pydantic import Field from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens def _display_prompt(prompt: str) -> None: ...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/human.html
319d42b5bcda-1
"""Returns the type of LLM.""" return "human-input" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """ Displays the prompt to the user and returns the...
https://api.python.langchain.com/en/latest/_modules/langchain/llms/human.html