"""
cluster_driver_port: Optional[str] = None
"""The port number used by the HTTP server running on the cluster driver node.
The server should listen on the driver IP address, or simply ``0.0.0.0``, so that it can be reached.
We recommend using a port number in the range ``[3000, 8000]``.
"""
model_kwargs: Optional[Dict[str, Any]] = None
"""Extra parameters to pass to the endpoint."""
transform_input_fn: Optional[Callable] = None
"""A function that transforms ``{prompt, stop, **kwargs}`` into a JSON-compatible
request object that the endpoint accepts.
For example, you can apply a prompt template to the input prompt.
"""
transform_output_fn: Optional[Callable[..., str]] = None
"""A function that transforms the output from the endpoint to the generated text.
"""
_client: _DatabricksClientBase = PrivateAttr()
class Config:
extra = Extra.forbid
underscore_attrs_are_private = True
@validator("cluster_id", always=True)
def set_cluster_id(cls, v: Any, values: Dict[str, Any]) -> Optional[str]:
if v and values["endpoint_name"]:
raise ValueError("Cannot set both endpoint_name and cluster_id.")
elif values["endpoint_name"]:
return None
elif v:
return v
else:
try:
if v := get_repl_context().clusterId:
return v
raise ValueError("Context doesn't contain clusterId.")
except Exception as e:
raise ValueError(
"Neither endpoint_name nor cluster_id was set. "
"And the cluster_id cannot be automatically determined. Received"
f" error: {e}"
)
@validator("cluster_driver_port", always=True)
def set_cluster_driver_port(cls, v: Any, values: Dict[str, Any]) -> Optional[str]:
if v and values["endpoint_name"]:
raise ValueError("Cannot set both endpoint_name and cluster_driver_port.")
elif values["endpoint_name"]:
return None
elif v is None:
raise ValueError(
"Must set cluster_driver_port to connect to a cluster driver."
)
elif int(v) <= 0:
raise ValueError(f"Invalid cluster_driver_port: {v}")
else:
return v
@validator("model_kwargs", always=True)
def set_model_kwargs(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
if v:
assert "prompt" not in v, "model_kwargs must not contain key 'prompt'"
assert "stop" not in v, "model_kwargs must not contain key 'stop'"
return v
def __init__(self, **data: Any):
super().__init__(**data)
if self.endpoint_name:
self._client = _DatabricksServingEndpointClient(
host=self.host,
api_token=self.api_token,
endpoint_name=self.endpoint_name,
)
elif self.cluster_id and self.cluster_driver_port:
self._client = _DatabricksClusterDriverProxyClient(
host=self.host,
api_token=self.api_token,
cluster_id=self.cluster_id,
cluster_driver_port=self.cluster_driver_port,
)
else:
raise ValueError(
"Must specify either endpoint_name or cluster_id/cluster_driver_port."
)
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "databricks"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Queries the LLM endpoint with the given prompt and stop sequence."""
# TODO: support callbacks
request = {"prompt": prompt, "stop": stop}
request.update(kwargs)
if self.model_kwargs:
request.update(self.model_kwargs)
if self.transform_input_fn:
request = self.transform_input_fn(**request)
response = self._client.post(request)
if self.transform_output_fn:
response = self.transform_output_fn(response)
return response
https://api.python.langchain.com/en/latest/_modules/langchain/llms/databricks.html
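A minimal usage sketch, assuming the class above is exported as ``Databricks`` from ``langchain.llms`` and that the driver-proxy app returns either a plain string or a dict with a ``candidates`` list (both assumptions, not stated on this page):
.. code-block:: python

    from langchain.llms import Databricks

    def transform_input(**request):
        # Receives {prompt, stop, **kwargs}; apply a simple prompt template.
        request["prompt"] = f"Answer briefly: {request['prompt']}"
        return request

    def transform_output(response):
        # Hypothetical response shape; adapt to whatever your driver app returns.
        return response["candidates"][0] if isinstance(response, dict) else response

    llm = Databricks(
        cluster_id="0123-456789-abcdefgh",   # placeholder; auto-detected in a notebook
        cluster_driver_port="7777",          # required for the driver-proxy client
        model_kwargs={"temperature": 0.1},   # merged into every request
        transform_input_fn=transform_input,
        transform_output_fn=transform_output,
    )
    # host/api_token are taken from the notebook context here; pass them explicitly otherwise.
    print(llm("What is Apache Spark?"))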
Source code for langchain.llms.openllm
from __future__ import annotations
import copy
import json
import logging
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Literal,
Optional,
TypedDict,
Union,
overload,
)
from pydantic import PrivateAttr
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
if TYPE_CHECKING:
import openllm
ServerType = Literal["http", "grpc"]
[docs]class IdentifyingParams(TypedDict):
"""Parameters for identifying a model as a typed dict."""
model_name: str
model_id: Optional[str]
server_url: Optional[str]
server_type: Optional[ServerType]
embedded: bool
llm_kwargs: Dict[str, Any]
logger = logging.getLogger(__name__)
[docs]class OpenLLM(LLM):
"""OpenLLM, supporting both in-process model
instance and remote OpenLLM servers.
To use, you should have the openllm library installed:
.. code-block:: bash
pip install openllm
Learn more at: https://github.com/bentoml/openllm
Example running an LLM model locally managed by OpenLLM:
.. code-block:: python
from langchain.llms import OpenLLM
llm = OpenLLM(
model_name='flan-t5',
model_id='google/flan-t5-large',
)
llm("What is the difference between a duck and a goose?")
For all available models, you can run 'openllm models'.
If you have an OpenLLM server running, you can also use it remotely:
.. code-block:: python
from langchain.llms import OpenLLM
llm = OpenLLM(server_url='http://localhost:3000')
llm("What is the difference between a duck and a goose?")
"""
model_name: Optional[str] = None
"""Model name to use. See 'openllm models' for all available models."""
model_id: Optional[str] = None
"""Model Id to use. If not provided, will use the default model for the model name.
See 'openllm models' for all available model variants."""
server_url: Optional[str] = None
"""Optional server URL that currently runs a LLMServer with 'openllm start'."""
server_type: ServerType = "http"
"""Optional server type. Either 'http' or 'grpc'."""
embedded: bool = True
"""Initialize this LLM instance in current process by default. Should
only set to False when using in conjunction with BentoML Service."""
llm_kwargs: Dict[str, Any]
"""Key word arguments to be passed to openllm.LLM"""
_runner: Optional[openllm.LLMRunner] = PrivateAttr(default=None)
_client: Union[
openllm.client.HTTPClient, openllm.client.GrpcClient, None
] = PrivateAttr(default=None)
class Config:
extra = "forbid"
@overload
def __init__(
self,
model_name: Optional[str] = ...,
*,
model_id: Optional[str] = ...,
embedded: Literal[True, False] = ...,
**llm_kwargs: Any,
) -> None:
...
@overload
def __init__(
self,
*,
server_url: str = ...,
server_type: Literal["grpc", "http"] = ...,
**llm_kwargs: Any,
) -> None:
...
def __init__(
self,
model_name: Optional[str] = None,
*,
model_id: Optional[str] = None,
server_url: Optional[str] = None,
server_type: Literal["grpc", "http"] = "http",
embedded: bool = True,
**llm_kwargs: Any,
):
try:
import openllm
except ImportError as e:
raise ImportError(
"Could not import openllm. Make sure to install it with "
"'pip install openllm.'"
) from e
llm_kwargs = llm_kwargs or {}
if server_url is not None:
logger.debug("'server_url' is provided, returning a openllm.Client")
assert (
model_id is None and model_name is None
), "'server_url' and {'model_id', 'model_name'} are mutually exclusive"
client_cls = (
openllm.client.HTTPClient
if server_type == "http"
else openllm.client.GrpcClient
)
client = client_cls(server_url)
super().__init__(
**{
"server_url": server_url,
"server_type": server_type,
"llm_kwargs": llm_kwargs,
}
)
self._runner = None # type: ignore
self._client = client
else:
assert model_name is not None, "Must provide 'model_name' or 'server_url'"
# Since the LLMs are relatively huge, we don't actually want to initialize the
# Runner as embedded when running the server. Instead, we only set init_local
# here so that LangChain users can still use the LLM in-process. For BentoML
# users, setting embedded=False is the expected behaviour to invoke the
# runners remotely.
# We need to also enable ensure_available to download and setup the model.
runner = openllm.Runner(
model_name=model_name,
model_id=model_id,
init_local=embedded,
ensure_available=True,
**llm_kwargs,
)
super().__init__(
**{
"model_name": model_name,
"model_id": model_id,
"embedded": embedded,
"llm_kwargs": llm_kwargs,
}
)
self._client = None # type: ignore
self._runner = runner
@property
def runner(self) -> openllm.LLMRunner:
"""
Get the underlying openllm.LLMRunner instance for integration with BentoML.
Example:
.. code-block:: python
llm = OpenLLM(
model_name='flan-t5',
model_id='google/flan-t5-large',
embedded=False,
)
tools = load_tools(["serpapi", "llm-math"], llm=llm)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION
)
svc = bentoml.Service("langchain-openllm", runners=[llm.runner])
@svc.api(input=Text(), output=Text())
def chat(input_text: str):
return agent.run(input_text)
"""
if self._runner is None:
raise ValueError("OpenLLM must be initialized locally with 'model_name'")
return self._runner
@property
def _identifying_params(self) -> IdentifyingParams:
"""Get the identifying parameters."""
if self._client is not None:
self.llm_kwargs.update(self._client.configuration)
model_name = self._client.model_name
model_id = self._client.model_id
else:
if self._runner is None:
raise ValueError("Runner must be initialized.")
model_name = self.model_name
model_id = self.model_id
try:
self.llm_kwargs.update(
json.loads(self._runner.identifying_params["configuration"])
)
except (TypeError, json.JSONDecodeError):
pass
return IdentifyingParams(
server_url=self.server_url,
server_type=self.server_type,
embedded=self.embedded,
llm_kwargs=self.llm_kwargs,
model_name=model_name,
model_id=model_id,
)
@property
def _llm_type(self) -> str:
return "openllm_client" if self._client else "openllm"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> str:
try:
import openllm
except ImportError as e:
raise ImportError(
"Could not import openllm. Make sure to install it with "
"'pip install openllm'."
) from e
copied = copy.deepcopy(self.llm_kwargs)
copied.update(kwargs)
config = openllm.AutoConfig.for_model(
self._identifying_params["model_name"], **copied
)
if self._client:
return self._client.query(prompt, **config.model_dump(flatten=True))
else:
assert self._runner is not None
return self._runner(prompt, **config.model_dump(flatten=True))
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
try:
import openllm
except ImportError as e:
raise ImportError(
"Could not import openllm. Make sure to install it with "
"'pip install openllm'."
) from e
copied = copy.deepcopy(self.llm_kwargs)
copied.update(kwargs)
config = openllm.AutoConfig.for_model(
self._identifying_params["model_name"], **copied
)
if self._client:
return await self._client.acall(
"generate", prompt, **config.model_dump(flatten=True)
)
else:
assert self._runner is not None
(
prompt,
generate_kwargs,
postprocess_kwargs,
) = self._runner.llm.sanitize_parameters(prompt, **kwargs)
generated_result = await self._runner.generate.async_run(
prompt, **generate_kwargs
)
return self._runner.llm.postprocess_generate(
prompt, generated_result, **postprocess_kwargs
)
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openllm.html
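As a quick sketch of the two construction paths defined by the overloads above (``temperature`` is an illustrative generation parameter collected into ``llm_kwargs``):
.. code-block:: python

    from langchain.llms import OpenLLM

    # In-process: builds an openllm.Runner (embedded=True by default).
    local_llm = OpenLLM(
        model_name="flan-t5",
        model_id="google/flan-t5-large",
        temperature=0.2,
    )

    # Remote: mutually exclusive with model_name/model_id; builds an HTTP or
    # gRPC client depending on server_type.
    remote_llm = OpenLLM(server_url="http://localhost:3000", server_type="http")

    # Per-call kwargs are merged into llm_kwargs and passed through
    # openllm.AutoConfig.for_model before the query is sent.
    print(local_llm("What is the difference between a duck and a goose?"))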
Source code for langchain.llms.bedrock
import json
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
[docs]class LLMInputOutputAdapter:
"""Adapter class to prepare the inputs from Langchain to a format
that LLM model expects.
It also provides helper function to extract
the generated text from the model response."""
[docs] @classmethod
def prepare_input(
cls, provider: str, prompt: str, model_kwargs: Dict[str, Any]
) -> Dict[str, Any]:
input_body = {**model_kwargs}
if provider == "anthropic" or provider == "ai21":
input_body["prompt"] = prompt
elif provider == "amazon":
input_body = dict()
input_body["inputText"] = prompt
input_body["textGenerationConfig"] = {**model_kwargs}
else:
input_body["inputText"] = prompt
if provider == "anthropic" and "max_tokens_to_sample" not in input_body:
input_body["max_tokens_to_sample"] = 50
return input_body
[docs] @classmethod
def prepare_output(cls, provider: str, response: Any) -> str:
if provider == "anthropic":
response_body = json.loads(response.get("body").read().decode())
return response_body.get("completion")
else:
response_body = json.loads(response.get("body").read())
if provider == "ai21":
return response_body.get("completions")[0].get("data").get("text")
else:
return response_body.get("results")[0].get("outputText")
[docs]class Bedrock(LLM):
"""Bedrock models.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Bedrock service.
"""
"""
Example:
.. code-block:: python
from langchain.llms import Bedrock
llm = Bedrock(
credentials_profile_name="default",
model_id="amazon.titan-tg1-large"
)
"""
client: Any #: :meta private:
region_name: Optional[str] = None
"""The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config in case it is not provided here.
"""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
model_id: str
"""Id of the model to call, e.g., amazon.titan-tg1-large, this is
equivalent to the modelId property in the list-foundation-models api"""
model_kwargs: Optional[Dict] = None
"""Key word arguments to pass to the model."""
endpoint_url: Optional[str] = None
"""Needed if you don't want to default to us-east-1 endpoint"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that AWS credentials to and python package exists in environment."""
# Skip creating new client if passed in constructor
if values["client"] is not None:
return values
try:
import boto3
if values["credentials_profile_name"] is not None:
session = boto3.Session(profile_name=values["credentials_profile_name"])
else:
# use default credentials
session = boto3.Session()
client_params = {}
if values["region_name"]:
client_params["region_name"] = values["region_name"]
if values["endpoint_url"]:
client_params["endpoint_url"] = values["endpoint_url"]
values["client"] = session.client("bedrock", **client_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "amazon_bedrock"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Bedrock service model.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = llm("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
provider = self.model_id.split(".")[0]
params = {**_model_kwargs, **kwargs}
input_body = LLMInputOutputAdapter.prepare_input(provider, prompt, params)
body = json.dumps(input_body)
accept = "application/json"
contentType = "application/json"
try:
response = self.client.invoke_model(
body=body, modelId=self.model_id, accept=accept, contentType=contentType
)
text = LLMInputOutputAdapter.prepare_output(provider, response)
except Exception as e:
raise ValueError(f"Error raised by bedrock service: {e}")
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
https://api.python.langchain.com/en/latest/_modules/langchain/llms/bedrock.html
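To make the provider branching in ``LLMInputOutputAdapter.prepare_input`` concrete, here is a small sketch that exercises it directly (no AWS credentials or network call involved):
.. code-block:: python

    from langchain.llms.bedrock import LLMInputOutputAdapter

    kwargs = {"temperature": 0.5}

    # Anthropic-style body: prompt at top level, default max_tokens_to_sample added.
    print(LLMInputOutputAdapter.prepare_input("anthropic", "Hello", kwargs))
    # {'temperature': 0.5, 'prompt': 'Hello', 'max_tokens_to_sample': 50}

    # Amazon Titan-style body: inputText plus a textGenerationConfig block.
    print(LLMInputOutputAdapter.prepare_input("amazon", "Hello", kwargs))
    # {'inputText': 'Hello', 'textGenerationConfig': {'temperature': 0.5}}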
Source code for langchain.llms.llamacpp
import logging
from typing import Any, Dict, Iterator, List, Optional
from pydantic import Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.schema.output import GenerationChunk
from langchain.utils import get_pydantic_field_names
from langchain.utils.utils import build_extra_kwargs
logger = logging.getLogger(__name__)
[docs]class LlamaCpp(LLM):
"""llama.cpp model.
To use, you should have the llama-cpp-python library installed, and provide the
path to the Llama model as a named parameter to the constructor.
Check out: https://github.com/abetlen/llama-cpp-python
Example:
.. code-block:: python
from langchain.llms import LlamaCpp
llm = LlamaCpp(model_path="/path/to/llama/model")
"""
client: Any #: :meta private:
model_path: str
"""The path to the Llama model file."""
lora_base: Optional[str] = None
"""The path to the Llama LoRA base model."""
lora_path: Optional[str] = None
"""The path to the Llama LoRA. If None, no LoRa is loaded."""
n_ctx: int = Field(512, alias="n_ctx")
"""Token context window."""
n_parts: int = Field(-1, alias="n_parts")
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = Field(-1, alias="seed")
"""Seed. If -1, a random seed is used."""
f16_kv: bool = Field(True, alias="f16_kv")
"""Use half-precision for key/value cache."""
logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights."""
use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
n_threads: Optional[int] = Field(None, alias="n_threads")
"""Number of threads to use.
If None, the number of threads is automatically determined."""
n_batch: Optional[int] = Field(8, alias="n_batch")
"""Number of tokens to process in parallel.
Should be a number between 1 and n_ctx."""
n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers")
"""Number of layers to be loaded into gpu memory. Default None."""
suffix: Optional[str] = Field(None)
"""A suffix to append to the generated text. If None, no suffix is appended."""
max_tokens: Optional[int] = 256
"""The maximum number of tokens to generate."""
temperature: Optional[float] = 0.8
"""The temperature to use for sampling."""
top_p: Optional[float] = 0.95
"""The top-p value to use for sampling."""
logprobs: Optional[int] = Field(None)
"""The number of logprobs to return. If None, no logprobs are returned."""
echo: Optional[bool] = False
"""Whether to echo the prompt."""
stop: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
repeat_penalty: Optional[float] = 1.1
"""The penalty to apply to repeated tokens."""
top_k: Optional[int] = 40
"""The top-k value to use for sampling."""
last_n_tokens_size: Optional[int] = 64
"""The number of tokens to look back when applying the repeat_penalty."""
use_mmap: Optional[bool] = True
"""Whether to keep the model loaded in RAM"""
rope_freq_scale: float = 1.0
"""Scale factor for rope sampling."""
rope_freq_base: float = 10000.0
"""Base frequency for rope sampling."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Any additional parameters to pass to llama_cpp.Llama."""
streaming: bool = True
"""Whether to stream the results, token by token."""
verbose: bool = True
"""Print verbose output to stderr."""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that llama-cpp-python library is installed."""
model_path = values["model_path"]
model_param_names = [
"rope_freq_scale",
"rope_freq_base",
"lora_path",
"lora_base",
"n_ctx",
"n_parts",
"seed",
"f16_kv",
"logits_all",
"vocab_only",
"use_mlock",
"n_threads",
"n_batch",
"use_mmap",
"last_n_tokens_size",
"verbose",
]
model_params = {k: values[k] for k in model_param_names}
# For backwards compatibility, only include if non-null.
if values["n_gpu_layers"] is not None:
model_params["n_gpu_layers"] = values["n_gpu_layers"]
model_params.update(values["model_kwargs"])
try:
from llama_cpp import Llama
values["client"] = Llama(model_path, **model_params)
except ImportError:
raise ImportError(
"Could not import llama-cpp-python library. "
"Please install the llama-cpp-python library to "
"use this embedding model: pip install llama-cpp-python"
)
except Exception as e:
raise ValueError(
f"Could not load Llama model from path: {model_path}. "
f"Received error {e}"
)
return values
@root_validator(pre=True)
def build_model_kwargs(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
values["model_kwargs"] = build_extra_kwargs(
extra, values, all_required_field_names
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling llama_cpp."""
return {
"suffix": self.suffix,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
"logprobs": self.logprobs,
"echo": self.echo,
"stop_sequences": self.stop, # key here is convention among LLM classes
"repeat_penalty": self.repeat_penalty,
"top_k": self.top_k,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model_path": self.model_path}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "llamacpp"
def _get_parameters(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:
"""
Performs sanity check, preparing parameters in format needed by llama_cpp.
Args:
stop (Optional[List[str]]): List of stop sequences for llama_cpp.
Returns:
Dictionary containing the combined parameters.
"""
# Raise error if stop sequences are in both input and default params
if self.stop and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
params = self._default_params
# llama_cpp expects the "stop" key not this, so we remove it:
params.pop("stop_sequences")
# then sets it as configured, or default to an empty list:
params["stop"] = self.stop or stop or []
return params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the Llama model and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain.llms import LlamaCpp
llm = LlamaCpp(model_path="/path/to/local/llama/model.bin")
llm("This is a prompt.")
"""
if self.streaming:
# If streaming is enabled, we use the _stream method, which yields chunks
# as they are generated, and return the combined text
# from the first choice of each chunk:
combined_text_output = ""
for chunk in self._stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
combined_text_output += chunk.text
return combined_text_output
else:
params = self._get_parameters(stop)
params = {**params, **kwargs}
result = self.client(prompt=prompt, **params)
return result["choices"][0]["text"]
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Yields results objects as they are generated in real time.
It also calls the callback manager's on_llm_new_token event with
similar parameters to the OpenAI LLM class method of the same name.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens being generated.
Yields:
Dictionary-like objects containing a string token and metadata.
See the llama-cpp-python docs and the example below for more.
Example:
.. code-block:: python
from langchain.llms import LlamaCpp
llm = LlamaCpp(
model_path="/path/to/local/model.bin",
temperature = 0.5
)
for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
stop=["'","\n"]):
result = chunk["choices"][0]
print(result["text"], end='', flush=True)
"""
params = {**self._get_parameters(stop), **kwargs}
result = self.client(prompt=prompt, stream=True, **params)
for part in result:
logprobs = part["choices"][0].get("logprobs", None)
chunk = GenerationChunk(
text=part["choices"][0]["text"],
generation_info={"logprobs": logprobs},
)
yield chunk
if run_manager:
run_manager.on_llm_new_token(
token=chunk.text, verbose=self.verbose, log_probs=logprobs
)
[docs] def get_num_tokens(self, text: str) -> int:
tokenized_text = self.client.tokenize(text.encode("utf-8"))
return len(tokenized_text)
https://api.python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
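A usage sketch tying together the fields above; the model path is a placeholder, and ``n_gpu_layers`` is only forwarded to ``llama_cpp.Llama`` when it is not ``None``:
.. code-block:: python

    from langchain.llms import LlamaCpp

    llm = LlamaCpp(
        model_path="/path/to/local/model.bin",  # placeholder path
        n_ctx=2048,
        n_gpu_layers=32,
        temperature=0.7,
        max_tokens=128,
        streaming=False,   # when True, _call concatenates streamed chunks instead
    )

    # `stop` may be set on the instance or passed per call, but not both;
    # otherwise _get_parameters raises a ValueError.
    print(llm("Name three sea mammals:", stop=["\n\n"]))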
Source code for langchain.llms.predibase
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Field
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
[docs]class Predibase(LLM):
"""Use your Predibase models with Langchain.
To use, you should have the ``predibase`` python package installed,
and have your Predibase API key.
"""
model: str
predibase_api_key: str
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
@property
def _llm_type(self) -> str:
return "predibase"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any
) -> str:
try:
from predibase import PredibaseClient
pc = PredibaseClient(token=self.predibase_api_key)
except ImportError as e:
raise ImportError(
"Could not import Predibase Python package. "
"Please install it with `pip install predibase`."
) from e
except ValueError as e:
raise ValueError("Your API key is not correct. Please try again") from e
# load model and version
results = pc.prompt(prompt, model_name=self.model)
return results[0].response
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_kwargs": self.model_kwargs},
}
https://api.python.langchain.com/en/latest/_modules/langchain/llms/predibase.html
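A minimal usage sketch for the class above; the model name and API key are placeholders:
.. code-block:: python

    from langchain.llms.predibase import Predibase

    llm = Predibase(
        model="vicuna-13b",            # placeholder model name in your Predibase account
        predibase_api_key="pb_xxxx",   # placeholder API key
    )
    # _call builds a PredibaseClient from the key, prompts the named model,
    # and returns results[0].response.
    print(llm("Write a one-line haiku about data."))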
Source code for langchain.llms.azureml_endpoint
import json
import urllib.request
import warnings
from abc import abstractmethod
from typing import Any, Dict, List, Mapping, Optional
from pydantic import BaseModel, validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
[docs]class AzureMLEndpointClient(object):
"""AzureML Managed Endpoint client."""
[docs] def __init__(
self, endpoint_url: str, endpoint_api_key: str, deployment_name: str = ""
) -> None:
"""Initialize the class."""
if not endpoint_api_key or not endpoint_url:
raise ValueError(
"""A key/token and REST endpoint should
be provided to invoke the endpoint"""
)
self.endpoint_url = endpoint_url
self.endpoint_api_key = endpoint_api_key
self.deployment_name = deployment_name
[docs] def call(self, body: bytes, **kwargs: Any) -> bytes:
"""call."""
# The azureml-model-deployment header will force the request to go to a
# specific deployment. Remove this header to have the request observe the
# endpoint traffic rules.
headers = {
"Content-Type": "application/json",
"Authorization": ("Bearer " + self.endpoint_api_key),
}
if self.deployment_name != "":
headers["azureml-model-deployment"] = self.deployment_name
req = urllib.request.Request(self.endpoint_url, body, headers)
response = urllib.request.urlopen(req, timeout=kwargs.get("timeout", 50))
result = response.read()
return result
[docs]class ContentFormatterBase:
"""Transform request and response of AzureML endpoint to match with
required schema.
"""
"""
Example:
.. code-block:: python
class ContentFormatter(ContentFormatterBase):
content_type = "application/json"
accepts = "application/json"
def format_request_payload(
self,
prompt: str,
model_kwargs: Dict
) -> bytes:
input_str = json.dumps(
{
"inputs": {"input_string": [prompt]},
"parameters": model_kwargs,
}
)
return str.encode(input_str)
def format_response_payload(self, output: str) -> str:
response_json = json.loads(output)
return response_json[0]["0"]
"""
content_type: Optional[str] = "application/json"
"""The MIME type of the input data passed to the endpoint"""
accepts: Optional[str] = "application/json"
"""The MIME type of the response data returned from the endpoint"""
[docs] @staticmethod
def escape_special_characters(prompt: str) -> str:
"""Escapes any special characters in `prompt`"""
escape_map = {
"\\": "\\\\",
'"': '\\"',
"\b": "\\b",
"\f": "\\f",
"\n": "\\n",
"\r": "\\r",
"\t": "\\t",
}
# Replace each occurrence of the specified characters with escaped versions
for escape_sequence, escaped_sequence in escape_map.items():
prompt = prompt.replace(escape_sequence, escaped_sequence)
return prompt
[docs] @abstractmethod
def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
"""Formats the request body according to the input schema of
the model. Returns bytes or seekable file like object in the
format specified in the content_type request header.
"""
[docs] @abstractmethod
def format_response_payload(self, output: bytes) -> str:
"""Formats the response body according to the output
schema of the model. Returns the data type that is
received from the response.
"""
[docs]class GPT2ContentFormatter(ContentFormatterBase):
"""Content handler for GPT2"""
[docs] def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
prompt = ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps(
{"inputs": {"input_string": [f'"{prompt}"']}, "parameters": model_kwargs}
)
return str.encode(request_payload)
[docs] def format_response_payload(self, output: bytes) -> str:
return json.loads(output)[0]["0"]
[docs]class OSSContentFormatter(GPT2ContentFormatter):
"""Deprecated: Kept for backwards compatibility
Content handler for LLMs from the OSS catalog."""
content_formatter: Any = None
[docs] def __init__(self) -> None:
super().__init__()
warnings.warn(
"""`OSSContentFormatter` will be deprecated in the future.
Please use `GPT2ContentFormatter` instead.
"""
)
[docs]class HFContentFormatter(ContentFormatterBase):
"""Content handler for LLMs from the HuggingFace catalog."""
[docs] def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
prompt = ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps(
{"inputs": [f'"{prompt}"'], "parameters": model_kwargs}
)
return str.encode(request_payload)
[docs] def format_response_payload(self, output: bytes) -> str:
return json.loads(output)[0]["generated_text"]
[docs]class DollyContentFormatter(ContentFormatterBase):
"""Content handler for the Dolly-v2-12b model"""
[docs] def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
prompt = ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps(
{
"input_data": {"input_string": [f'"{prompt}"']},
"parameters": model_kwargs,
}
)
return str.encode(request_payload)
[docs] def format_response_payload(self, output: bytes) -> str:
return json.loads(output)[0]
[docs]class LlamaContentFormatter(ContentFormatterBase):
"""Content formatter for LLaMa"""
[docs] def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
"""Formats the request according the the chosen api"""
prompt = ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps(
{
"input_data": {
"input_string": [f'"{prompt}"'],
"parameters": model_kwargs,
}
}
)
return str.encode(request_payload)
[docs] def format_response_payload(self, output: bytes) -> str:
"""Formats response"""
return json.loads(output)[0]["0"]
[docs]class AzureMLOnlineEndpoint(LLM, BaseModel):
"""Azure ML Online Endpoint models.
Example:
.. code-block:: python
azure_llm = AzureMLOnlineEndpoint(
endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/score",
endpoint_api_key="my-api-key",
content_formatter=content_formatter,
)
""" # noqa: E501
endpoint_url: str = ""
"""URL of pre-existing Endpoint. Should be passed to constructor or specified as
env var `AZUREML_ENDPOINT_URL`."""
endpoint_api_key: str = ""
"""Authentication Key for Endpoint. Should be passed to constructor or specified as
env var `AZUREML_ENDPOINT_API_KEY`."""
deployment_name: str = ""
"""Deployment Name for Endpoint. NOT REQUIRED to call endpoint. Should be passed
to constructor or specified as env var `AZUREML_DEPLOYMENT_NAME`."""
http_client: Any = None #: :meta private:
content_formatter: Any = None
"""The content formatter that provides an input and output
transform function to handle formats between the LLM and
the endpoint"""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
@validator("http_client", always=True, allow_reuse=True)
@classmethod
def validate_client(cls, field_value: Any, values: Dict) -> AzureMLEndpointClient:
"""Validate that api key and python package exists in environment."""
endpoint_key = get_from_dict_or_env(
values, "endpoint_api_key", "AZUREML_ENDPOINT_API_KEY"
)
endpoint_url = get_from_dict_or_env(
values, "endpoint_url", "AZUREML_ENDPOINT_URL"
)
deployment_name = get_from_dict_or_env(
values, "deployment_name", "AZUREML_DEPLOYMENT_NAME", ""
)
http_client = AzureMLEndpointClient(endpoint_url, endpoint_key, deployment_name)
return http_client
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"deployment_name": self.deployment_name},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "azureml_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to an AzureML Managed Online endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = azureml_model("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
request_payload = self.content_formatter.format_request_payload(
prompt, _model_kwargs
)
response_payload = self.http_client.call(request_payload, **kwargs)
generated_text = self.content_formatter.format_response_payload(
response_payload
)
return generated_text
https://api.python.langchain.com/en/latest/_modules/langchain/llms/azureml_endpoint.html
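A sketch wiring one of the formatters above into ``AzureMLOnlineEndpoint``; the URL, key, deployment name and model_kwargs values are placeholders:
.. code-block:: python

    from langchain.llms.azureml_endpoint import (
        AzureMLOnlineEndpoint,
        DollyContentFormatter,
    )

    llm = AzureMLOnlineEndpoint(
        endpoint_url="https://<your-endpoint>.<region>.inference.ml.azure.com/score",
        endpoint_api_key="my-api-key",          # or AZUREML_ENDPOINT_API_KEY env var
        deployment_name="my-dolly-deployment",  # optional; pins a specific deployment
        content_formatter=DollyContentFormatter(),
        model_kwargs={"temperature": 0.3, "max_new_tokens": 200},  # illustrative
    )
    print(llm("Explain what a managed online endpoint is."))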
Source code for langchain.llms.gpt4all
from functools import partial
from typing import Any, Dict, List, Mapping, Optional, Set
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
[docs]class GPT4All(LLM):
"""GPT4All language models.
To use, you should have the ``gpt4all`` python package installed, the
pre-trained model file, and the model's config information.
Example:
.. code-block:: python
from langchain.llms import GPT4All
model = GPT4All(model="./models/gpt4all-model.bin", n_threads=8)
# Simplest invocation
response = model("Once upon a time, ")
"""
model: str
"""Path to the pre-trained GPT4All model file."""
backend: Optional[str] = Field(None, alias="backend")
max_tokens: int = Field(200, alias="max_tokens")
"""Token context window."""
n_parts: int = Field(-1, alias="n_parts")
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = Field(0, alias="seed")
"""Seed. If -1, a random seed is used."""
f16_kv: bool = Field(False, alias="f16_kv")
"""Use half-precision for key/value cache."""
logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights."""
use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
embedding: bool = Field(False, alias="embedding")
"""Use embedding mode only."""
n_threads: Optional[int] = Field(4, alias="n_threads")
"""Number of threads to use."""
n_predict: Optional[int] = 256
"""The maximum number of tokens to generate."""
temp: Optional[float] = 0.7
"""The temperature to use for sampling."""
top_p: Optional[float] = 0.1
"""The top-p value to use for sampling."""
top_k: Optional[int] = 40
"""The top-k value to use for sampling."""
echo: Optional[bool] = False
"""Whether to echo the prompt."""
stop: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
repeat_last_n: Optional[int] = 64
"Last n tokens to penalize"
repeat_penalty: Optional[float] = 1.18
"""The penalty to apply to repeated tokens."""
n_batch: int = Field(8, alias="n_batch")
"""Batch size for prompt processing."""
streaming: bool = False
"""Whether to stream the results or not."""
allow_download: bool = False
"""If model does not exist in ~/.cache/gpt4all/, download it."""
client: Any = None #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@staticmethod
def _model_param_names() -> Set[str]:
return {
"max_tokens",
"n_predict",
"top_k",
"top_p",
"temp",
"n_batch",
"repeat_penalty",
"repeat_last_n",
}
def _default_params(self) -> Dict[str, Any]:
return {
"max_tokens": self.max_tokens,
"n_predict": self.n_predict,
"top_k": self.top_k,
"top_p": self.top_p,
"temp": self.temp,
"n_batch": self.n_batch,
"repeat_penalty": self.repeat_penalty,
"repeat_last_n": self.repeat_last_n,
}
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in the environment."""
try:
from gpt4all import GPT4All as GPT4AllModel
except ImportError:
raise ImportError(
"Could not import gpt4all python package. "
"Please install it with `pip install gpt4all`."
)
full_path = values["model"]
model_path, delimiter, model_name = full_path.rpartition("/")
model_path += delimiter
values["client"] = GPT4AllModel(
model_name,
model_path=model_path or None,
model_type=values["backend"],
allow_download=values["allow_download"],
)
if values["n_threads"] is not None:
# set n_threads
values["client"].model.set_thread_count(values["n_threads"])
try:
values["backend"] = values["client"].model_type
except AttributeError:
# The below is for compatibility with GPT4All Python bindings <= 0.2.3.
values["backend"] = values["client"].model.model_type
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
**self._default_params(),
**{
k: v for k, v in self.__dict__.items() if k in self._model_param_names()
},
}
@property
def _llm_type(self) -> str:
"""Return the type of llm."""
return "gpt4all"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
r"""Call out to GPT4All's generate method.
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "Once upon a time, "
response = model(prompt, n_predict=55)
"""
text_callback = None
if run_manager:
text_callback = partial(run_manager.on_llm_new_token, verbose=self.verbose)
text = ""
params = {**self._default_params(), **kwargs}
for token in self.client.generate(prompt, **params):
if text_callback:
text_callback(token)
text += token
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
https://api.python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html
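A usage sketch for the fields above, mirroring the class docstring; the model path is a placeholder and ``allow_download=True`` would fetch the file into ~/.cache/gpt4all/:
.. code-block:: python

    from langchain.llms import GPT4All

    llm = GPT4All(
        model="./models/gpt4all-model.bin",  # placeholder local model file
        n_threads=8,
        temp=0.7,
        n_predict=128,
        allow_download=False,
    )
    # _call streams tokens from client.generate and applies stop words afterwards.
    print(llm("Once upon a time, ", stop=["\n\n"]))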
Source code for langchain.llms.gooseai
import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
[docs]class GooseAI(LLM):
"""GooseAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``GOOSEAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import GooseAI
gooseai = GooseAI(model_name="gpt-neo-20b")
"""
client: Any
model_name: str = "gpt-neo-20b"
"""Model name to use"""
temperature: float = 0.7
"""What sampling temperature to use"""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion.
-1 returns as many tokens as possible given the prompt and
the model's maximal context size."""
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
min_tokens: int = 1
"""The minimum number of tokens to generate in the completion."""
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: float = 0
"""Penalizes repeated tokens."""
n: int = 1
"""How many completions to generate for each prompt."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
"""Adjust the probability of specific tokens being generated."""
gooseai_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.ignore
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
gooseai_api_key = get_from_dict_or_env(
values, "gooseai_api_key", "GOOSEAI_API_KEY"
)
try:
import openai
openai.api_key = gooseai_api_key
openai.api_base = "https://api.goose.ai/v1"
values["client"] = openai.Completion
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling GooseAI API."""
normal_params = {
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"min_tokens": self.min_tokens,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"n": self.n,
"logit_bias": self.logit_bias,
}
return {**normal_params, **self.model_kwargs}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "gooseai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the GooseAI API."""
params = self._default_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
params = {**params, **kwargs}
response = self.client.create(engine=self.model_name, prompt=prompt, **params)
text = response.choices[0].text
return text
https://api.python.langchain.com/en/latest/_modules/langchain/llms/gooseai.html
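A sketch of how the validators above behave: constructor kwargs that are not declared fields are moved into ``model_kwargs`` (with a warning), and all requests go through ``openai.Completion`` against the patched ``api_base``. ``typical_p`` is used here only as an illustrative non-field argument:
.. code-block:: python

    from langchain.llms import GooseAI

    llm = GooseAI(
        model_name="gpt-neo-20b",
        temperature=0.7,
        max_tokens=128,
        gooseai_api_key="goose-xxxx",   # placeholder; or GOOSEAI_API_KEY env var
        typical_p=0.9,                  # not a declared field -> moved to model_kwargs
    )
    print(llm("Finish this sentence: The goose "))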
Source code for langchain.llms.deepinfra
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
DEFAULT_MODEL_ID = "google/flan-t5-xl"
[docs]class DeepInfra(LLM):
"""DeepInfra models.
To use, you should have the ``requests`` python package installed, and the
environment variable ``DEEPINFRA_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation` and `text2text-generation` for now.
Example:
.. code-block:: python
from langchain.llms import DeepInfra
di = DeepInfra(model_id="google/flan-t5-xl",
deepinfra_api_token="my-api-key")
"""
model_id: str = DEFAULT_MODEL_ID
model_kwargs: Optional[dict] = None
deepinfra_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
deepinfra_api_token = get_from_dict_or_env(
values, "deepinfra_api_token", "DEEPINFRA_API_TOKEN"
)
values["deepinfra_api_token"] = deepinfra_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_id": self.model_id},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "deepinfra"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to DeepInfra's inference API endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = di("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
_model_kwargs = {**_model_kwargs, **kwargs}
# HTTP headers for authorization
headers = {
"Authorization": f"bearer {self.deepinfra_api_token}",
"Content-Type": "application/json",
}
try:
res = requests.post(
f"https://api.deepinfra.com/v1/inference/{self.model_id}",
headers=headers,
json={"input": prompt, **_model_kwargs},
)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
if res.status_code != 200:
raise ValueError(
"Error raised by inference API HTTP code: %s, %s"
% (res.status_code, res.text)
)
try:
t = res.json()
text = t["results"][0]["generated_text"]
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised by inference API: {e}.\nResponse: {res.text}"
)
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
https://api.python.langchain.com/en/latest/_modules/langchain/llms/deepinfra.html
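A usage sketch for the class above; the token and the extra inference parameters are placeholders:
.. code-block:: python

    from langchain.llms import DeepInfra

    llm = DeepInfra(
        model_id="google/flan-t5-xl",
        deepinfra_api_token="di_xxxx",         # or DEEPINFRA_API_TOKEN env var
        model_kwargs={"max_new_tokens": 100},  # illustrative inference parameters
    )
    # _call POSTs {"input": prompt, **model_kwargs} to
    # https://api.deepinfra.com/v1/inference/<model_id> and reads
    # results[0]["generated_text"] from the JSON response.
    print(llm("Translate to German: Good morning"))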
Source code for langchain.llms.anyscale
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
[docs]class Anyscale(LLM):
"""Anyscale Service models.
To use, you should have the environment variables ``ANYSCALE_SERVICE_URL``,
``ANYSCALE_SERVICE_ROUTE`` and ``ANYSCALE_SERVICE_TOKEN`` set with your Anyscale
Service, or pass them as named parameters to the constructor.
Example:
.. code-block:: python
from langchain.llms import Anyscale
anyscale = Anyscale(anyscale_service_url="SERVICE_URL",
anyscale_service_route="SERVICE_ROUTE",
anyscale_service_token="SERVICE_TOKEN")
# Use Ray for distributed processing
import ray
prompt_list=[]
@ray.remote
def send_query(llm, prompt):
resp = llm(prompt)
return resp
futures = [send_query.remote(anyscale, prompt) for prompt in prompt_list]
results = ray.get(futures)
"""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model. Reserved for future use"""
anyscale_service_url: Optional[str] = None
anyscale_service_route: Optional[str] = None
anyscale_service_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
anyscale_service_url = get_from_dict_or_env(
values, "anyscale_service_url", "ANYSCALE_SERVICE_URL"
)
anyscale_service_route = get_from_dict_or_env(
values, "anyscale_service_route", "ANYSCALE_SERVICE_ROUTE"
)
anyscale_service_token = get_from_dict_or_env(
values, "anyscale_service_token", "ANYSCALE_SERVICE_TOKEN"
)
if anyscale_service_url.endswith("/"):
anyscale_service_url = anyscale_service_url[:-1]
if not anyscale_service_route.startswith("/"):
anyscale_service_route = "/" + anyscale_service_route
try:
anyscale_service_endpoint = f"{anyscale_service_url}/-/routes"
headers = {"Authorization": f"Bearer {anyscale_service_token}"}
requests.get(anyscale_service_endpoint, headers=headers)
except requests.exceptions.RequestException as e:
raise ValueError(e)
values["anyscale_service_url"] = anyscale_service_url
values["anyscale_service_route"] = anyscale_service_route
values["anyscale_service_token"] = anyscale_service_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"anyscale_service_url": self.anyscale_service_url,
"anyscale_service_route": self.anyscale_service_route,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "anyscale"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Anyscale Service endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = anyscale("Tell me a joke.")
"""
anyscale_service_endpoint = (
f"{self.anyscale_service_url}{self.anyscale_service_route}"
)
headers = {"Authorization": f"Bearer {self.anyscale_service_token}"}
body = {"prompt": prompt}
resp = requests.post(anyscale_service_endpoint, headers=headers, json=body)
if resp.status_code != 200:
raise ValueError(
f"Error returned by service, status code {resp.status_code}"
)
text = resp.text
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to the Anyscale service.
text = enforce_stop_tokens(text, stop)
return text
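The snippet below is a minimal usage sketch for the ``Anyscale`` wrapper above, assuming the
three ``ANYSCALE_SERVICE_*`` environment variables point at a deployed Anyscale Service
(the placeholder values are illustrative, not real endpoints):
.. code-block:: python
    import os
    from langchain.llms import Anyscale
    # Credentials can come from the environment instead of constructor arguments.
    os.environ["ANYSCALE_SERVICE_URL"] = "SERVICE_URL"
    os.environ["ANYSCALE_SERVICE_ROUTE"] = "SERVICE_ROUTE"
    os.environ["ANYSCALE_SERVICE_TOKEN"] = "SERVICE_TOKEN"
    llm = Anyscale()  # validated against the service's /-/routes endpoint at init
    print(llm("Tell me a joke."))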
Source code for langchain.llms.human
from typing import Any, Callable, List, Mapping, Optional
from pydantic import Field
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
def _display_prompt(prompt: str) -> None:
"""Displays the given prompt to the user."""
print(f"\n{prompt}")
def _collect_user_input(
separator: Optional[str] = None, stop: Optional[List[str]] = None
) -> str:
"""Collects and returns user input as a single string."""
separator = separator or "\n"
lines = []
while True:
line = input()
if not line:
break
lines.append(line)
if stop and any(seq in line for seq in stop):
break
# Combine all lines into a single string
multi_line_input = separator.join(lines)
return multi_line_input
[docs]class HumanInputLLM(LLM):
"""
An LLM that displays the prompt to the user and returns their input as the response.
"""
input_func: Callable = Field(default_factory=lambda: _collect_user_input)
prompt_func: Callable[[str], None] = Field(default_factory=lambda: _display_prompt)
separator: str = "\n"
input_kwargs: Mapping[str, Any] = {}
prompt_kwargs: Mapping[str, Any] = {}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""
Returns an empty dictionary as there are no identifying parameters.
"""
return {}
@property
def _llm_type(self) -> str:
"""Returns the type of LLM."""
return "human-input"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""
Displays the prompt to the user and returns their input as a response.
Args:
prompt (str): The prompt to be displayed to the user.
stop (Optional[List[str]]): A list of stop strings.
run_manager (Optional[CallbackManagerForLLMRun]): Currently not used.
Returns:
str: The user's input as a response.
"""
self.prompt_func(prompt, **self.prompt_kwargs)
user_input = self.input_func(
separator=self.separator, stop=stop, **self.input_kwargs
)
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the human themselves
user_input = enforce_stop_tokens(user_input, stop)
return user_input
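A minimal usage sketch for ``HumanInputLLM`` (the question is illustrative): the prompt is
printed to stdout and whatever the user types, terminated by an empty line, becomes the
model's "response":
.. code-block:: python
    from langchain.llms import HumanInputLLM
    llm = HumanInputLLM()
    # Type an answer in the terminal and finish with an empty line.
    response = llm("What is the capital of France?")
    print(response)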
Source code for langchain.llms.octoai_endpoint
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
[docs]class OctoAIEndpoint(LLM):
"""OctoAI LLM Endpoints.
OctoAIEndpoint is a class to interact with OctoAI
Compute Service large language model endpoints.
To use, you should have the ``octoai`` python package installed, and the
environment variable ``OCTOAI_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms.octoai_endpoint import OctoAIEndpoint
OctoAIEndpoint(
octoai_api_token="octoai-api-key",
endpoint_url="https://mpt-7b-demo-kk0powt97tmb.octoai.cloud/generate",
model_kwargs={
"max_new_tokens": 200,
"temperature": 0.75,
"top_p": 0.95,
"repetition_penalty": 1,
"seed": None,
"stop": [],
},
)
"""
endpoint_url: Optional[str] = None
"""Endpoint URL to use."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
octoai_api_token: Optional[str] = None
"""OCTOAI API Token"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(allow_reuse=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
octoai_api_token = get_from_dict_or_env(
values, "octoai_api_token", "OCTOAI_API_TOKEN"
)
values["endpoint_url"] = get_from_dict_or_env(
values, "endpoint_url", "ENDPOINT_URL"
)
values["octoai_api_token"] = octoai_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "octoai_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to OctoAI's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
"""
_model_kwargs = self.model_kwargs or {}
# Prepare the payload JSON
parameter_payload = {"inputs": prompt, "parameters": _model_kwargs}
try:
# Initialize the OctoAI client
from octoai import client
octoai_client = client.Client(token=self.octoai_api_token)
# Send the request using the OctoAI client
resp_json = octoai_client.infer(self.endpoint_url, parameter_payload)
text = resp_json["generated_text"]
except Exception as e:
# Handle any errors raised by the inference endpoint
raise ValueError(f"Error raised by the inference endpoint: {e}") from e
if stop is not None:
# Apply stop tokens when making calls to OctoAI
text = enforce_stop_tokens(text, stop)
return text
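A hedged usage sketch for ``OctoAIEndpoint``, reusing the endpoint URL from the class
docstring above; the token and generation parameters are placeholders:
.. code-block:: python
    from langchain.llms.octoai_endpoint import OctoAIEndpoint
    llm = OctoAIEndpoint(
        octoai_api_token="octoai-api-key",  # or export OCTOAI_API_TOKEN
        endpoint_url="https://mpt-7b-demo-kk0powt97tmb.octoai.cloud/generate",
        model_kwargs={"max_new_tokens": 128, "temperature": 0.75, "top_p": 0.95},
    )
    print(llm("Explain the difference between a llama and an alpaca."))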
Source code for langchain.llms.anthropic
import re
import warnings
from typing import Any, AsyncIterator, Callable, Dict, Iterator, List, Mapping, Optional
from pydantic import root_validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.output import GenerationChunk
from langchain.utils import check_package_version, get_from_dict_or_env
class _AnthropicCommon(BaseLanguageModel):
client: Any = None #: :meta private:
async_client: Any = None #: :meta private:
model: str = "claude-2"
"""Model name to use."""
max_tokens_to_sample: int = 256
"""Denotes the number of tokens to predict per generation."""
temperature: Optional[float] = None
"""A non-negative float that tunes the degree of randomness in generation."""
top_k: Optional[int] = None
"""Number of most likely tokens to consider at each step."""
top_p: Optional[float] = None
"""Total probability mass of tokens to consider at each step."""
streaming: bool = False
"""Whether to stream the results."""
default_request_timeout: Optional[float] = None
"""Timeout for requests to Anthropic Completion API. Default is 600 seconds."""
anthropic_api_url: Optional[str] = None
anthropic_api_key: Optional[str] = None
HUMAN_PROMPT: Optional[str] = None
AI_PROMPT: Optional[str] = None
count_tokens: Optional[Callable[[str], int]] = None
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["anthropic_api_key"] = get_from_dict_or_env(
values, "anthropic_api_key", "ANTHROPIC_API_KEY"
)
# Get custom api url from environment.
values["anthropic_api_url"] = get_from_dict_or_env(
values,
"anthropic_api_url",
"ANTHROPIC_API_URL",
default="https://api.anthropic.com",
)
try:
import anthropic
check_package_version("anthropic", gte_version="0.3")
values["client"] = anthropic.Anthropic(
base_url=values["anthropic_api_url"],
api_key=values["anthropic_api_key"],
timeout=values["default_request_timeout"],
)
values["async_client"] = anthropic.AsyncAnthropic(
base_url=values["anthropic_api_url"],
api_key=values["anthropic_api_key"],
timeout=values["default_request_timeout"],
)
values["HUMAN_PROMPT"] = anthropic.HUMAN_PROMPT
values["AI_PROMPT"] = anthropic.AI_PROMPT
values["count_tokens"] = values["client"].count_tokens
except ImportError:
raise ImportError(
"Could not import anthropic python package. "
"Please it install it with `pip install anthropic`."
)
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling Anthropic API."""
d = {
"max_tokens_to_sample": self.max_tokens_to_sample,
"model": self.model,
}
if self.temperature is not None:
d["temperature"] = self.temperature
if self.top_k is not None:
d["top_k"] = self.top_k
if self.top_p is not None:
d["top_p"] = self.top_p
return d
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{}, **self._default_params}
def _get_anthropic_stop(self, stop: Optional[List[str]] = None) -> List[str]:
if not self.HUMAN_PROMPT or not self.AI_PROMPT:
raise NameError("Please ensure the anthropic package is loaded")
if stop is None:
stop = []
# Never want model to invent new turns of Human / Assistant dialog.
stop.extend([self.HUMAN_PROMPT])
return stop
[docs]class Anthropic(LLM, _AnthropicCommon):
"""Anthropic large language models.
To use, you should have the ``anthropic`` python package installed, and the
environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
import anthropic
from langchain.llms import Anthropic
model = Anthropic(model="<model_name>", anthropic_api_key="my-api-key")
# Simplest invocation, automatically wrapped with HUMAN_PROMPT
# and AI_PROMPT.
response = model("What are the biggest risks facing humanity?")
# Or if you want to use the chat mode, build a few-shot-prompt, or
# put words in the Assistant's mouth, use HUMAN_PROMPT and AI_PROMPT:
raw_prompt = "What are the biggest risks facing humanity?"
prompt = f"{anthropic.HUMAN_PROMPT} {prompt}{anthropic.AI_PROMPT}"
response = model(prompt)
"""
@root_validator()
def raise_warning(cls, values: Dict) -> Dict:
"""Raise warning that this class is deprecated."""
warnings.warn(
"This Anthropic LLM is deprecated. "
"Please use `from langchain.chat_models import ChatAnthropic` instead"
)
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "anthropic-llm"
def _wrap_prompt(self, prompt: str) -> str:
if not self.HUMAN_PROMPT or not self.AI_PROMPT:
raise NameError("Please ensure the anthropic package is loaded")
if prompt.startswith(self.HUMAN_PROMPT):
return prompt # Already wrapped.
# Guard against common errors in specifying wrong number of newlines.
corrected_prompt, n_subs = re.subn(r"^\n*Human:", self.HUMAN_PROMPT, prompt)
if n_subs == 1:
return corrected_prompt
# As a last resort, wrap the prompt ourselves to emulate instruct-style.
return f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT} Sure, here you go:\n"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
r"""Call out to Anthropic's completion endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "What are the biggest risks facing humanity?"
prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
response = model(prompt)
"""
if self.streaming:
completion = ""
for chunk in self._stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
completion += chunk.text
return completion
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
response = self.client.completions.create(
prompt=self._wrap_prompt(prompt),
stop_sequences=stop,
**params,
)
return response.completion
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Anthropic's completion endpoint asynchronously."""
if self.streaming:
completion = ""
async for chunk in self._astream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
completion += chunk.text
return completion
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
response = await self.async_client.completions.create(
prompt=self._wrap_prompt(prompt),
stop_sequences=stop,
**params,
)
return response.completion
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
r"""Call Anthropic completion_stream and return the resulting generator.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens from Anthropic.
Example:
.. code-block:: python
prompt = "Write a poem about a stream."
prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
generator = anthropic.stream(prompt)
for token in generator:
yield token
"""
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
for token in self.client.completions.create(
prompt=self._wrap_prompt(prompt), stop_sequences=stop, stream=True, **params
):
yield GenerationChunk(text=token.completion)
if run_manager:
run_manager.on_llm_new_token(token.completion)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
r"""Call Anthropic completion_stream and return the resulting generator.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens from Anthropic.
Example:
.. code-block:: python
prompt = "Write a poem about a stream."
prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
generator = anthropic.stream(prompt)
for token in generator:
yield token
"""
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
async for token in await self.async_client.completions.create(
prompt=self._wrap_prompt(prompt),
stop_sequences=stop,
stream=True,
**params,
):
yield GenerationChunk(text=token.completion)
if run_manager:
await run_manager.on_llm_new_token(token.completion)
[docs] def get_num_tokens(self, text: str) -> int:
"""Calculate number of tokens."""
if not self.count_tokens:
raise NameError("Please ensure the anthropic package is loaded")
return self.count_tokens(text)
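A short usage sketch for the (deprecated) ``Anthropic`` LLM wrapper; the prompt follows the
Human/Assistant format expected by the completion API, and the API key is read from
``ANTHROPIC_API_KEY`` if not passed explicitly:
.. code-block:: python
    from langchain.llms import Anthropic
    llm = Anthropic(model="claude-2", max_tokens_to_sample=256, temperature=0)
    prompt = "\n\nHuman: What are three uses for a brick?\n\nAssistant:"
    print(llm(prompt))
    print(llm.get_num_tokens(prompt))  # token count via the anthropic client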
Source code for langchain.llms.sagemaker_endpoint
"""Sagemaker InvokeEndpoint API."""
from abc import abstractmethod
from typing import Any, Dict, Generic, List, Mapping, Optional, TypeVar, Union
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
INPUT_TYPE = TypeVar("INPUT_TYPE", bound=Union[str, List[str]])
OUTPUT_TYPE = TypeVar("OUTPUT_TYPE", bound=Union[str, List[List[float]]])
[docs]class ContentHandlerBase(Generic[INPUT_TYPE, OUTPUT_TYPE]):
"""A handler class to transform input from LLM to a
format that SageMaker endpoint expects.
Similarly, the class handles transforming output from the
SageMaker endpoint to a format that LLM class expects.
"""
"""
Example:
.. code-block:: python
class ContentHandler(ContentHandlerBase):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
input_str = json.dumps({"prompt": prompt, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json[0]["generated_text"]
"""
content_type: Optional[str] = "text/plain"
"""The MIME type of the input data passed to endpoint"""
accepts: Optional[str] = "text/plain"
"""The MIME type of the response data returned from endpoint"""
[docs] @abstractmethod
def transform_input(self, prompt: INPUT_TYPE, model_kwargs: Dict) -> bytes:
"""Transforms the input to a format that model can accept
as the request Body. Should return bytes or seekable file
like object in the format specified in the content_type
request header.
"""
[docs] @abstractmethod
def transform_output(self, output: bytes) -> OUTPUT_TYPE:
"""Transforms the output from the model to string that
the LLM class expects.
"""
[docs]class LLMContentHandler(ContentHandlerBase[str, str]):
"""Content handler for LLM class."""
[docs]class SagemakerEndpoint(LLM):
"""Sagemaker Inference Endpoint models.
To use, you must supply the endpoint name from your deployed
Sagemaker model & the region where it is deployed.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Sagemaker endpoint.
See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
"""
"""
Example:
.. code-block:: python
from langchain import SagemakerEndpoint
endpoint_name = (
"my-endpoint-name"
)
region_name = (
"us-west-2"
)
credentials_profile_name = (
"default"
)
se = SagemakerEndpoint(
endpoint_name=endpoint_name,
region_name=region_name,
credentials_profile_name=credentials_profile_name
)
"""
client: Any #: :meta private:
endpoint_name: str = ""
"""The name of the endpoint from the deployed Sagemaker model.
Must be unique within an AWS Region."""
region_name: str = ""
"""The aws region where the Sagemaker model is deployed, eg. `us-west-2`."""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
content_handler: LLMContentHandler
"""The content handler class that provides an input and
output transform functions to handle formats between LLM
and the endpoint.
"""
"""
Example:
.. code-block:: python
from langchain.llms.sagemaker_endpoint import LLMContentHandler
class ContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
input_str = json.dumps({"prompt": prompt, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json[0]["generated_text"]
"""
model_kwargs: Optional[Dict] = None
"""Key word arguments to pass to the model."""
endpoint_kwargs: Optional[Dict] = None
"""Optional attributes passed to the invoke_endpoint
function. See `boto3`_. docs for more info.
.. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html>
"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that AWS credentials to and python package exists in environment."""
try:
import boto3
try:
if values["credentials_profile_name"] is not None:
session = boto3.Session(
profile_name=values["credentials_profile_name"]
)
else:
# use default credentials
session = boto3.Session()
values["client"] = session.client(
"sagemaker-runtime", region_name=values["region_name"]
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
except ImportError:
raise ImportError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_name": self.endpoint_name},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "sagemaker_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Sagemaker inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = se("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
_model_kwargs = {**_model_kwargs, **kwargs}
_endpoint_kwargs = self.endpoint_kwargs or {}
body = self.content_handler.transform_input(prompt, _model_kwargs)
content_type = self.content_handler.content_type
accepts = self.content_handler.accepts
# send request
try:
response = self.client.invoke_endpoint(
EndpointName=self.endpoint_name,
Body=body,
ContentType=content_type,
Accept=accepts,
**_endpoint_kwargs,
)
except Exception as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
text = self.content_handler.transform_output(response["Body"])
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to the sagemaker endpoint.
text = enforce_stop_tokens(text, stop)
return text
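Putting the pieces together, a hedged end-to-end sketch for ``SagemakerEndpoint`` with a
custom ``LLMContentHandler``. The endpoint name is hypothetical, and the JSON payload shape
("inputs"/"parameters" in the request, ``generated_text`` in the response) is an assumption
that matches Hugging Face text-generation containers; adapt it to whatever your deployed
model expects:
.. code-block:: python
    import json
    from typing import Dict
    from langchain.llms.sagemaker_endpoint import LLMContentHandler, SagemakerEndpoint
    class ContentHandler(LLMContentHandler):
        content_type = "application/json"
        accepts = "application/json"
        def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
            # Assumed payload shape; not universal across SageMaker containers.
            return json.dumps({"inputs": prompt, "parameters": model_kwargs}).encode("utf-8")
        def transform_output(self, output: bytes) -> str:
            response_json = json.loads(output.read().decode("utf-8"))
            return response_json[0]["generated_text"]
    llm = SagemakerEndpoint(
        endpoint_name="my-endpoint-name",      # hypothetical endpoint
        region_name="us-west-2",
        credentials_profile_name="default",
        content_handler=ContentHandler(),
        model_kwargs={"temperature": 0.7},
    )
    print(llm("Tell me a joke."))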
Source code for langchain.llms.textgen
import logging
from typing import Any, Dict, List, Optional
import requests
from pydantic import Field
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
logger = logging.getLogger(__name__)
[docs]class TextGen(LLM):
"""text-generation-webui models.
To use, you should have the text-generation-webui installed, a model loaded,
and --api added as a command-line option.
Suggested installation, use one-click installer for your OS:
https://github.com/oobabooga/text-generation-webui#one-click-installers
Parameters below taken from text-generation-webui api example:
https://github.com/oobabooga/text-generation-webui/blob/main/api-examples/api-example.py
Example:
.. code-block:: python
from langchain.llms import TextGen
llm = TextGen(model_url="http://localhost:8500")
"""
model_url: str
"""The full URL to the textgen webui including http[s]://host:port """
preset: Optional[str] = None
"""The preset to use in the textgen webui """
max_new_tokens: Optional[int] = 250
"""The maximum number of tokens to generate."""
do_sample: bool = Field(True, alias="do_sample")
"""Do sample"""
temperature: Optional[float] = 1.3
"""Primary factor to control randomness of outputs. 0 = deterministic
(only the most likely token is used). Higher value = more randomness."""
top_p: Optional[float] = 0.1
"""If not set to 1, select tokens with probabilities adding up to less than this
number. Higher value = higher range of possible random results."""
typical_p: Optional[float] = 1
"""If not set to 1, select only tokens that are at least this much more likely to
appear than random tokens, given the prior text."""
epsilon_cutoff: Optional[float] = 0 # In units of 1e-4
"""Epsilon cutoff"""
eta_cutoff: Optional[float] = 0 # In units of 1e-4
"""ETA cutoff"""
repetition_penalty: Optional[float] = 1.18
"""Exponential penalty factor for repeating prior tokens. 1 means no penalty,
higher value = less repetition, lower value = more repetition."""
top_k: Optional[float] = 40
"""Similar to top_p, but select instead only the top_k most likely tokens.
Higher value = higher range of possible random results."""
min_length: Optional[int] = 0
"""Minimum generation length in tokens."""
no_repeat_ngram_size: Optional[int] = 0
"""If not set to 0, specifies the length of token sets that are completely blocked
from repeating at all. Higher values = blocks larger phrases,
lower values = blocks words or letters from repeating.
Only 0 or high values are a good idea in most cases."""
num_beams: Optional[int] = 1
"""Number of beams"""
penalty_alpha: Optional[float] = 0
"""Penalty Alpha"""
length_penalty: Optional[float] = 1
"""Length Penalty"""
early_stopping: bool = Field(False, alias="early_stopping")
"""Early stopping"""
seed: int = Field(-1, alias="seed")
"""Seed (-1 for random)"""
add_bos_token: bool = Field(True, alias="add_bos_token")
"""Add the bos_token to the beginning of prompts.
Disabling this can make the replies more creative."""
truncation_length: Optional[int] = 2048
"""Truncate the prompt up to this length. The leftmost tokens are removed if
the prompt exceeds this length. Most models require this to be at most 2048."""
ban_eos_token: bool = Field(False, alias="ban_eos_token")
"""Ban the eos_token. Forces the model to never end the generation prematurely."""
skip_special_tokens: bool = Field(True, alias="skip_special_tokens")
"""Skip special tokens. Some specific models need this unset."""
stopping_strings: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
streaming: bool = False
"""Whether to stream the results, token by token (currently unimplemented)."""
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling textgen."""
return {
"max_new_tokens": self.max_new_tokens,
"do_sample": self.do_sample,
"temperature": self.temperature,
"top_p": self.top_p,
"typical_p": self.typical_p,
"epsilon_cutoff": self.epsilon_cutoff,
"eta_cutoff": self.eta_cutoff,
"repetition_penalty": self.repetition_penalty,
"top_k": self.top_k,
"min_length": self.min_length,
"no_repeat_ngram_size": self.no_repeat_ngram_size,
"num_beams": self.num_beams,
"penalty_alpha": self.penalty_alpha,
"length_penalty": self.length_penalty,
"early_stopping": self.early_stopping,
"seed": self.seed,
"add_bos_token": self.add_bos_token,
"truncation_length": self.truncation_length,
"ban_eos_token": self.ban_eos_token,
"skip_special_tokens": self.skip_special_tokens,
"stopping_strings": self.stopping_strings,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model_url": self.model_url}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "textgen"
def _get_parameters(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:
"""
Performs sanity check, preparing parameters in format needed by textgen.
Args:
stop (Optional[List[str]]): List of stop sequences for textgen.
Returns:
Dictionary containing the combined parameters.
"""
# Raise error if stop sequences are in both input and default params
# if self.stop and stop is not None:
if self.stopping_strings and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
if self.preset is None:
params = self._default_params
else:
params = {"preset": self.preset}
# then sets it as configured, or default to an empty list:
params["stop"] = self.stopping_strings or stop or []
return params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the textgen web API and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain.llms import TextGen
llm = TextGen(model_url="http://localhost:5000")
llm("Write a story about llamas.")
"""
if self.streaming:
raise ValueError("`streaming` option currently unsupported.")
url = f"{self.model_url}/api/v1/generate"
params = self._get_parameters(stop)
request = params.copy()
request["prompt"] = prompt
response = requests.post(url, json=request)
if response.status_code == 200:
result = response.json()["results"][0]["text"]
print(prompt + result)
else:
print(f"ERROR: response: {response}")
result = ""
return result
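A minimal usage sketch for ``TextGen`` against a local text-generation-webui instance started
with ``--api`` (the URL and stop string are illustrative):
.. code-block:: python
    from langchain.llms import TextGen
    llm = TextGen(
        model_url="http://localhost:5000",
        max_new_tokens=200,
        temperature=0.7,
        stopping_strings=["\nHuman:"],  # sent to the server via the "stop" parameter
    )
    print(llm("Write a short story about llamas."))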
Source code for langchain.llms.nlpcloud
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
[docs]class NLPCloud(LLM):
"""NLPCloud large language models.
To use, you should have the ``nlpcloud`` python package installed, and the
environment variable ``NLPCLOUD_API_KEY`` set with your API key.
Example:
.. code-block:: python
from langchain.llms import NLPCloud
nlpcloud = NLPCloud(model="finetuned-gpt-neox-20b")
"""
client: Any #: :meta private:
model_name: str = "finetuned-gpt-neox-20b"
"""Model name to use."""
gpu: bool = True
"""Whether to use a GPU or not"""
lang: str = "en"
"""Language to use (multilingual addon)"""
temperature: float = 0.7
"""What sampling temperature to use."""
min_length: int = 1
"""The minimum number of tokens to generate in the completion."""
max_length: int = 256
"""The maximum number of tokens to generate in the completion."""
length_no_input: bool = True
"""Whether min_length and max_length should include the length of the input."""
remove_input: bool = True
"""Remove input text from API response"""
remove_end_sequence: bool = True
"""Whether or not to remove the end sequence token."""
bad_words: List[str] = []
"""List of tokens not allowed to be generated."""
top_p: int = 1
"""Total probability mass of tokens to consider at each step."""
top_k: int = 50
"""The number of highest probability tokens to keep for top-k filtering."""
repetition_penalty: float = 1.0
"""Penalizes repeated tokens. 1.0 means no penalty."""
length_penalty: float = 1.0
"""Exponential penalty to the length."""
do_sample: bool = True
"""Whether to use sampling (True) or greedy decoding."""
num_beams: int = 1
"""Number of beams for beam search."""
early_stopping: bool = False
"""Whether to stop beam search at num_beams sentences."""
num_return_sequences: int = 1
"""How many completions to generate for each prompt."""
nlpcloud_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
nlpcloud_api_key = get_from_dict_or_env(
values, "nlpcloud_api_key", "NLPCLOUD_API_KEY"
)
try:
import nlpcloud
values["client"] = nlpcloud.Client(
values["model_name"],
nlpcloud_api_key,
gpu=values["gpu"],
lang=values["lang"],
)
except ImportError:
raise ImportError(
"Could not import nlpcloud python package. "
"Please install it with `pip install nlpcloud`."
)
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling NLPCloud API."""
return {
"temperature": self.temperature,
"min_length": self.min_length,
"max_length": self.max_length,
"length_no_input": self.length_no_input,
"remove_input": self.remove_input,
"remove_end_sequence": self.remove_end_sequence,
"bad_words": self.bad_words,
"top_p": self.top_p,
"top_k": self.top_k,
"repetition_penalty": self.repetition_penalty,
"length_penalty": self.length_penalty,
"do_sample": self.do_sample,
"num_beams": self.num_beams,
"early_stopping": self.early_stopping,
"num_return_sequences": self.num_return_sequences,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_name": self.model_name},
**{"gpu": self.gpu},
**{"lang": self.lang},
**self._default_params,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "nlpcloud"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to NLPCloud's create endpoint.
Args:
prompt: The prompt to pass into the model.
stop: A single stop sequence, passed as a list of length 1.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = nlpcloud("Tell me a joke.")
"""
if stop and len(stop) > 1:
raise ValueError(
"NLPCloud only supports a single stop sequence per generation."
"Pass in a list of length 1."
)
elif stop and len(stop) == 1:
end_sequence = stop[0]
else:
end_sequence = None
params = {**self._default_params, **kwargs}
response = self.client.generation(prompt, end_sequence=end_sequence, **params)
return response["generated_text"]
Source code for langchain.llms.aleph_alpha
from typing import Any, Dict, List, Optional, Sequence
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
[docs]class AlephAlpha(LLM):
"""Aleph Alpha large language models.
To use, you should have the ``aleph_alpha_client`` python package installed, and the
environment variable ``ALEPH_ALPHA_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Parameters are explained more in depth here:
https://github.com/Aleph-Alpha/aleph-alpha-client/blob/c14b7dd2b4325c7da0d6a119f6e76385800e097b/aleph_alpha_client/completion.py#L10
Example:
.. code-block:: python
from langchain.llms import AlephAlpha
aleph_alpha = AlephAlpha(aleph_alpha_api_key="my-api-key")
"""
client: Any #: :meta private:
model: Optional[str] = "luminous-base"
"""Model name to use."""
maximum_tokens: int = 64
"""The maximum number of tokens to be generated."""
temperature: float = 0.0
"""A non-negative float that tunes the degree of randomness in generation."""
top_k: int = 0
"""Number of most likely tokens to consider at each step."""
top_p: float = 0.0
"""Total probability mass of tokens to consider at each step."""
presence_penalty: float = 0.0
"""Penalizes repeated tokens."""
frequency_penalty: float = 0.0
"""Penalizes repeated tokens according to frequency."""
repetition_penalties_include_prompt: Optional[bool] = False
"""Flag deciding whether presence penalty or frequency penalty are
updated from the prompt."""
use_multiplicative_presence_penalty: Optional[bool] = False
"""Flag deciding whether presence penalty is applied
multiplicatively (True) or additively (False)."""
penalty_bias: Optional[str] = None
"""Penalty bias for the completion."""
penalty_exceptions: Optional[List[str]] = None
"""List of strings that may be generated without penalty,
regardless of other penalty settings"""
penalty_exceptions_include_stop_sequences: Optional[bool] = None
"""Should stop_sequences be included in penalty_exceptions."""
best_of: Optional[int] = None
"""returns the one with the "best of" results
(highest log probability per token)
"""
n: int = 1
"""How many completions to generate for each prompt."""
logit_bias: Optional[Dict[int, float]] = None
"""The logit bias allows to influence the likelihood of generating tokens."""
log_probs: Optional[int] = None
"""Number of top log probabilities to be returned for each generated token."""
tokens: Optional[bool] = False
"""return tokens of completion."""
disable_optimizations: Optional[bool] = False
minimum_tokens: Optional[int] = 0
"""Generate at least this number of tokens."""
echo: bool = False
"""Echo the prompt in the completion."""
use_multiplicative_frequency_penalty: bool = False
sequence_penalty: float = 0.0
sequence_penalty_min_length: int = 2
use_multiplicative_sequence_penalty: bool = False
completion_bias_inclusion: Optional[Sequence[str]] = None
completion_bias_inclusion_first_token_only: bool = False
completion_bias_exclusion: Optional[Sequence[str]] = None
completion_bias_exclusion_first_token_only: bool = False
"""Only consider the first token for the completion_bias_exclusion."""
contextual_control_threshold: Optional[float] = None
"""If set to None, attention control parameters only apply to those tokens that have
explicitly been set in the request.
If set to a non-None value, control parameters are also applied to similar tokens.
"""
control_log_additive: Optional[bool] = True
"""True: apply control by adding the log(control_factor) to attention scores.
False: (attention_scores - attention_scores.min(-1)) * control_factor
"""
repetition_penalties_include_completion: bool = True
"""Flag deciding whether presence penalty or frequency penalty
are updated from the completion."""
raw_completion: bool = False
"""Force the raw completion of the model to be returned."""
stop_sequences: Optional[List[str]] = None
"""Stop sequences to use."""
# Client params
aleph_alpha_api_key: Optional[str] = None
"""API key for Aleph Alpha API."""
host: str = "https://api.aleph-alpha.com"
"""The hostname of the API host.
The default one is "https://api.aleph-alpha.com"."""
hosting: Optional[str] = None
"""Determines in which datacenters the request may be processed.
You can either set the parameter to "aleph-alpha" or omit it (defaulting to None).
Not setting this value, or setting it to None, gives us maximal
flexibility in processing your request in our
own datacenters and on servers hosted with other providers.
Choose this option for maximal availability.
Setting it to "aleph-alpha" allows us to only process the
request in our own datacenters.
Choose this option for maximal data privacy."""
request_timeout_seconds: int = 305
"""Client timeout that will be set for HTTP requests in the
`requests` library's API calls.
Server will close all requests after 300 seconds with an internal server error."""
total_retries: int = 8
"""The number of retries made in case requests fail with certain retryable
status codes. If the last
retry fails a corresponding exception is raised. Note, that between retries
an exponential backoff
is applied, starting with 0.5 s after the first retry and doubling for
each retry made. So with the
default setting of 8 retries a total wait time of 63.5 s is added
between the retries."""
nice: bool = False
"""Setting this to True, will signal to the API that you intend to be
nice to other users
by de-prioritizing your request below concurrent ones."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
aleph_alpha_api_key = get_from_dict_or_env(
values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY"
)
try:
from aleph_alpha_client import Client
values["client"] = Client(
token=aleph_alpha_api_key,
host=values["host"],
hosting=values["hosting"],
request_timeout_seconds=values["request_timeout_seconds"],
total_retries=values["total_retries"],
nice=values["nice"],
)
except ImportError:
raise ImportError(
"Could not import aleph_alpha_client python package. "
"Please install it with `pip install aleph_alpha_client`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling the Aleph Alpha API."""
return {
"maximum_tokens": self.maximum_tokens,
"temperature": self.temperature,
"top_k": self.top_k,
"top_p": self.top_p,
"presence_penalty": self.presence_penalty,
"frequency_penalty": self.frequency_penalty,
"n": self.n,
"repetition_penalties_include_prompt": self.repetition_penalties_include_prompt, # noqa: E501
"use_multiplicative_presence_penalty": self.use_multiplicative_presence_penalty, # noqa: E501
"penalty_bias": self.penalty_bias,
"penalty_exceptions": self.penalty_exceptions,
"penalty_exceptions_include_stop_sequences": self.penalty_exceptions_include_stop_sequences, # noqa: E501
"best_of": self.best_of,
"logit_bias": self.logit_bias,
"log_probs": self.log_probs,
"tokens": self.tokens,
"disable_optimizations": self.disable_optimizations,
"minimum_tokens": self.minimum_tokens,
"echo": self.echo,
"use_multiplicative_frequency_penalty": self.use_multiplicative_frequency_penalty, # noqa: E501
"sequence_penalty": self.sequence_penalty,
"sequence_penalty_min_length": self.sequence_penalty_min_length,
"use_multiplicative_sequence_penalty": self.use_multiplicative_sequence_penalty, # noqa: E501
"completion_bias_inclusion": self.completion_bias_inclusion,
"completion_bias_inclusion_first_token_only": self.completion_bias_inclusion_first_token_only, # noqa: E501
"completion_bias_exclusion": self.completion_bias_exclusion,
"completion_bias_exclusion_first_token_only": self.completion_bias_exclusion_first_token_only, # noqa: E501
"contextual_control_threshold": self.contextual_control_threshold,
"control_log_additive": self.control_log_additive,
"repetition_penalties_include_completion": self.repetition_penalties_include_completion, # noqa: E501
"raw_completion": self.raw_completion,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "aleph_alpha"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Aleph Alpha's completion endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = aleph_alpha("Tell me a joke.")
"""
from aleph_alpha_client import CompletionRequest, Prompt
params = self._default_params
if self.stop_sequences is not None and stop is not None:
raise ValueError(
"stop sequences found in both the input and default params."
)
elif self.stop_sequences is not None:
params["stop_sequences"] = self.stop_sequences
else:
params["stop_sequences"] = stop
params = {**params, **kwargs}
request = CompletionRequest(prompt=Prompt.from_text(prompt), **params)
response = self.client.complete(model=self.model, request=request)
text = response.completions[0].completion
# If stop tokens are provided, Aleph Alpha's endpoint returns them.
# In order to make this consistent with other endpoints, we strip them.
if stop is not None or self.stop_sequences is not None:
text = enforce_stop_tokens(text, params["stop_sequences"])
return text
if __name__ == "__main__":
aa = AlephAlpha()
print(aa("How are you?"))
Source code for langchain.llms.openai
from __future__ import annotations
import logging
import sys
import warnings
from typing import (
AbstractSet,
Any,
AsyncIterator,
Callable,
Collection,
Dict,
Iterator,
List,
Literal,
Mapping,
Optional,
Set,
Tuple,
Union,
)
from pydantic import Field, root_validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import BaseLLM, create_base_retry_decorator
from langchain.schema import Generation, LLMResult
from langchain.schema.output import GenerationChunk
from langchain.utils import get_from_dict_or_env, get_pydantic_field_names
from langchain.utils.utils import build_extra_kwargs
logger = logging.getLogger(__name__)
[docs]def update_token_usage(
keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any]
) -> None:
"""Update token usage."""
_keys_to_use = keys.intersection(response["usage"])
for _key in _keys_to_use:
if _key not in token_usage:
token_usage[_key] = response["usage"][_key]
else:
token_usage[_key] += response["usage"][_key]
def _stream_response_to_generation_chunk(
stream_response: Dict[str, Any],
) -> GenerationChunk:
"""Convert a stream response to a generation chunk."""
return GenerationChunk(
text=stream_response["choices"][0]["text"],
generation_info=dict(
finish_reason=stream_response["choices"][0].get("finish_reason", None),
logprobs=stream_response["choices"][0].get("logprobs", None),
),
)
def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None:
"""Update response from the stream response."""
response["choices"][0]["text"] += stream_response["choices"][0]["text"]
response["choices"][0]["finish_reason"] = stream_response["choices"][0].get(
"finish_reason", None
)
response["choices"][0]["logprobs"] = stream_response["choices"][0]["logprobs"]
def _streaming_response_template() -> Dict[str, Any]:
return {
"choices": [
{
"text": "",
"finish_reason": None,
"logprobs": None,
}
]
}
def _create_retry_decorator(
llm: Union[BaseOpenAI, OpenAIChat],
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
import openai
errors = [
openai.error.Timeout,
openai.error.APIError,
openai.error.APIConnectionError,
openai.error.RateLimitError,
openai.error.ServiceUnavailableError,
]
return create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
)
[docs]def completion_with_retry(
llm: Union[BaseOpenAI, OpenAIChat],
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return llm.client.create(**kwargs)
return _completion_with_retry(**kwargs)
[docs]async def acompletion_with_retry(
llm: Union[BaseOpenAI, OpenAIChat],
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
return await _completion_with_retry(**kwargs)
[docs]class BaseOpenAI(BaseLLM):
"""Base OpenAI large language model class."""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"openai_api_key": "OPENAI_API_KEY"}
@property
def lc_serializable(self) -> bool:
return True
client: Any #: :meta private:
model_name: str = Field("text-davinci-003", alias="model")
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion.
-1 returns as many tokens as possible given the prompt and
the model's maximal context size."""
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: float = 0
"""Penalizes repeated tokens."""
n: int = 1
"""How many completions to generate for each prompt."""
best_of: int = 1
"""Generates best_of completions server-side and returns the "best"."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: Optional[str] = None
openai_api_base: Optional[str] = None
openai_organization: Optional[str] = None
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = None
batch_size: int = 20
"""Batch size to use when passing multiple documents to generate."""
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout for requests to OpenAI completion API. Default is 600 seconds."""
logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
"""Adjust the probability of specific tokens being generated."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
allowed_special: Union[Literal["all"], AbstractSet[str]] = set()
"""Set of special tokens that are allowed。"""
disallowed_special: Union[Literal["all"], Collection[str]] = "all"
"""Set of special tokens that are not allowed。"""
tiktoken_model_name: Optional[str] = None
"""The model name to pass to tiktoken when using this class.
Tiktoken is used to count the number of tokens in documents to constrain
them to be under a certain limit. By default, when set to None, this will
be the same as the embedding model name. However, there are some cases
where you may want to use this Embedding class with a model name not
supported by tiktoken. This can include when using Azure embeddings or
when using one of the many model providers that expose an OpenAI-like
API but with different models. In those cases, in order to avoid erroring
when tiktoken is called, you can specify a model name to use here."""
def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]: # type: ignore
"""Initialize the OpenAI object."""
model_name = data.get("model_name", "")
if model_name.startswith("gpt-3.5-turbo") or model_name.startswith("gpt-4"):
warnings.warn(
"You are trying to use a chat model. This way of initializing it is "
"no longer supported. Instead, please use: "
"`from langchain.chat_models import ChatOpenAI`"
)
return OpenAIChat(**data)
return super().__new__(cls)
class Config:
"""Configuration for this pydantic object."""
allow_population_by_field_name = True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
values["model_kwargs"] = build_extra_kwargs(
extra, values, all_required_field_names
)
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["openai_api_key"] = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
values["openai_api_base"] = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
default="",
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
values["openai_organization"] = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
values["client"] = openai.Completion
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if values["streaming"] and values["n"] > 1:
raise ValueError("Cannot stream results when n > 1.")
if values["streaming"] and values["best_of"] > 1:
raise ValueError("Cannot stream results when best_of > 1.")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
normal_params = {
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"n": self.n,
"request_timeout": self.request_timeout,
"logit_bias": self.logit_bias,
}
# Azure gpt-35-turbo doesn't support best_of
# don't specify best_of if it is 1
if self.best_of > 1:
normal_params["best_of"] = self.best_of
return {**normal_params, **self.model_kwargs}
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
params = {**self._invocation_params, **kwargs, "stream": True}
self.get_sub_prompts(params, [prompt], stop) # this mutates params
for stream_resp in completion_with_retry(
self, prompt=prompt, run_manager=run_manager, **params
):
chunk = _stream_response_to_generation_chunk(stream_resp)
yield chunk
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
verbose=self.verbose,
logprobs=chunk.generation_info["logprobs"]
if chunk.generation_info
else None,
)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
params = {**self._invocation_params, **kwargs, "stream": True}
self.get_sub_prompts(params, [prompt], stop)  # this mutates params
async for stream_resp in await acompletion_with_retry(
self, prompt=prompt, run_manager=run_manager, **params
):
chunk = _stream_response_to_generation_chunk(stream_resp)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(
chunk.text,
verbose=self.verbose,
logprobs=chunk.generation_info["logprobs"]
if chunk.generation_info
else None,
)
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to OpenAI's endpoint with k unique prompts.
Args:
prompts: The prompts to pass into the model.
|
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html
|
9305743edd50-8
|
Args:
prompts: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The full LLM output.
Example:
.. code-block:: python
response = openai.generate(["Tell me a joke."])
"""
# TODO: write a unit test for this
params = self._invocation_params
params = {**params, **kwargs}
sub_prompts = self.get_sub_prompts(params, prompts, stop)
choices = []
token_usage: Dict[str, int] = {}
# Get the token usage from the response.
# Includes prompt, completion, and total tokens used.
_keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
for _prompts in sub_prompts:
if self.streaming:
if len(_prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.")
generation: Optional[GenerationChunk] = None
for chunk in self._stream(_prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
choices.append(
{
"text": generation.text,
"finish_reason": generation.generation_info.get("finish_reason")
if generation.generation_info
else None,
"logprobs": generation.generation_info.get("logprobs")
if generation.generation_info
else None,
}
)
else:
response = completion_with_retry(
self, prompt=_prompts, run_manager=run_manager, **params
)
choices.extend(response["choices"])
|
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html
|
9305743edd50-9
|
)
choices.extend(response["choices"])
update_token_usage(_keys, response, token_usage)
return self.create_llm_result(choices, prompts, token_usage)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to OpenAI's endpoint async with k unique prompts."""
params = self._invocation_params
params = {**params, **kwargs}
sub_prompts = self.get_sub_prompts(params, prompts, stop)
choices = []
token_usage: Dict[str, int] = {}
# Get the token usage from the response.
# Includes prompt, completion, and total tokens used.
_keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
for _prompts in sub_prompts:
if self.streaming:
if len(_prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.")
generation: Optional[GenerationChunk] = None
async for chunk in self._astream(
_prompts[0], stop, run_manager, **kwargs
):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
choices.append(
{
"text": generation.text,
"finish_reason": generation.generation_info.get("finish_reason")
if generation.generation_info
else None,
"logprobs": generation.generation_info.get("logprobs")
if generation.generation_info
else None,
}
)
|
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html
|
9305743edd50-10
|
if generation.generation_info
else None,
}
)
else:
response = await acompletion_with_retry(
self, prompt=_prompts, run_manager=run_manager, **params
)
choices.extend(response["choices"])
update_token_usage(_keys, response, token_usage)
return self.create_llm_result(choices, prompts, token_usage)
[docs] def get_sub_prompts(
self,
params: Dict[str, Any],
prompts: List[str],
stop: Optional[List[str]] = None,
) -> List[List[str]]:
"""Get the sub prompts for llm call."""
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
if params["max_tokens"] == -1:
if len(prompts) != 1:
raise ValueError(
"max_tokens set to -1 not supported for multiple inputs."
)
params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
sub_prompts = [
prompts[i : i + self.batch_size]
for i in range(0, len(prompts), self.batch_size)
]
return sub_prompts
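# Illustrative sketch (not part of the module source): prompts are batched in
# slices of ``batch_size`` before being sent to the API. With the default batch
# size all six made-up prompts below would go in one call; with ``batch_size=2``
# they are split into three sub-lists.
#
# prompts = ["p1", "p2", "p3", "p4", "p5", "p6"]
# batch_size = 2
# [prompts[i : i + batch_size] for i in range(0, len(prompts), batch_size)]
# # -> [["p1", "p2"], ["p3", "p4"], ["p5", "p6"]]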
[docs] def create_llm_result(
self, choices: Any, prompts: List[str], token_usage: Dict[str, int]
) -> LLMResult:
"""Create the LLMResult from the choices and prompts."""
generations = []
for i, _ in enumerate(prompts):
sub_choices = choices[i * self.n : (i + 1) * self.n]
generations.append(
[
|
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html
|
9305743edd50-11
|
generations.append(
[
Generation(
text=choice["text"],
generation_info=dict(
finish_reason=choice.get("finish_reason"),
logprobs=choice.get("logprobs"),
),
)
for choice in sub_choices
]
)
llm_output = {"token_usage": token_usage, "model_name": self.model_name}
return LLMResult(generations=generations, llm_output=llm_output)
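# Illustrative sketch (not part of the module source): the completion API returns
# ``n`` choices per prompt in order, so slicing ``choices[i * n : (i + 1) * n]``
# regroups them by originating prompt. The values below are made up.
#
# n = 2
# choices = ["a1", "a2", "b1", "b2"]   # two prompts, two choices each
# [choices[i * n : (i + 1) * n] for i in range(2)]
# # -> [["a1", "a2"], ["b1", "b2"]]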
@property
def _invocation_params(self) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
openai_creds: Dict[str, Any] = {
"api_key": self.openai_api_key,
"api_base": self.openai_api_base,
"organization": self.openai_organization,
}
if self.openai_proxy:
import openai
openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501
return {**openai_creds, **self._default_params}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "openai"
[docs] def get_token_ids(self, text: str) -> List[int]:
"""Get the token IDs using the tiktoken package."""
# tiktoken NOT supported for Python < 3.8
if sys.version_info[1] < 8:
|
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html
|
9305743edd50-12
|
if sys.version_info[1] < 8:
return super().get_token_ids(text)
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"This is needed in order to calculate get_num_tokens. "
"Please install it with `pip install tiktoken`."
)
model_name = self.tiktoken_model_name or self.model_name
try:
enc = tiktoken.encoding_for_model(model_name)
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
model = "cl100k_base"
enc = tiktoken.get_encoding(model)
return enc.encode(
text,
allowed_special=self.allowed_special,
disallowed_special=self.disallowed_special,
)
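# Illustrative sketch (not part of the module source): token IDs come from the
# tiktoken encoding for the configured model, so counts can differ from naive
# whitespace splitting. Assumes Python >= 3.8, ``tiktoken`` installed, and
# ``OPENAI_API_KEY`` set; the text is arbitrary.
#
# llm = OpenAI()
# ids = llm.get_token_ids("hello world")
# len(ids)   # number of tokens, typically 2 for this string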
[docs] @staticmethod
def modelname_to_contextsize(modelname: str) -> int:
"""Calculate the maximum number of tokens possible to generate for a model.
Args:
modelname: The modelname we want to know the context size for.
Returns:
The maximum context size
Example:
.. code-block:: python
max_tokens = openai.modelname_to_contextsize("text-davinci-003")
"""
model_token_mapping = {
"gpt-4": 8192,
"gpt-4-0314": 8192,
"gpt-4-0613": 8192,
"gpt-4-32k": 32768,
"gpt-4-32k-0314": 32768,
"gpt-4-32k-0613": 32768,
|
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html
|
9305743edd50-13
|
"gpt-4-32k-0613": 32768,
"gpt-3.5-turbo": 4096,
"gpt-3.5-turbo-0301": 4096,
"gpt-3.5-turbo-0613": 4096,
"gpt-3.5-turbo-16k": 16385,
"gpt-3.5-turbo-16k-0613": 16385,
"text-ada-001": 2049,
"ada": 2049,
"text-babbage-001": 2040,
"babbage": 2049,
"text-curie-001": 2049,
"curie": 2049,
"davinci": 2049,
"text-davinci-003": 4097,
"text-davinci-002": 4097,
"code-davinci-002": 8001,
"code-davinci-001": 8001,
"code-cushman-002": 2048,
"code-cushman-001": 2048,
}
# handling finetuned models
if "ft-" in modelname:
modelname = modelname.split(":")[0]
context_size = model_token_mapping.get(modelname, None)
if context_size is None:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid OpenAI model name."
"Known models are: " + ", ".join(model_token_mapping.keys())
)
return context_size
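# Illustrative sketch (not part of the module source): fine-tuned model names
# carry the base model before the first ":", so stripping the suffix maps them
# back to the base model's context size. The fine-tuned name below is made up.
#
# OpenAI.modelname_to_contextsize("davinci:ft-your-org-2023-03-03")   # 2049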
@property
def max_context_size(self) -> int:
|
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html
|
9305743edd50-14
|
return context_size
@property
def max_context_size(self) -> int:
"""Get max context size for this model."""
return self.modelname_to_contextsize(self.model_name)
[docs] def max_tokens_for_prompt(self, prompt: str) -> int:
"""Calculate the maximum number of tokens possible to generate for a prompt.
Args:
prompt: The prompt to pass into the model.
Returns:
The maximum number of tokens to generate for a prompt.
Example:
.. code-block:: python
max_tokens = openai.max_tokens_for_prompt("Tell me a joke.")
"""
num_tokens = self.get_num_tokens(prompt)
return self.max_context_size - num_tokens
[docs]class OpenAI(BaseOpenAI):
"""OpenAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import OpenAI
openai = OpenAI(model_name="text-davinci-003")
"""
@property
def _invocation_params(self) -> Dict[str, Any]:
return {**{"model": self.model_name}, **super()._invocation_params}
[docs]class AzureOpenAI(BaseOpenAI):
"""Azure-specific OpenAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
|
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html
|
9305743edd50-15
|
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import AzureOpenAI
openai = AzureOpenAI(model_name="text-davinci-003")
"""
deployment_name: str = ""
"""Deployment name to use."""
openai_api_type: str = ""
openai_api_version: str = ""
@root_validator()
def validate_azure_settings(cls, values: Dict) -> Dict:
values["openai_api_version"] = get_from_dict_or_env(
values,
"openai_api_version",
"OPENAI_API_VERSION",
)
values["openai_api_type"] = get_from_dict_or_env(
values, "openai_api_type", "OPENAI_API_TYPE", "azure"
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {
**{"deployment_name": self.deployment_name},
**super()._identifying_params,
}
@property
def _invocation_params(self) -> Dict[str, Any]:
openai_params = {
"engine": self.deployment_name,
"api_type": self.openai_api_type,
"api_version": self.openai_api_version,
}
return {**openai_params, **super()._invocation_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "azure"
[docs]class OpenAIChat(BaseLLM):
|
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html
|
9305743edd50-16
|
return "azure"
[docs]class OpenAIChat(BaseLLM):
"""OpenAI Chat large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import OpenAIChat
openaichat = OpenAIChat(model_name="gpt-3.5-turbo")
"""
client: Any #: :meta private:
model_name: str = "gpt-3.5-turbo"
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: Optional[str] = None
openai_api_base: Optional[str] = None
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = None
max_retries: int = 6
"""Maximum number of retries to make when generating."""
prefix_messages: List = Field(default_factory=list)
"""Series of messages for Chat input."""
streaming: bool = False
"""Whether to stream the results or not."""
allowed_special: Union[Literal["all"], AbstractSet[str]] = set()
"""Set of special tokens that are allowed。"""
disallowed_special: Union[Literal["all"], Collection[str]] = "all"
"""Set of special tokens that are not allowed。"""
@root_validator(pre=True)
|
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html
|
9305743edd50-17
|
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_api_base = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
default="",
)
openai_proxy = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
openai_organization = get_from_dict_or_env(
values, "openai_organization", "OPENAI_ORGANIZATION", default=""
)
try:
import openai
openai.api_key = openai_api_key
if openai_api_base:
openai.api_base = openai_api_base
if openai_organization:
openai.organization = openai_organization
if openai_proxy:
|
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html
|
9305743edd50-18
|
openai.organization = openai_organization
if openai_proxy:
openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
try:
values["client"] = openai.ChatCompletion
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
warnings.warn(
"You are trying to use a chat model. This way of initializing it is "
"no longer supported. Instead, please use: "
"`from langchain.chat_models import ChatOpenAI`"
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return self.model_kwargs
def _get_chat_params(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> Tuple:
if len(prompts) > 1:
raise ValueError(
f"OpenAIChat currently only supports single prompt, got {prompts}"
)
messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}]
params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params}
if stop is not None:
if "stop" in params:
|
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html
|
9305743edd50-19
|
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
if params.get("max_tokens") == -1:
# for ChatGPT api, omitting max_tokens is equivalent to having no limit
del params["max_tokens"]
return messages, params
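# Illustrative sketch (not part of the module source): ``prefix_messages`` are
# prepended to the single user prompt, so a system message can be set once and
# reused on every call. Message contents are made up; assumes ``OPENAI_API_KEY``
# is set in the environment.
#
# chat = OpenAIChat(prefix_messages=[{"role": "system", "content": "Be terse."}])
# messages, params = chat._get_chat_params(["Tell me a joke."])
# # messages -> [{"role": "system", "content": "Be terse."},
# #              {"role": "user", "content": "Tell me a joke."}]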
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
messages, params = self._get_chat_params([prompt], stop)
params = {**params, **kwargs, "stream": True}
for stream_resp in completion_with_retry(
self, messages=messages, run_manager=run_manager, **params
):
token = stream_resp["choices"][0]["delta"].get("content", "")
yield GenerationChunk(text=token)
if run_manager:
run_manager.on_llm_new_token(token)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
messages, params = self._get_chat_params([prompt], stop)
params = {**params, **kwargs, "stream": True}
async for stream_resp in await acompletion_with_retry(
self, messages=messages, run_manager=run_manager, **params
):
token = stream_resp["choices"][0]["delta"].get("content", "")
|
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html
|
9305743edd50-20
|
token = stream_resp["choices"][0]["delta"].get("content", "")
yield GenerationChunk(text=token)
if run_manager:
await run_manager.on_llm_new_token(token)
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
if self.streaming:
generation: Optional[GenerationChunk] = None
for chunk in self._stream(prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
return LLMResult(generations=[[generation]])
messages, params = self._get_chat_params(prompts, stop)
params = {**params, **kwargs}
full_response = completion_with_retry(
self, messages=messages, run_manager=run_manager, **params
)
llm_output = {
"token_usage": full_response["usage"],
"model_name": self.model_name,
}
return LLMResult(
generations=[
[Generation(text=full_response["choices"][0]["message"]["content"])]
],
llm_output=llm_output,
)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
if self.streaming:
generation: Optional[GenerationChunk] = None
|
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html
|
9305743edd50-21
|
if self.streaming:
generation: Optional[GenerationChunk] = None
async for chunk in self._astream(prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
return LLMResult(generations=[[generation]])
messages, params = self._get_chat_params(prompts, stop)
params = {**params, **kwargs}
full_response = await acompletion_with_retry(
self, messages=messages, run_manager=run_manager, **params
)
llm_output = {
"token_usage": full_response["usage"],
"model_name": self.model_name,
}
return LLMResult(
generations=[
[Generation(text=full_response["choices"][0]["message"]["content"])]
],
llm_output=llm_output,
)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "openai-chat"
[docs] def get_token_ids(self, text: str) -> List[int]:
"""Get the token IDs using the tiktoken package."""
# tiktoken NOT supported for Python < 3.8
if sys.version_info[1] < 8:
return super().get_token_ids(text)
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
|
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html
|