Source code for langchain.llms.promptlayer_openai
Source: https://api.python.langchain.com/en/latest/_modules/langchain/llms/promptlayer_openai.html

"""PromptLayer wrapper."""
import datetime
from typing import Any, List, Optional

from langchain.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain.llms import OpenAI, OpenAIChat
from langchain.schema import LLMResult


class PromptLayerOpenAI(OpenAI):
    """Wrapper around OpenAI large language models.

    To use, you should have the ``openai`` and ``promptlayer`` python
    packages installed, and the environment variables ``OPENAI_API_KEY``
    and ``PROMPTLAYER_API_KEY`` set with your OpenAI API key and
    PromptLayer API key, respectively.

    All parameters that can be passed to the OpenAI LLM can also
    be passed here. The PromptLayerOpenAI LLM adds two optional
    parameters:
        ``pl_tags``: List of strings to tag the request with.
        ``return_pl_id``: If True, the PromptLayer request ID will be
            returned in the ``generation_info`` field of the
            ``Generation`` object.

    Example:
        .. code-block:: python

            from langchain.llms import PromptLayerOpenAI
            openai = PromptLayerOpenAI(model_name="text-davinci-003")
    """

    pl_tags: Optional[List[str]]
    return_pl_id: Optional[bool] = False

    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        """Call OpenAI generate and then call PromptLayer API to log the request."""
        from promptlayer.utils import get_api_key, promptlayer_api_request

        request_start_time = datetime.datetime.now().timestamp()
        generated_responses = super()._generate(prompts, stop, run_manager)
        request_end_time = datetime.datetime.now().timestamp()
        for i in range(len(prompts)):
            prompt = prompts[i]
            generation = generated_responses.generations[i][0]
            resp = {
                "text": generation.text,
                "llm_output": generated_responses.llm_output,
            }
            params = {**self._identifying_params, **kwargs}
            pl_request_id = promptlayer_api_request(
                "langchain.PromptLayerOpenAI",
                "langchain",
                [prompt],
                params,
                self.pl_tags,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
                return_pl_id=self.return_pl_id,
            )
            if self.return_pl_id:
                if generation.generation_info is None or not isinstance(
                    generation.generation_info, dict
                ):
                    generation.generation_info = {}
                generation.generation_info["pl_request_id"] = pl_request_id
        return generated_responses

    async def _agenerate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        from promptlayer.utils import get_api_key, promptlayer_api_request_async

        request_start_time = datetime.datetime.now().timestamp()
        generated_responses = await super()._agenerate(prompts, stop, run_manager)
        request_end_time = datetime.datetime.now().timestamp()
        for i in range(len(prompts)):
            prompt = prompts[i]
            generation = generated_responses.generations[i][0]
            resp = {
                "text": generation.text,
                "llm_output": generated_responses.llm_output,
            }
            params = {**self._identifying_params, **kwargs}
            pl_request_id = await promptlayer_api_request_async(
                "langchain.PromptLayerOpenAI.async",
                "langchain",
                [prompt],
                params,
                self.pl_tags,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
                return_pl_id=self.return_pl_id,
            )
            if self.return_pl_id:
                if generation.generation_info is None or not isinstance(
                    generation.generation_info, dict
                ):
                    generation.generation_info = {}
                generation.generation_info["pl_request_id"] = pl_request_id
        return generated_responses


class PromptLayerOpenAIChat(OpenAIChat):
    """Wrapper around OpenAI large language models.

    To use, you should have the ``openai`` and ``promptlayer`` python
    packages installed, and the environment variables ``OPENAI_API_KEY``
    and ``PROMPTLAYER_API_KEY`` set with your OpenAI API key and
    PromptLayer API key, respectively.

    All parameters that can be passed to the OpenAIChat LLM can also
    be passed here. The PromptLayerOpenAIChat adds two optional
    parameters:
        ``pl_tags``: List of strings to tag the request with.
        ``return_pl_id``: If True, the PromptLayer request ID will be
            returned in the ``generation_info`` field of the
            ``Generation`` object.

    Example:
        .. code-block:: python

            from langchain.llms import PromptLayerOpenAIChat
            openaichat = PromptLayerOpenAIChat(model_name="gpt-3.5-turbo")
    """

    pl_tags: Optional[List[str]]
    return_pl_id: Optional[bool] = False

    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        """Call OpenAI generate and then call PromptLayer API to log the request."""
        from promptlayer.utils import get_api_key, promptlayer_api_request

        request_start_time = datetime.datetime.now().timestamp()
        generated_responses = super()._generate(prompts, stop, run_manager)
        request_end_time = datetime.datetime.now().timestamp()
        for i in range(len(prompts)):
            prompt = prompts[i]
            generation = generated_responses.generations[i][0]
            resp = {
                "text": generation.text,
                "llm_output": generated_responses.llm_output,
            }
            params = {**self._identifying_params, **kwargs}
            pl_request_id = promptlayer_api_request(
                "langchain.PromptLayerOpenAIChat",
                "langchain",
                [prompt],
                params,
                self.pl_tags,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
                return_pl_id=self.return_pl_id,
            )
            if self.return_pl_id:
                if generation.generation_info is None or not isinstance(
                    generation.generation_info, dict
                ):
                    generation.generation_info = {}
                generation.generation_info["pl_request_id"] = pl_request_id
        return generated_responses

    async def _agenerate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        from promptlayer.utils import get_api_key, promptlayer_api_request_async

        request_start_time = datetime.datetime.now().timestamp()
        generated_responses = await super()._agenerate(prompts, stop, run_manager)
        request_end_time = datetime.datetime.now().timestamp()
        for i in range(len(prompts)):
            prompt = prompts[i]
            generation = generated_responses.generations[i][0]
            resp = {
                "text": generation.text,
                "llm_output": generated_responses.llm_output,
            }
            params = {**self._identifying_params, **kwargs}
            pl_request_id = await promptlayer_api_request_async(
                "langchain.PromptLayerOpenAIChat.async",
                "langchain",
                [prompt],
                params,
                self.pl_tags,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
                return_pl_id=self.return_pl_id,
            )
            if self.return_pl_id:
                if generation.generation_info is None or not isinstance(
                    generation.generation_info, dict
                ):
                    generation.generation_info = {}
                generation.generation_info["pl_request_id"] = pl_request_id
        return generated_responses
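A minimal usage sketch (not part of the original page), assuming ``OPENAI_API_KEY`` and ``PROMPTLAYER_API_KEY`` are set and the packages above are installed. It shows how the PromptLayer request ID surfaces in ``generation_info`` when ``return_pl_id=True``:

    from langchain.llms import PromptLayerOpenAI

    llm = PromptLayerOpenAI(
        model_name="text-davinci-003",
        pl_tags=["docs-example"],
        return_pl_id=True,
    )
    result = llm.generate(["Tell me a joke."])
    generation = result.generations[0][0]
    # pl_request_id is only attached because return_pl_id=True
    print(generation.text, generation.generation_info["pl_request_id"])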
Source code for langchain.llms.cerebriumai
Source: https://api.python.langchain.com/en/latest/_modules/langchain/llms/cerebriumai.html

"""Wrapper around CerebriumAI API."""
import logging
from typing import Any, Dict, List, Mapping, Optional

from pydantic import Extra, Field, root_validator

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class CerebriumAI(LLM):
    """Wrapper around CerebriumAI large language models.

    To use, you should have the ``cerebrium`` python package installed, and the
    environment variable ``CEREBRIUMAI_API_KEY`` set with your API key.

    Any parameters that are valid to be passed to the call can be passed
    in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.llms import CerebriumAI
            cerebrium = CerebriumAI(endpoint_url="")
    """

    endpoint_url: str = ""
    """model endpoint to use"""

    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""

    cerebriumai_api_key: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = {field.alias for field in cls.__fields__.values()}

        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name not in all_required_field_names:
                if field_name in extra:
                    raise ValueError(f"Found {field_name} supplied twice.")
                logger.warning(
                    f"""{field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
                extra[field_name] = values.pop(field_name)
        values["model_kwargs"] = extra
        return values

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the api key exists in the environment."""
        cerebriumai_api_key = get_from_dict_or_env(
            values, "cerebriumai_api_key", "CEREBRIUMAI_API_KEY"
        )
        values["cerebriumai_api_key"] = cerebriumai_api_key
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            **{"endpoint_url": self.endpoint_url},
            **{"model_kwargs": self.model_kwargs},
        }

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "cerebriumai"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call to CerebriumAI endpoint."""
        try:
            from cerebrium import model_api_request
        except ImportError:
            raise ValueError(
                "Could not import cerebrium python package. "
                "Please install it with `pip install cerebrium`."
            )

        params = self.model_kwargs or {}
        response = model_api_request(
            self.endpoint_url,
            {"prompt": prompt, **params, **kwargs},
            self.cerebriumai_api_key,
        )
        text = response["data"]["result"]
        if stop is not None:
            # I believe this is required since the stop tokens
            # are not enforced by the model parameters
            text = enforce_stop_tokens(text, stop)
        return text
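As a hedged illustration of the ``build_extra`` validator above (the endpoint URL and parameter name are placeholders, not values from the page): any keyword argument that is not a declared field is moved into ``model_kwargs`` and forwarded with every request:

    from langchain.llms import CerebriumAI

    llm = CerebriumAI(
        endpoint_url="https://run.cerebrium.ai/<endpoint-id>/predict",  # placeholder
        max_length=100,  # not a declared field, so it is moved into model_kwargs
    )
    print(llm.model_kwargs)  # {'max_length': 100}
    # print(llm("What is the fastest land animal?"))  # requires CEREBRIUMAI_API_KEY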
Source code for langchain.llms.bedrock
Source: https://api.python.langchain.com/en/latest/_modules/langchain/llms/bedrock.html

import json
from typing import Any, Dict, List, Mapping, Optional

from pydantic import Extra, root_validator

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens


class LLMInputOutputAdapter:
    """Adapter class to prepare the inputs from Langchain to a format
    that LLM model expects. Also, provides helper function to extract
    the generated text from the model response."""

    @classmethod
    def prepare_input(
        cls, provider: str, prompt: str, model_kwargs: Dict[str, Any]
    ) -> Dict[str, Any]:
        input_body = {**model_kwargs}
        if provider == "anthropic" or provider == "ai21":
            input_body["prompt"] = prompt
        elif provider == "amazon":
            input_body = dict()
            input_body["inputText"] = prompt
            input_body["textGenerationConfig"] = {**model_kwargs}
        else:
            input_body["inputText"] = prompt

        if provider == "anthropic" and "max_tokens_to_sample" not in input_body:
            input_body["max_tokens_to_sample"] = 50

        return input_body

    @classmethod
    def prepare_output(cls, provider: str, response: Any) -> str:
        if provider == "anthropic":
            response_body = json.loads(response.get("body").read().decode())
            return response_body.get("completion")
        else:
            response_body = json.loads(response.get("body").read())

        if provider == "ai21":
            return response_body.get("completions")[0].get("data").get("text")
        else:
            return response_body.get("results")[0].get("outputText")


class Bedrock(LLM):
    """LLM provider to invoke Bedrock models.

    To authenticate, the AWS client uses the following methods to
    automatically load credentials:
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html

    If a specific credential profile should be used, you must pass
    the name of the profile from the ~/.aws/credentials file that is to be used.

    Make sure the credentials / roles used have the required policies to
    access the Bedrock service.

    Example:
        .. code-block:: python

            from langchain.llms import Bedrock

            llm = Bedrock(
                credentials_profile_name="default",
                model_id="amazon.titan-tg1-large"
            )
    """

    client: Any  #: :meta private:

    region_name: Optional[str] = None
    """The aws region, e.g., `us-west-2`. Falls back to the AWS_DEFAULT_REGION env
    variable or the region specified in ~/.aws/config if it is not provided here.
    """

    credentials_profile_name: Optional[str] = None
    """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
    has either access keys or role information specified.
    If not specified, the default credential profile or, if on an EC2 instance,
    credentials from IMDS will be used.
    See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
    """

    model_id: str
    """Id of the model to call, e.g., amazon.titan-tg1-large; this is
    equivalent to the modelId property in the list-foundation-models api"""

    model_kwargs: Optional[Dict] = None
    """Key word arguments to pass to the model."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that AWS credentials and the boto3 python package exist in the environment."""
        # Skip creating new client if passed in constructor
        if values["client"] is not None:
            return values

        try:
            import boto3

            if values["credentials_profile_name"] is not None:
                session = boto3.Session(profile_name=values["credentials_profile_name"])
            else:
                # use default credentials
                session = boto3.Session()

            client_params = {}
            if values["region_name"]:
                client_params["region_name"] = values["region_name"]

            values["client"] = session.client("bedrock", **client_params)
        except ImportError:
            raise ModuleNotFoundError(
                "Could not import boto3 python package. "
                "Please install it with `pip install boto3`."
            )
        except Exception as e:
            raise ValueError(
                "Could not load credentials to authenticate with AWS client. "
                "Please check that credentials in the specified "
                "profile name are valid."
            ) from e

        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        _model_kwargs = self.model_kwargs or {}
        return {
            **{"model_kwargs": _model_kwargs},
        }

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "amazon_bedrock"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call out to Bedrock service model.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = llm("Tell me a joke.")
        """
        _model_kwargs = self.model_kwargs or {}

        provider = self.model_id.split(".")[0]
        params = {**_model_kwargs, **kwargs}
        input_body = LLMInputOutputAdapter.prepare_input(provider, prompt, params)
        body = json.dumps(input_body)
        accept = "application/json"
        contentType = "application/json"

        try:
            response = self.client.invoke_model(
                body=body, modelId=self.model_id, accept=accept, contentType=contentType
            )
            text = LLMInputOutputAdapter.prepare_output(provider, response)
        except Exception as e:
            raise ValueError(f"Error raised by bedrock service: {e}")

        if stop is not None:
            text = enforce_stop_tokens(text, stop)

        return text
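A usage sketch, assuming valid AWS credentials and Bedrock access (the model id and kwargs are illustrative, not from the page): provider-specific parameters are passed through ``model_kwargs`` and shaped by ``LLMInputOutputAdapter`` based on the prefix of ``model_id``:

    from langchain.llms import Bedrock

    llm = Bedrock(
        credentials_profile_name="default",
        model_id="anthropic.claude-v1",  # provider inferred as "anthropic"
        model_kwargs={"max_tokens_to_sample": 256, "temperature": 0.5},
    )
    # print(llm("Human: Tell me a joke.\n\nAssistant:", stop=["\n\nHuman:"]))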
Source code for langchain.llms.nlpcloud
Source: https://api.python.langchain.com/en/latest/_modules/langchain/llms/nlpcloud.html

"""Wrapper around NLPCloud APIs."""
from typing import Any, Dict, List, Mapping, Optional

from pydantic import Extra, root_validator

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env


class NLPCloud(LLM):
    """Wrapper around NLPCloud large language models.

    To use, you should have the ``nlpcloud`` python package installed, and the
    environment variable ``NLPCLOUD_API_KEY`` set with your API key.

    Example:
        .. code-block:: python

            from langchain.llms import NLPCloud
            nlpcloud = NLPCloud(model="gpt-neox-20b")
    """

    client: Any  #: :meta private:
    model_name: str = "finetuned-gpt-neox-20b"
    """Model name to use."""
    temperature: float = 0.7
    """What sampling temperature to use."""
    min_length: int = 1
    """The minimum number of tokens to generate in the completion."""
    max_length: int = 256
    """The maximum number of tokens to generate in the completion."""
    length_no_input: bool = True
    """Whether min_length and max_length should include the length of the input."""
    remove_input: bool = True
    """Remove input text from API response"""
    remove_end_sequence: bool = True
    """Whether or not to remove the end sequence token."""
    bad_words: List[str] = []
    """List of tokens not allowed to be generated."""
    top_p: int = 1
    """Total probability mass of tokens to consider at each step."""
    top_k: int = 50
    """The number of highest probability tokens to keep for top-k filtering."""
    repetition_penalty: float = 1.0
    """Penalizes repeated tokens. 1.0 means no penalty."""
    length_penalty: float = 1.0
    """Exponential penalty to the length."""
    do_sample: bool = True
    """Whether to use sampling (True) or greedy decoding."""
    num_beams: int = 1
    """Number of beams for beam search."""
    early_stopping: bool = False
    """Whether to stop beam search at num_beams sentences."""
    num_return_sequences: int = 1
    """How many completions to generate for each prompt."""
    nlpcloud_api_key: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        nlpcloud_api_key = get_from_dict_or_env(
            values, "nlpcloud_api_key", "NLPCLOUD_API_KEY"
        )
        try:
            import nlpcloud

            values["client"] = nlpcloud.Client(
                values["model_name"], nlpcloud_api_key, gpu=True, lang="en"
            )
        except ImportError:
            raise ImportError(
                "Could not import nlpcloud python package. "
                "Please install it with `pip install nlpcloud`."
            )
        return values

    @property
    def _default_params(self) -> Mapping[str, Any]:
        """Get the default parameters for calling NLPCloud API."""
        return {
            "temperature": self.temperature,
            "min_length": self.min_length,
            "max_length": self.max_length,
            "length_no_input": self.length_no_input,
            "remove_input": self.remove_input,
            "remove_end_sequence": self.remove_end_sequence,
            "bad_words": self.bad_words,
            "top_p": self.top_p,
            "top_k": self.top_k,
            "repetition_penalty": self.repetition_penalty,
            "length_penalty": self.length_penalty,
            "do_sample": self.do_sample,
            "num_beams": self.num_beams,
            "early_stopping": self.early_stopping,
            "num_return_sequences": self.num_return_sequences,
        }

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {**{"model_name": self.model_name}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "nlpcloud"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call out to NLPCloud's create endpoint.

        Args:
            prompt: The prompt to pass into the model.
            stop: Not supported by this interface (pass in init method)

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = nlpcloud("Tell me a joke.")
        """
        if stop and len(stop) > 1:
            raise ValueError(
                "NLPCloud only supports a single stop sequence per generation. "
                "Pass in a list of length 1."
            )
        elif stop and len(stop) == 1:
            end_sequence = stop[0]
        else:
            end_sequence = None
        params = {**self._default_params, **kwargs}
        response = self.client.generation(prompt, end_sequence=end_sequence, **params)
        return response["generated_text"]
Source code for langchain.llms.self_hosted_hugging_face
Source: https://api.python.langchain.com/en/latest/_modules/langchain/llms/self_hosted_hugging_face.html

"""Wrapper around HuggingFace Pipeline API to run on self-hosted remote hardware."""
import importlib.util
import logging
from typing import Any, Callable, List, Mapping, Optional

from pydantic import Extra

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.self_hosted import SelfHostedPipeline
from langchain.llms.utils import enforce_stop_tokens

DEFAULT_MODEL_ID = "gpt2"
DEFAULT_TASK = "text-generation"
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")

logger = logging.getLogger(__name__)


def _generate_text(
    pipeline: Any,
    prompt: str,
    *args: Any,
    stop: Optional[List[str]] = None,
    **kwargs: Any,
) -> str:
    """Inference function to send to the remote hardware.

    Accepts a Hugging Face pipeline (or more likely,
    a key pointing to such a pipeline on the cluster's object store)
    and returns generated text.
    """
    response = pipeline(prompt, *args, **kwargs)
    if pipeline.task == "text-generation":
        # Text generation return includes the starter text.
        text = response[0]["generated_text"][len(prompt) :]
    elif pipeline.task == "text2text-generation":
        text = response[0]["generated_text"]
    elif pipeline.task == "summarization":
        text = response[0]["summary_text"]
    else:
        raise ValueError(
            f"Got invalid task {pipeline.task}, "
            f"currently only {VALID_TASKS} are supported"
        )
    if stop is not None:
        text = enforce_stop_tokens(text, stop)
    return text


def _load_transformer(
    model_id: str = DEFAULT_MODEL_ID,
    task: str = DEFAULT_TASK,
    device: int = 0,
    model_kwargs: Optional[dict] = None,
) -> Any:
    """Inference function to send to the remote hardware.

    Accepts a huggingface model_id and returns a pipeline for the task.
    """
    from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
    from transformers import pipeline as hf_pipeline

    _model_kwargs = model_kwargs or {}
    tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs)

    try:
        if task == "text-generation":
            model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs)
        elif task in ("text2text-generation", "summarization"):
            model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs)
        else:
            raise ValueError(
                f"Got invalid task {task}, "
                f"currently only {VALID_TASKS} are supported"
            )
    except ImportError as e:
        raise ValueError(
            f"Could not load the {task} model due to missing dependencies."
        ) from e

    if importlib.util.find_spec("torch") is not None:
        import torch

        cuda_device_count = torch.cuda.device_count()
        if device < -1 or (device >= cuda_device_count):
            raise ValueError(
                f"Got device=={device}, "
                f"device is required to be within [-1, {cuda_device_count})"
            )
        if device < 0 and cuda_device_count > 0:
            logger.warning(
                "Device has %d GPUs available. "
                "Provide device={deviceId} to `from_model_id` to use available "
                "GPUs for execution. deviceId is -1 for CPU and "
                "can be a positive integer associated with CUDA device id.",
                cuda_device_count,
            )

    pipeline = hf_pipeline(
        task=task,
        model=model,
        tokenizer=tokenizer,
        device=device,
        model_kwargs=_model_kwargs,
    )
    if pipeline.task not in VALID_TASKS:
        raise ValueError(
            f"Got invalid task {pipeline.task}, "
            f"currently only {VALID_TASKS} are supported"
        )
    return pipeline


class SelfHostedHuggingFaceLLM(SelfHostedPipeline):
    """Wrapper around HuggingFace Pipeline API to run on self-hosted remote hardware.

    Supported hardware includes auto-launched instances on AWS, GCP, Azure,
    and Lambda, as well as servers specified
    by IP address and SSH credentials (such as on-prem, or another cloud
    like Paperspace, Coreweave, etc.).

    To use, you should have the ``runhouse`` python package installed.

    Only supports `text-generation`, `text2text-generation` and `summarization` for now.

    Example using from_model_id:
        .. code-block:: python

            from langchain.llms import SelfHostedHuggingFaceLLM
            import runhouse as rh
            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
            hf = SelfHostedHuggingFaceLLM(
                model_id="google/flan-t5-large", task="text2text-generation",
                hardware=gpu
            )

    Example passing a fn that generates a pipeline (because the pipeline is not serializable):
        .. code-block:: python

            from langchain.llms import SelfHostedHuggingFaceLLM
            from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
            import runhouse as rh

            def get_pipeline():
                model_id = "gpt2"
                tokenizer = AutoTokenizer.from_pretrained(model_id)
                model = AutoModelForCausalLM.from_pretrained(model_id)
                pipe = pipeline(
                    "text-generation", model=model, tokenizer=tokenizer
                )
                return pipe
            hf = SelfHostedHuggingFaceLLM(
                model_load_fn=get_pipeline, model_id="gpt2", hardware=gpu)
    """

    model_id: str = DEFAULT_MODEL_ID
    """Hugging Face model_id to load the model."""
    task: str = DEFAULT_TASK
    """Hugging Face task ("text-generation", "text2text-generation" or
    "summarization")."""
    device: int = 0
    """Device to use for inference. -1 for CPU, 0 for GPU, 1 for second GPU, etc."""
    model_kwargs: Optional[dict] = None
    """Key word arguments to pass to the model."""
    hardware: Any
    """Remote hardware to send the inference function to."""
    model_reqs: List[str] = ["./", "transformers", "torch"]
    """Requirements to install on hardware to inference the model."""
    model_load_fn: Callable = _load_transformer
    """Function to load the model remotely on the server."""
    inference_fn: Callable = _generate_text  #: :meta private:
    """Inference function to send to the remote hardware."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def __init__(self, **kwargs: Any):
        """Construct the pipeline remotely using an auxiliary function.

        The load function needs to be importable to be imported
        and run on the server, i.e. in a module and not a REPL or closure.
        Then, initialize the remote inference function.
        """
        load_fn_kwargs = {
            "model_id": kwargs.get("model_id", DEFAULT_MODEL_ID),
            "task": kwargs.get("task", DEFAULT_TASK),
            "device": kwargs.get("device", 0),
            "model_kwargs": kwargs.get("model_kwargs", None),
        }
        super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            **{"model_id": self.model_id},
            **{"model_kwargs": self.model_kwargs},
        }

    @property
    def _llm_type(self) -> str:
        return "selfhosted_huggingface_pipeline"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        return self.client(
            pipeline=self.pipeline_ref, prompt=prompt, stop=stop, **kwargs
        )
Source code for langchain.llms.baseten
Source: https://api.python.langchain.com/en/latest/_modules/langchain/llms/baseten.html

"""Wrapper around Baseten deployed model API."""
import logging
from typing import Any, Dict, List, Mapping, Optional

from pydantic import Field

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM

logger = logging.getLogger(__name__)


class Baseten(LLM):
    """Use your Baseten models in Langchain.

    To use, you should have the ``baseten`` python package installed,
    and run ``baseten.login()`` with your Baseten API key.

    The required ``model`` param can be either a model id or model
    version id. Using a model version ID will result in
    slightly faster invocation.
    Any other model parameters can also
    be passed in with the format input={model_param: value, ...}

    The Baseten model must accept a dictionary of input with the key
    "prompt" and return a dictionary with a key "data" which maps to
    a list of response strings.

    Example:
        .. code-block:: python

            from langchain.llms import Baseten
            my_model = Baseten(model="MODEL_ID")
            output = my_model("prompt")
    """

    model: str
    input: Dict[str, Any] = Field(default_factory=dict)
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            **{"model_kwargs": self.model_kwargs},
        }

    @property
    def _llm_type(self) -> str:
        """Return type of model."""
        return "baseten"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call to Baseten deployed model endpoint."""
        try:
            import baseten
        except ImportError as exc:
            raise ValueError(
                "Could not import Baseten Python package. "
                "Please install it with `pip install baseten`."
            ) from exc

        # get the model and version
        try:
            model = baseten.deployed_model_version_id(self.model)
            response = model.predict({"prompt": prompt})
        except baseten.common.core.ApiError:
            model = baseten.deployed_model_id(self.model)
            response = model.predict({"prompt": prompt})
        return "".join(response)
Source code for langchain.llms.mosaicml
Source: https://api.python.langchain.com/en/latest/_modules/langchain/llms/mosaicml.html

"""Wrapper around MosaicML APIs."""
from typing import Any, Dict, List, Mapping, Optional

import requests
from pydantic import Extra, root_validator

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env

INSTRUCTION_KEY = "### Instruction:"
RESPONSE_KEY = "### Response:"
INTRO_BLURB = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request."
)
PROMPT_FOR_GENERATION_FORMAT = """{intro}
{instruction_key}
{instruction}
{response_key}
""".format(
    intro=INTRO_BLURB,
    instruction_key=INSTRUCTION_KEY,
    instruction="{instruction}",
    response_key=RESPONSE_KEY,
)


class MosaicML(LLM):
    """Wrapper around MosaicML's LLM inference service.

    To use, you should have the
    environment variable ``MOSAICML_API_TOKEN`` set with your API token, or pass
    it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain.llms import MosaicML
            endpoint_url = (
                "https://models.hosted-on.mosaicml.hosting/mpt-7b-instruct/v1/predict"
            )
            mosaic_llm = MosaicML(
                endpoint_url=endpoint_url,
                mosaicml_api_token="my-api-key"
            )
    """

    endpoint_url: str = (
        "https://models.hosted-on.mosaicml.hosting/mpt-7b-instruct/v1/predict"
    )
    """Endpoint URL to use."""
    inject_instruction_format: bool = False
    """Whether to inject the instruction format into the prompt."""
    model_kwargs: Optional[dict] = None
    """Key word arguments to pass to the model."""
    retry_sleep: float = 1.0
    """How long to try sleeping for if a rate limit is encountered"""
    mosaicml_api_token: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the api key exists in the environment."""
        mosaicml_api_token = get_from_dict_or_env(
            values, "mosaicml_api_token", "MOSAICML_API_TOKEN"
        )
        values["mosaicml_api_token"] = mosaicml_api_token
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        _model_kwargs = self.model_kwargs or {}
        return {
            **{"endpoint_url": self.endpoint_url},
            **{"model_kwargs": _model_kwargs},
        }

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "mosaic"

    def _transform_prompt(self, prompt: str) -> str:
        """Transform prompt."""
        if self.inject_instruction_format:
            prompt = PROMPT_FOR_GENERATION_FORMAT.format(
                instruction=prompt,
            )
        return prompt

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        is_retry: bool = False,
        **kwargs: Any,
    ) -> str:
        """Call out to a MosaicML LLM inference endpoint.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = mosaic_llm("Tell me a joke.")
        """
        _model_kwargs = self.model_kwargs or {}

        prompt = self._transform_prompt(prompt)

        payload = {"input_strings": [prompt]}
        payload.update(_model_kwargs)
        payload.update(kwargs)

        # HTTP headers for authorization
        headers = {
            "Authorization": f"{self.mosaicml_api_token}",
            "Content-Type": "application/json",
        }

        # send request
        try:
            response = requests.post(self.endpoint_url, headers=headers, json=payload)
        except requests.exceptions.RequestException as e:
            raise ValueError(f"Error raised by inference endpoint: {e}")

        try:
            parsed_response = response.json()

            if "error" in parsed_response:
                # if we get rate limited, try sleeping for 1 second
                if (
                    not is_retry
                    and "rate limit exceeded" in parsed_response["error"].lower()
                ):
                    import time

                    time.sleep(self.retry_sleep)

                    return self._call(prompt, stop, run_manager, is_retry=True)

                raise ValueError(
                    f"Error raised by inference API: {parsed_response['error']}"
                )

            # The inference API has changed a couple of times, so we add some handling
            # to be robust to multiple response formats.
            if isinstance(parsed_response, dict):
                if "data" in parsed_response:
                    output_item = parsed_response["data"]
                elif "output" in parsed_response:
                    output_item = parsed_response["output"]
                else:
                    raise ValueError(
                        f"No key data or output in response: {parsed_response}"
                    )

                if isinstance(output_item, list):
                    text = output_item[0]
                else:
                    text = output_item
            elif isinstance(parsed_response, list):
                first_item = parsed_response[0]
                if isinstance(first_item, str):
                    text = first_item
                elif isinstance(first_item, dict):
                    if "output" in first_item:
                        text = first_item["output"]
                    else:
                        raise ValueError(
                            f"No key data or output in response: {parsed_response}"
                        )
                else:
                    raise ValueError(f"Unexpected response format: {parsed_response}")
            else:
                raise ValueError(f"Unexpected response type: {parsed_response}")

            text = text[len(prompt) :]
        except requests.exceptions.JSONDecodeError as e:
            raise ValueError(
                f"Error raised by inference API: {e}.\nResponse: {response.text}"
            )

        # TODO: replace when MosaicML supports custom stop tokens natively
        if stop is not None:
            text = enforce_stop_tokens(text, stop)

        return text
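A short sketch of how ``inject_instruction_format`` wraps the prompt (the token value is a placeholder; a valid MosaicML API token is required to actually call the endpoint):

    from langchain.llms import MosaicML

    llm = MosaicML(
        mosaicml_api_token="my-api-key",  # placeholder
        inject_instruction_format=True,
    )
    # The prompt is wrapped in the ### Instruction: / ### Response: template
    print(llm._transform_prompt("Summarize the plot of Hamlet."))
    # print(llm("Summarize the plot of Hamlet.", stop=["###"]))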
Source code for langchain.llms.huggingface_text_gen_inference
Source: https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_text_gen_inference.html

"""Wrapper around Huggingface text generation inference API."""
from functools import partial
from typing import Any, Dict, List, Optional

from pydantic import Extra, Field, root_validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM


class HuggingFaceTextGenInference(LLM):
    """
    HuggingFace text generation inference API.

    This class is a wrapper around the HuggingFace text generation inference API.
    It is used to generate text from a given prompt.

    Attributes:
    - max_new_tokens: The maximum number of tokens to generate.
    - top_k: The number of top-k tokens to consider when generating text.
    - top_p: The cumulative probability threshold for generating text.
    - typical_p: The typical probability threshold for generating text.
    - temperature: The temperature to use when generating text.
    - repetition_penalty: The repetition penalty to use when generating text.
    - stop_sequences: A list of stop sequences to use when generating text.
    - seed: The seed to use when generating text.
    - inference_server_url: The URL of the inference server to use.
    - timeout: The timeout value in seconds to use while connecting to inference server.
    - server_kwargs: The keyword arguments to pass to the inference server.
    - client: The client object used to communicate with the inference server.
    - async_client: The async client object used to communicate with the server.

    Methods:
    - _call: Generates text based on a given prompt and stop sequences.
    - _acall: Async generates text based on a given prompt and stop sequences.
    - _llm_type: Returns the type of LLM.

    Example:
        .. code-block:: python

            # Basic Example (no streaming)
            llm = HuggingFaceTextGenInference(
                inference_server_url="http://localhost:8010/",
                max_new_tokens=512,
                top_k=10,
                top_p=0.95,
                typical_p=0.95,
                temperature=0.01,
                repetition_penalty=1.03,
            )
            print(llm("What is Deep Learning?"))

            # Streaming response example
            from langchain.callbacks import streaming_stdout

            callbacks = [streaming_stdout.StreamingStdOutCallbackHandler()]
            llm = HuggingFaceTextGenInference(
                inference_server_url="http://localhost:8010/",
                max_new_tokens=512,
                top_k=10,
                top_p=0.95,
                typical_p=0.95,
                temperature=0.01,
                repetition_penalty=1.03,
                callbacks=callbacks,
                stream=True
            )
            print(llm("What is Deep Learning?"))
    """

    max_new_tokens: int = 512
    top_k: Optional[int] = None
    top_p: Optional[float] = 0.95
    typical_p: Optional[float] = 0.95
    temperature: float = 0.8
    repetition_penalty: Optional[float] = None
    stop_sequences: List[str] = Field(default_factory=list)
    seed: Optional[int] = None
    inference_server_url: str = ""
    timeout: int = 120
    server_kwargs: Dict[str, Any] = Field(default_factory=dict)
    stream: bool = False
    client: Any
    async_client: Any

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that python package exists in environment."""
        try:
            import text_generation

            values["client"] = text_generation.Client(
                values["inference_server_url"],
                timeout=values["timeout"],
                **values["server_kwargs"],
            )
            values["async_client"] = text_generation.AsyncClient(
                values["inference_server_url"],
                timeout=values["timeout"],
                **values["server_kwargs"],
            )
        except ImportError:
            raise ImportError(
                "Could not import text_generation python package. "
                "Please install it with `pip install text_generation`."
            )
        return values

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "huggingface_textgen_inference"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        if stop is None:
            stop = self.stop_sequences
        else:
            stop += self.stop_sequences

        if not self.stream:
            res = self.client.generate(
                prompt,
                stop_sequences=stop,
                max_new_tokens=self.max_new_tokens,
                top_k=self.top_k,
                top_p=self.top_p,
                typical_p=self.typical_p,
                temperature=self.temperature,
                repetition_penalty=self.repetition_penalty,
                seed=self.seed,
                **kwargs,
            )
            # remove stop sequences from the end of the generated text
            for stop_seq in stop:
                if stop_seq in res.generated_text:
                    res.generated_text = res.generated_text[
                        : res.generated_text.index(stop_seq)
                    ]
            text = res.generated_text
        else:
            text_callback = None
            if run_manager:
                text_callback = partial(
                    run_manager.on_llm_new_token, verbose=self.verbose
                )
            params = {
                "stop_sequences": stop,
                "max_new_tokens": self.max_new_tokens,
                "top_k": self.top_k,
                "top_p": self.top_p,
                "typical_p": self.typical_p,
                "temperature": self.temperature,
                "repetition_penalty": self.repetition_penalty,
                "seed": self.seed,
            }
            text = ""
            for res in self.client.generate_stream(prompt, **params):
                token = res.token
                is_stop = False
                for stop_seq in stop:
                    if stop_seq in token.text:
                        is_stop = True
                        break
                if is_stop:
                    break
                if not token.special:
                    if text_callback:
                        text_callback(token.text)
                    text += token.text
        return text

    async def _acall(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        if stop is None:
            stop = self.stop_sequences
        else:
            stop += self.stop_sequences

        if not self.stream:
            res = await self.async_client.generate(
                prompt,
                stop_sequences=stop,
                max_new_tokens=self.max_new_tokens,
                top_k=self.top_k,
                top_p=self.top_p,
                typical_p=self.typical_p,
                temperature=self.temperature,
                repetition_penalty=self.repetition_penalty,
                seed=self.seed,
                **kwargs,
            )
            # remove stop sequences from the end of the generated text
            for stop_seq in stop:
                if stop_seq in res.generated_text:
                    res.generated_text = res.generated_text[
                        : res.generated_text.index(stop_seq)
                    ]
            text: str = res.generated_text
        else:
            text_callback = None
            if run_manager:
                text_callback = partial(
                    run_manager.on_llm_new_token, verbose=self.verbose
                )
            params = {
                **{
                    "stop_sequences": stop,
                    "max_new_tokens": self.max_new_tokens,
                    "top_k": self.top_k,
                    "top_p": self.top_p,
                    "typical_p": self.typical_p,
                    "temperature": self.temperature,
                    "repetition_penalty": self.repetition_penalty,
                    "seed": self.seed,
                },
                **kwargs,
            }
            text = ""
            async for res in self.async_client.generate_stream(prompt, **params):
                token = res.token
                is_stop = False
                for stop_seq in stop:
                    if stop_seq in token.text:
                        is_stop = True
                        break
                if is_stop:
                    break
                if not token.special:
                    if text_callback:
                        await text_callback(token.text)
                    # accumulate the streamed tokens so the full text is returned
                    text += token.text
        return text
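A hedged async usage sketch (the server URL is a placeholder; it assumes a running text-generation-inference server and the ``text_generation`` package installed). Stop sequences set on the object are merged with any stop list passed at call time:

    import asyncio

    from langchain.llms import HuggingFaceTextGenInference

    llm = HuggingFaceTextGenInference(
        inference_server_url="http://localhost:8010/",  # placeholder
        max_new_tokens=256,
        stop_sequences=["\n\n"],
    )

    async def main() -> None:
        result = await llm.agenerate(["What is Deep Learning?"])
        print(result.generations[0][0].text)

    asyncio.run(main())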
Source code for langchain.llms.vertexai
Source: https://api.python.langchain.com/en/latest/_modules/langchain/llms/vertexai.html

"""Wrapper around Google VertexAI models."""
import asyncio
from concurrent.futures import Executor, ThreadPoolExecutor
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional

from pydantic import BaseModel, root_validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utilities.vertexai import (
    init_vertexai,
    raise_vertex_import_error,
)

if TYPE_CHECKING:
    from vertexai.language_models._language_models import _LanguageModel


def is_codey_model(model_name: str) -> bool:
    return "code" in model_name


class _VertexAICommon(BaseModel):
    client: "_LanguageModel" = None  #: :meta private:
    model_name: str
    "Model name to use."
    temperature: float = 0.0
    "Sampling temperature, it controls the degree of randomness in token selection."
    max_output_tokens: int = 128
    "Token limit determines the maximum amount of text output from one prompt."
    top_p: float = 0.95
    "Tokens are selected from most probable to least until the sum of their "
    "probabilities equals the top-p value. Top-p is ignored for Codey models."
    top_k: int = 40
    "How the model selects tokens for output, the next token is selected from "
    "among the top-k most probable tokens. Top-k is ignored for Codey models."
    stop: Optional[List[str]] = None
    "Optional list of stop words to use when generating."
    project: Optional[str] = None
    "The default GCP project to use when making Vertex API calls."
    location: str = "us-central1"
    "The default location to use when making API calls."
    credentials: Any = None
    "The default custom credentials (google.auth.credentials.Credentials) to use "
    "when making API calls. If not provided, credentials will be ascertained from "
    "the environment."
    request_parallelism: int = 5
    "The amount of parallelism allowed for requests issued to VertexAI models. "
    "Default is 5."
    task_executor: ClassVar[Optional[Executor]] = None

    @property
    def is_codey_model(self) -> bool:
        return is_codey_model(self.model_name)

    @property
    def _default_params(self) -> Dict[str, Any]:
        if self.is_codey_model:
            return {
                "temperature": self.temperature,
                "max_output_tokens": self.max_output_tokens,
            }
        else:
            return {
                "temperature": self.temperature,
                "max_output_tokens": self.max_output_tokens,
                "top_k": self.top_k,
                "top_p": self.top_p,
            }

    def _predict(
        self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any
    ) -> str:
        params = {**self._default_params, **kwargs}
        res = self.client.predict(prompt, **params)
        return self._enforce_stop_words(res.text, stop)

    def _enforce_stop_words(self, text: str, stop: Optional[List[str]] = None) -> str:
        if stop is None and self.stop is not None:
            stop = self.stop
        if stop:
            return enforce_stop_tokens(text, stop)
        return text

    @property
    def _llm_type(self) -> str:
        return "vertexai"

    @classmethod
    def _get_task_executor(cls, request_parallelism: int = 5) -> Executor:
        if cls.task_executor is None:
            cls.task_executor = ThreadPoolExecutor(max_workers=request_parallelism)
        return cls.task_executor

    @classmethod
    def _try_init_vertexai(cls, values: Dict) -> None:
        allowed_params = ["project", "location", "credentials"]
        params = {k: v for k, v in values.items() if k in allowed_params}
        init_vertexai(**params)
        return None


class VertexAI(_VertexAICommon, LLM):
    """Wrapper around Google Vertex AI large language models."""

    model_name: str = "text-bison"
    "The name of the Vertex AI large language model."
    tuned_model_name: Optional[str] = None
    "The name of a tuned model. If provided, model_name is ignored."

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the python package exists in environment."""
        cls._try_init_vertexai(values)
        tuned_model_name = values.get("tuned_model_name")
        model_name = values["model_name"]
        try:
            if tuned_model_name or not is_codey_model(model_name):
                from vertexai.preview.language_models import TextGenerationModel

                if tuned_model_name:
                    values["client"] = TextGenerationModel.get_tuned_model(
                        tuned_model_name
                    )
                else:
                    values["client"] = TextGenerationModel.from_pretrained(model_name)
            else:
                from vertexai.preview.language_models import CodeGenerationModel

                values["client"] = CodeGenerationModel.from_pretrained(model_name)
        except ImportError:
            raise_vertex_import_error()
        return values

    async def _acall(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call Vertex model to get predictions based on the prompt.

        Args:
            prompt: The prompt to pass into the model.
            stop: A list of stop words (optional).
            run_manager: A callback manager for async interaction with LLMs.

        Returns:
            The string generated by the model.
        """
        return await asyncio.wrap_future(
            self._get_task_executor().submit(self._predict, prompt, stop)
        )

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call Vertex model to get predictions based on the prompt.

        Args:
            prompt: The prompt to pass into the model.
            stop: A list of stop words (optional).
            run_manager: A callback manager for the LLM run, optional.

        Returns:
            The string generated by the model.
        """
        return self._predict(prompt, stop, **kwargs)
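A brief sketch (model names are illustrative and follow the ``is_codey_model`` check above; it assumes the Vertex AI SDK is installed and GCP credentials are configured): a model whose name contains "code" is loaded with ``CodeGenerationModel`` and its default parameters drop ``top_p``/``top_k``:

    from langchain.llms import VertexAI

    text_llm = VertexAI(model_name="text-bison", temperature=0.2)
    code_llm = VertexAI(model_name="code-bison", max_output_tokens=256)
    print(code_llm._default_params)  # no top_k / top_p for Codey models
    # print(text_llm("Write a haiku about the ocean."))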
Source code for langchain.llms.amazon_api_gateway
Source: https://api.python.langchain.com/en/latest/_modules/langchain/llms/amazon_api_gateway.html

from typing import Any, Dict, List, Mapping, Optional

import requests
from pydantic import Extra

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens


class ContentHandlerAmazonAPIGateway:
    """Adapter class to prepare the inputs from Langchain to a format
    that LLM model expects. Also, provides helper function to extract
    the generated text from the model response."""

    @classmethod
    def transform_input(
        cls, prompt: str, model_kwargs: Dict[str, Any]
    ) -> Dict[str, Any]:
        return {"inputs": prompt, "parameters": model_kwargs}

    @classmethod
    def transform_output(cls, response: Any) -> str:
        return response.json()[0]["generated_text"]


class AmazonAPIGateway(LLM):
    """Wrapper around a custom Amazon API Gateway endpoint."""

    api_url: str
    """API Gateway URL"""

    headers: Optional[Dict] = None
    """API Gateway HTTP Headers to send, e.g. for authentication"""

    model_kwargs: Optional[Dict] = None
    """Key word arguments to pass to the model."""

    content_handler: ContentHandlerAmazonAPIGateway = ContentHandlerAmazonAPIGateway()
    """The content handler class that provides an input and
    output transform functions to handle formats between LLM
    and the endpoint.
    """

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        _model_kwargs = self.model_kwargs or {}
        return {
            **{"endpoint_name": self.api_url},
            **{"model_kwargs": _model_kwargs},
        }

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "amazon_api_gateway"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call out to Amazon API Gateway model.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = llm("Tell me a joke.")
        """
        _model_kwargs = self.model_kwargs or {}
        payload = self.content_handler.transform_input(prompt, _model_kwargs)

        try:
            response = requests.post(
                self.api_url,
                headers=self.headers,
                json=payload,
            )
            text = self.content_handler.transform_output(response)
        except Exception as error:
            raise ValueError(f"Error raised by the service: {error}")

        if stop is not None:
            text = enforce_stop_tokens(text, stop)

        return text
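A hedged example of pointing the wrapper at an endpoint (the URL and API-key header are placeholders): the default ContentHandlerAmazonAPIGateway assumes the backing model accepts {"inputs": ..., "parameters": ...} and returns [{"generated_text": ...}], as in the transform functions above:

    from langchain.llms import AmazonAPIGateway

    llm = AmazonAPIGateway(
        api_url="https://<api-id>.execute-api.us-east-1.amazonaws.com/prod/llm",  # placeholder
        headers={"x-api-key": "MY_API_KEY"},  # placeholder
        model_kwargs={"max_new_tokens": 128, "temperature": 0.7},
    )
    # print(llm("Tell me a joke.", stop=["\n\n"]))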
4f724f8ba27c-0
Source code for langchain.llms.anyscale """Wrapper around Anyscale""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env [docs]class Anyscale(LLM): """Wrapper around Anyscale Services. To use, you should have the environment variable ``ANYSCALE_SERVICE_URL``, ``ANYSCALE_SERVICE_ROUTE`` and ``ANYSCALE_SERVICE_TOKEN`` set with your Anyscale Service, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Anyscale anyscale = Anyscale(anyscale_service_url="SERVICE_URL", anyscale_service_route="SERVICE_ROUTE", anyscale_service_token="SERVICE_TOKEN") # Use Ray for distributed processing import ray prompt_list=[] @ray.remote def send_query(llm, prompt): resp = llm(prompt) return resp futures = [send_query.remote(anyscale, prompt) for prompt in prompt_list] results = ray.get(futures) """ model_kwargs: Optional[dict] = None """Key word arguments to pass to the model. Reserved for future use""" anyscale_service_url: Optional[str] = None anyscale_service_route: Optional[str] = None anyscale_service_token: Optional[str] = None [docs] class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] @root_validator()
https://api.python.langchain.com/en/latest/_modules/langchain/llms/anyscale.html
4f724f8ba27c-1
extra = Extra.forbid [docs] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" anyscale_service_url = get_from_dict_or_env( values, "anyscale_service_url", "ANYSCALE_SERVICE_URL" ) anyscale_service_route = get_from_dict_or_env( values, "anyscale_service_route", "ANYSCALE_SERVICE_ROUTE" ) anyscale_service_token = get_from_dict_or_env( values, "anyscale_service_token", "ANYSCALE_SERVICE_TOKEN" ) if anyscale_service_url.endswith("/"): anyscale_service_url = anyscale_service_url[:-1] if not anyscale_service_route.startswith("/"): anyscale_service_route = "/" + anyscale_service_route try: anyscale_service_endpoint = f"{anyscale_service_url}/-/routes" headers = {"Authorization": f"Bearer {anyscale_service_token}"} requests.get(anyscale_service_endpoint, headers=headers) except requests.exceptions.RequestException as e: raise ValueError(e) values["anyscale_service_url"] = anyscale_service_url values["anyscale_service_route"] = anyscale_service_route values["anyscale_service_token"] = anyscale_service_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "anyscale_service_url": self.anyscale_service_url, "anyscale_service_route": self.anyscale_service_route, } @property def _llm_type(self) -> str: """Return type of llm.""" return "anyscale" def _call(
https://api.python.langchain.com/en/latest/_modules/langchain/llms/anyscale.html
4f724f8ba27c-2
return "anyscale" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to Anyscale Service endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = anyscale("Tell me a joke.") """ anyscale_service_endpoint = ( f"{self.anyscale_service_url}{self.anyscale_service_route}" ) headers = {"Authorization": f"Bearer {self.anyscale_service_token}"} body = {"prompt": prompt} resp = requests.post(anyscale_service_endpoint, headers=headers, json=body) if resp.status_code != 200: raise ValueError( f"Error returned by service, status code {resp.status_code}" ) text = resp.text if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text
https://api.python.langchain.com/en/latest/_modules/langchain/llms/anyscale.html
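The listing above shows that ``Anyscale`` reads its service settings from the environment in ``validate_environment`` and then POSTs the prompt to ``{service_url}{service_route}``. The snippet below is a minimal usage sketch; the URL, route and token values are placeholders, and constructing the object only succeeds if they point at a reachable Anyscale Service (the validator probes the ``/-/routes`` endpoint).

.. code-block:: python

    import os
    from langchain.llms import Anyscale

    # Placeholder values; point these at your own Anyscale Service.
    os.environ["ANYSCALE_SERVICE_URL"] = "https://my-service.anyscale.example"
    os.environ["ANYSCALE_SERVICE_ROUTE"] = "/predict"
    os.environ["ANYSCALE_SERVICE_TOKEN"] = "my-service-token"

    llm = Anyscale()  # validate_environment() checks the service's /-/routes endpoint here
    print(llm("Write a haiku about distributed inference.", stop=["\n\n"]))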
Source code for langchain.llms.azureml_endpoint """Wrapper around AzureML Managed Online Endpoint API.""" import json import urllib.request from abc import abstractmethod from typing import Any, Dict, List, Mapping, Optional from pydantic import BaseModel, validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env [docs]class AzureMLEndpointClient(object): """Wrapper around AzureML Managed Online Endpoint Client.""" def __init__( self, endpoint_url: str, endpoint_api_key: str, deployment_name: str ) -> None: """Initialize the class.""" if not endpoint_api_key: raise ValueError("A key should be provided to invoke the endpoint") self.endpoint_url = endpoint_url self.endpoint_api_key = endpoint_api_key self.deployment_name = deployment_name [docs] def call(self, body: bytes) -> bytes: """call.""" # The azureml-model-deployment header will force the request to go to a # specific deployment. Remove this header to have the request observe the # endpoint traffic rules. headers = { "Content-Type": "application/json", "Authorization": ("Bearer " + self.endpoint_api_key), "azureml-model-deployment": self.deployment_name, } req = urllib.request.Request(self.endpoint_url, body, headers) response = urllib.request.urlopen(req, timeout=50) result = response.read() return result class ContentFormatterBase: """A handler class to transform request and response of AzureML endpoint to match with required schema. """ """ Example: .. code-block:: python
""" """ Example: .. code-block:: python class ContentFormatter(ContentFormatterBase): content_type = "application/json" accepts = "application/json" def format_request_payload( self, prompt: str, model_kwargs: Dict ) -> bytes: input_str = json.dumps( { "inputs": {"input_string": [prompt]}, "parameters": model_kwargs, } ) return str.encode(input_str) def format_response_payload(self, output: str) -> str: response_json = json.loads(output) return response_json[0]["0"] """ content_type: Optional[str] = "application/json" """The MIME type of the input data passed to the endpoint""" accepts: Optional[str] = "application/json" """The MIME type of the response data returned form the endpoint""" @abstractmethod def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes: """Formats the request body according to the input schema of the model. Returns bytes or seekable file like object in the format specified in the content_type request header. """ @abstractmethod def format_response_payload(self, output: bytes) -> str: """Formats the response body according to the output schema of the model. Returns the data type that is received from the response. """ [docs]class OSSContentFormatter(ContentFormatterBase): """Content handler for LLMs from the OSS catalog.""" [docs] def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes: input_str = json.dumps(
input_str = json.dumps( {"inputs": {"input_string": [prompt]}, "parameters": model_kwargs} ) return str.encode(input_str) [docs] def format_response_payload(self, output: bytes) -> str: response_json = json.loads(output) return response_json[0]["0"] [docs]class HFContentFormatter(ContentFormatterBase): """Content handler for LLMs from the HuggingFace catalog.""" [docs] def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes: input_str = json.dumps({"inputs": [prompt], "parameters": model_kwargs}) return str.encode(input_str) [docs] def format_response_payload(self, output: bytes) -> str: response_json = json.loads(output) return response_json[0][0]["generated_text"] [docs]class DollyContentFormatter(ContentFormatterBase): """Content handler for the Dolly-v2-12b model.""" [docs] def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes: input_str = json.dumps( {"input_data": {"input_string": [prompt]}, "parameters": model_kwargs} ) return str.encode(input_str) [docs] def format_response_payload(self, output: bytes) -> str: response_json = json.loads(output) return response_json[0] [docs]class AzureMLOnlineEndpoint(LLM, BaseModel): """Wrapper around Azure ML Hosted models using Managed Online Endpoints. Example: .. code-block:: python azure_llm = AzureMLOnlineEndpoint( endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/score",
endpoint_api_key="my-api-key", deployment_name="my-deployment-name", content_formatter=content_formatter, ) """ # noqa: E501 endpoint_url: str = "" """URL of pre-existing Endpoint. Should be passed to constructor or specified as env var `AZUREML_ENDPOINT_URL`.""" endpoint_api_key: str = "" """Authentication Key for Endpoint. Should be passed to constructor or specified as env var `AZUREML_ENDPOINT_API_KEY`.""" deployment_name: str = "" """Deployment Name for Endpoint. Should be passed to constructor or specified as env var `AZUREML_DEPLOYMENT_NAME`.""" http_client: Any = None #: :meta private: content_formatter: Any = None """The content formatter that provides an input and output transform function to handle formats between the LLM and the endpoint""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" [docs] @validator("http_client", always=True, allow_reuse=True) @classmethod def validate_client(cls, field_value: Any, values: Dict) -> AzureMLEndpointClient: """Validate that api key and python package exists in environment.""" endpoint_key = get_from_dict_or_env( values, "endpoint_api_key", "AZUREML_ENDPOINT_API_KEY" ) endpoint_url = get_from_dict_or_env( values, "endpoint_url", "AZUREML_ENDPOINT_URL" ) deployment_name = get_from_dict_or_env( values, "deployment_name", "AZUREML_DEPLOYMENT_NAME" ) http_client = AzureMLEndpointClient(endpoint_url, endpoint_key, deployment_name)
http_client = AzureMLEndpointClient(endpoint_url, endpoint_key, deployment_name) return http_client @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"deployment_name": self.deployment_name}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "azureml_endpoint" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any ) -> str: """Call out to an AzureML Managed Online endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = azureml_model("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} body = self.content_formatter.format_request_payload(prompt, _model_kwargs) endpoint_response = self.http_client.call(body) response = self.content_formatter.format_response_payload(endpoint_response) return response
https://api.python.langchain.com/en/latest/_modules/langchain/llms/azureml_endpoint.html
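Because every AzureML deployment exposes its own request/response schema, ``AzureMLOnlineEndpoint`` delegates (de)serialization to a ``ContentFormatterBase`` subclass, as the abstract methods above show. The sketch below is illustrative only: the payload shape (``{"prompt": ...}`` in, ``{"generated_text": ...}`` out), the endpoint URL, key and deployment name are all assumptions you would replace with your deployment's actual schema and credentials.

.. code-block:: python

    import json
    from typing import Dict

    from langchain.llms.azureml_endpoint import (
        AzureMLOnlineEndpoint,
        ContentFormatterBase,
    )


    class MyContentFormatter(ContentFormatterBase):
        """Hypothetical formatter for an endpoint expecting {"prompt": ...}."""

        def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
            return str.encode(json.dumps({"prompt": prompt, **model_kwargs}))

        def format_response_payload(self, output: bytes) -> str:
            return json.loads(output)["generated_text"]


    llm = AzureMLOnlineEndpoint(
        endpoint_url="https://<your-endpoint>.<region>.inference.ml.azure.com/score",
        endpoint_api_key="my-api-key",           # placeholder
        deployment_name="my-deployment-name",    # placeholder
        content_formatter=MyContentFormatter(),
        model_kwargs={"temperature": 0.2, "max_new_tokens": 100},
    )
    print(llm("Tell me a joke."))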
Source code for langchain.llms.stochasticai """Wrapper around StochasticAI APIs.""" import logging import time from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class StochasticAI(LLM): """Wrapper around StochasticAI large language models. To use, you should have the environment variable ``STOCHASTICAI_API_KEY`` set with your API key. Example: .. code-block:: python from langchain.llms import StochasticAI stochasticai = StochasticAI(api_url="") """ api_url: str = "" """The API URL of the StochasticAI model to call.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" stochasticai_api_key: Optional[str] = None [docs] class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra:
if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values [docs] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" stochasticai_api_key = get_from_dict_or_env( values, "stochasticai_api_key", "STOCHASTICAI_API_KEY" ) values["stochasticai_api_key"] = stochasticai_api_key return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"endpoint_url": self.api_url}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "stochasticai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to StochasticAI's complete endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python
The string generated by the model. Example: .. code-block:: python response = stochasticai("Tell me a joke.") """ params = self.model_kwargs or {} params = {**params, **kwargs} response_post = requests.post( url=self.api_url, json={"prompt": prompt, "params": params}, headers={ "apiKey": f"{self.stochasticai_api_key}", "Accept": "application/json", "Content-Type": "application/json", }, ) response_post.raise_for_status() response_post_json = response_post.json() completed = False while not completed: response_get = requests.get( url=response_post_json["data"]["responseUrl"], headers={ "apiKey": f"{self.stochasticai_api_key}", "Accept": "application/json", "Content-Type": "application/json", }, ) response_get.raise_for_status() response_get_json = response_get.json()["data"] text = response_get_json.get("completion") completed = text is not None time.sleep(0.5) text = text[0] if stop is not None: # Stop tokens are not enforced by the model parameters, # so apply them to the returned text here. text = enforce_stop_tokens(text, stop) return text
https://api.python.langchain.com/en/latest/_modules/langchain/llms/stochasticai.html
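A small usage sketch for the class above. As ``_call`` shows, the request is submitted to ``api_url`` and the completion is then polled from the returned ``responseUrl``. The model URL below is a placeholder, not a guaranteed endpoint; use the submit URL shown for your model in the StochasticAI console.

.. code-block:: python

    import os
    from langchain.llms import StochasticAI

    os.environ["STOCHASTICAI_API_KEY"] = "my-api-key"  # placeholder key

    llm = StochasticAI(
        api_url="https://api.stochastic.ai/v1/modelApi/submit/my-model",  # placeholder URL
        model_kwargs={"temperature": 0.5, "max_tokens": 128},
    )
    print(llm("Summarize what long polling is in one sentence."))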
Source code for langchain.llms.ctransformers """Wrapper around the C Transformers library.""" from typing import Any, Dict, Optional, Sequence from pydantic import root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM [docs]class CTransformers(LLM): """Wrapper around the C Transformers LLM interface. To use, you should have the ``ctransformers`` python package installed. See https://github.com/marella/ctransformers Example: .. code-block:: python from langchain.llms import CTransformers llm = CTransformers(model="/path/to/ggml-gpt-2.bin", model_type="gpt2") """ client: Any #: :meta private: model: str """The path to a model file or directory or the name of a Hugging Face Hub model repo.""" model_type: Optional[str] = None """The model type.""" model_file: Optional[str] = None """The name of the model file in repo or directory.""" config: Optional[Dict[str, Any]] = None """The config parameters. See https://github.com/marella/ctransformers#config""" lib: Optional[str] = None """The path to a shared library or one of `avx2`, `avx`, `basic`.""" @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return { "model": self.model, "model_type": self.model_type, "model_file": self.model_file, "config": self.config, } @property
"config": self.config, } @property def _llm_type(self) -> str: """Return type of llm.""" return "ctransformers" [docs] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that ``ctransformers`` package is installed.""" try: from ctransformers import AutoModelForCausalLM except ImportError: raise ImportError( "Could not import `ctransformers` package. " "Please install it with `pip install ctransformers`" ) config = values["config"] or {} values["client"] = AutoModelForCausalLM.from_pretrained( values["model"], model_type=values["model_type"], model_file=values["model_file"], lib=values["lib"], **config, ) return values def _call( self, prompt: str, stop: Optional[Sequence[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Generate text from a prompt. Args: prompt: The prompt to generate text from. stop: A list of sequences to stop generation when encountered. Returns: The generated text. Example: .. code-block:: python response = llm("Tell me a joke.") """ text = [] _run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager() for chunk in self.client(prompt, stop=stop, stream=True): text.append(chunk)
text.append(chunk) _run_manager.on_llm_new_token(chunk, verbose=self.verbose) return "".join(text)
https://api.python.langchain.com/en/latest/_modules/langchain/llms/ctransformers.html
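Since ``_call`` above always iterates the client in streaming mode and reports every chunk through ``on_llm_new_token``, attaching a streaming callback prints tokens as they are produced. A usage sketch follows; the model repo and config keys are illustrative, and any GGML model supported by ``ctransformers`` would work the same way.

.. code-block:: python

    from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
    from langchain.llms import CTransformers

    llm = CTransformers(
        model="marella/gpt-2-ggml",   # illustrative Hugging Face Hub repo with GGML weights
        model_type="gpt2",
        config={"max_new_tokens": 64, "temperature": 0.8},
        callbacks=[StreamingStdOutCallbackHandler()],  # receives on_llm_new_token events
    )
    print(llm("AI is going to"))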
Source code for langchain.llms.base """Base interface for large language models to expose.""" import asyncio import inspect import json import warnings from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union import yaml from pydantic import Field, root_validator, validator import langchain from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks, ) from langchain.load.dump import dumpd from langchain.schema import ( AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string, ) def _get_verbosity() -> bool: return langchain.verbose [docs]def get_prompts( params: Dict[str, Any], prompts: List[str] ) -> Tuple[Dict[int, List], str, List[int], List[str]]: """Get prompts that are already cached.""" llm_string = str(sorted([(k, v) for k, v in params.items()])) missing_prompts = [] missing_prompt_idxs = [] existing_prompts = {} for i, prompt in enumerate(prompts): if langchain.llm_cache is not None: cache_val = langchain.llm_cache.lookup(prompt, llm_string) if isinstance(cache_val, list): existing_prompts[i] = cache_val else: missing_prompts.append(prompt) missing_prompt_idxs.append(i)
else: missing_prompts.append(prompt) missing_prompt_idxs.append(i) return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts [docs]def update_cache( existing_prompts: Dict[int, List], llm_string: str, missing_prompt_idxs: List[int], new_results: LLMResult, prompts: List[str], ) -> Optional[dict]: """Update the cache and get the LLM output.""" for i, result in enumerate(new_results.generations): existing_prompts[missing_prompt_idxs[i]] = result prompt = prompts[missing_prompt_idxs[i]] if langchain.llm_cache is not None: langchain.llm_cache.update(prompt, llm_string, result) llm_output = new_results.llm_output return llm_output [docs]class BaseLLM(BaseLanguageModel, ABC): """LLM wrapper should take in a prompt and return a string.""" cache: Optional[bool] = None verbose: bool = Field(default_factory=_get_verbosity) """Whether to print out response text.""" callbacks: Callbacks = Field(default=None, exclude=True) callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) tags: Optional[List[str]] = Field(default=None, exclude=True) """Tags to add to the run trace.""" [docs] class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True [docs] @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: """Raise deprecation warning if callback_manager is used.""" if values.get("callback_manager") is not None: warnings.warn(
if values.get("callback_manager") is not None: warnings.warn( "callback_manager is deprecated. Please use callbacks instead.", DeprecationWarning, ) values["callbacks"] = values.pop("callback_manager", None) return values [docs] @validator("verbose", pre=True, always=True) def set_verbose(cls, verbose: Optional[bool]) -> bool: """If verbose is None, set it. This allows users to pass in None as verbose to access the global setting. """ if verbose is None: return _get_verbosity() else: return verbose @abstractmethod def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompts.""" @abstractmethod async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompts.""" [docs] def generate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> LLMResult: prompt_strings = [p.to_string() for p in prompts] return self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs)
return self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs) [docs] async def agenerate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> LLMResult: prompt_strings = [p.to_string() for p in prompts] return await self.agenerate( prompt_strings, stop=stop, callbacks=callbacks, **kwargs ) def _generate_helper( self, prompts: List[str], stop: Optional[List[str]], run_managers: List[CallbackManagerForLLMRun], new_arg_supported: bool, **kwargs: Any, ) -> LLMResult: try: output = ( self._generate( prompts, stop=stop, # TODO: support multiple run managers run_manager=run_managers[0] if run_managers else None, **kwargs, ) if new_arg_supported else self._generate(prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: for run_manager in run_managers: run_manager.on_llm_error(e) raise e flattened_outputs = output.flatten() for manager, flattened_output in zip(run_managers, flattened_outputs): manager.on_llm_end(flattened_output) if run_managers: output.run = [ RunInfo(run_id=run_manager.run_id) for run_manager in run_managers ] return output [docs] def generate( self, prompts: List[str],
[docs] def generate( self, prompts: List[str], stop: Optional[List[str]] = None, callbacks: Callbacks = None, *, tags: Optional[List[str]] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" if not isinstance(prompts, list): raise ValueError( "Argument 'prompts' is expected to be of type List[str], received" f" argument of type {type(prompts)}." ) params = self.dict() params["stop"] = stop options = {"stop": stop} ( existing_prompts, llm_string, missing_prompt_idxs, missing_prompts, ) = get_prompts(params, prompts) disregard_cache = self.cache is not None and not self.cache callback_manager = CallbackManager.configure( callbacks, self.callbacks, self.verbose, tags, self.tags ) new_arg_supported = inspect.signature(self._generate).parameters.get( "run_manager" ) if langchain.llm_cache is None or disregard_cache: if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." ) run_managers = callback_manager.on_llm_start( dumpd(self), prompts, invocation_params=params, options=options ) output = self._generate_helper( prompts, stop, run_managers, bool(new_arg_supported), **kwargs ) return output if len(missing_prompts) > 0: run_managers = callback_manager.on_llm_start(
run_managers = callback_manager.on_llm_start( dumpd(self), missing_prompts, invocation_params=params, options=options ) new_results = self._generate_helper( missing_prompts, stop, run_managers, bool(new_arg_supported), **kwargs ) llm_output = update_cache( existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts ) run_info = ( [RunInfo(run_id=run_manager.run_id) for run_manager in run_managers] if run_managers else None ) else: llm_output = {} run_info = None generations = [existing_prompts[i] for i in range(len(prompts))] return LLMResult(generations=generations, llm_output=llm_output, run=run_info) async def _agenerate_helper( self, prompts: List[str], stop: Optional[List[str]], run_managers: List[AsyncCallbackManagerForLLMRun], new_arg_supported: bool, **kwargs: Any, ) -> LLMResult: try: output = ( await self._agenerate( prompts, stop=stop, run_manager=run_managers[0] if run_managers else None, **kwargs, ) if new_arg_supported else await self._agenerate(prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: await asyncio.gather( *[run_manager.on_llm_error(e) for run_manager in run_managers] ) raise e flattened_outputs = output.flatten() await asyncio.gather(
raise e flattened_outputs = output.flatten() await asyncio.gather( *[ run_manager.on_llm_end(flattened_output) for run_manager, flattened_output in zip( run_managers, flattened_outputs ) ] ) if run_managers: output.run = [ RunInfo(run_id=run_manager.run_id) for run_manager in run_managers ] return output [docs] async def agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, callbacks: Callbacks = None, *, tags: Optional[List[str]] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" params = self.dict() params["stop"] = stop options = {"stop": stop} ( existing_prompts, llm_string, missing_prompt_idxs, missing_prompts, ) = get_prompts(params, prompts) disregard_cache = self.cache is not None and not self.cache callback_manager = AsyncCallbackManager.configure( callbacks, self.callbacks, self.verbose, tags, self.tags ) new_arg_supported = inspect.signature(self._agenerate).parameters.get( "run_manager" ) if langchain.llm_cache is None or disregard_cache: if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." ) run_managers = await callback_manager.on_llm_start( dumpd(self), prompts, invocation_params=params, options=options )
dumpd(self), prompts, invocation_params=params, options=options ) output = await self._agenerate_helper( prompts, stop, run_managers, bool(new_arg_supported), **kwargs ) return output if len(missing_prompts) > 0: run_managers = await callback_manager.on_llm_start( dumpd(self), missing_prompts, invocation_params=params, options=options ) new_results = await self._agenerate_helper( missing_prompts, stop, run_managers, bool(new_arg_supported), **kwargs ) llm_output = update_cache( existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts ) run_info = ( [RunInfo(run_id=run_manager.run_id) for run_manager in run_managers] if run_managers else None ) else: llm_output = {} run_info = None generations = [existing_prompts[i] for i in range(len(prompts))] return LLMResult(generations=generations, llm_output=llm_output, run=run_info) [docs] def __call__( self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> str: """Check Cache and run the LLM on the given prompt and input.""" if not isinstance(prompt, str): raise ValueError( "Argument `prompt` is expected to be a string. Instead found " f"{type(prompt)}. If you want to run the LLM on multiple prompts, use " "`generate` instead." ) return (
"`generate` instead." ) return ( self.generate([prompt], stop=stop, callbacks=callbacks, **kwargs) .generations[0][0] .text ) async def _call_async( self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> str: """Check Cache and run the LLM on the given prompt and input.""" result = await self.agenerate( [prompt], stop=stop, callbacks=callbacks, **kwargs ) return result.generations[0][0].text [docs] def predict( self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any ) -> str: if stop is None: _stop = None else: _stop = list(stop) return self(text, stop=_stop, **kwargs) [docs] def predict_messages( self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any, ) -> BaseMessage: text = get_buffer_string(messages) if stop is None: _stop = None else: _stop = list(stop) content = self(text, stop=_stop, **kwargs) return AIMessage(content=content) [docs] async def apredict( self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any ) -> str: if stop is None: _stop = None else:
if stop is None: _stop = None else: _stop = list(stop) return await self._call_async(text, stop=_stop, **kwargs) [docs] async def apredict_messages( self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any, ) -> BaseMessage: text = get_buffer_string(messages) if stop is None: _stop = None else: _stop = list(stop) content = await self._call_async(text, stop=_stop, **kwargs) return AIMessage(content=content) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {} def __str__(self) -> str: """Get a string representation of the object for printing.""" cls_name = f"\033[1m{self.__class__.__name__}\033[0m" return f"{cls_name}\nParams: {self._identifying_params}" @property @abstractmethod def _llm_type(self) -> str: """Return type of llm.""" [docs] def dict(self, **kwargs: Any) -> Dict: """Return a dictionary of the LLM.""" starter_dict = dict(self._identifying_params) starter_dict["_type"] = self._llm_type return starter_dict [docs] def save(self, file_path: Union[Path, str]) -> None: """Save the LLM. Args: file_path: Path to file to save the LLM to. Example: .. code-block:: python
Example: .. code-block:: python llm.save(file_path="path/llm.yaml") """ # Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) # Fetch dictionary to save prompt_dict = self.dict() if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(prompt_dict, f, indent=4) elif save_path.suffix == ".yaml": with open(file_path, "w") as f: yaml.dump(prompt_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml") [docs]class LLM(BaseLLM): """LLM class that expect subclasses to implement a simpler call method. The purpose of this class is to expose a simpler interface for working with LLMs, rather than expect the user to implement the full _generate method. """ @abstractmethod def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Run the LLM on the given prompt and input.""" async def _acall( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str:
**kwargs: Any, ) -> str: """Run the LLM on the given prompt and input.""" raise NotImplementedError("Async generation not implemented for this LLM.") def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" # TODO: add caching here. generations = [] new_arg_supported = inspect.signature(self._call).parameters.get("run_manager") for prompt in prompts: text = ( self._call(prompt, stop=stop, run_manager=run_manager, **kwargs) if new_arg_supported else self._call(prompt, stop=stop, **kwargs) ) generations.append([Generation(text=text)]) return LLMResult(generations=generations) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" generations = [] new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager") for prompt in prompts: text = ( await self._acall(prompt, stop=stop, run_manager=run_manager, **kwargs) if new_arg_supported else await self._acall(prompt, stop=stop, **kwargs) ) generations.append([Generation(text=text)])
) generations.append([Generation(text=text)]) return LLMResult(generations=generations)
https://api.python.langchain.com/en/latest/_modules/langchain/llms/base.html
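The ``LLM`` helper class at the bottom of this module only requires ``_llm_type`` and ``_call``; ``generate``, caching, callbacks and the async plumbing all come from ``BaseLLM``. Below is a minimal toy subclass, useful mainly for seeing that machinery end to end (it simply reverses the prompt):

.. code-block:: python

    from typing import Any, List, Optional

    from langchain.callbacks.manager import CallbackManagerForLLMRun
    from langchain.llms.base import LLM


    class ReverseLLM(LLM):
        """Toy LLM that just reverses the prompt."""

        @property
        def _llm_type(self) -> str:
            return "reverse"

        def _call(
            self,
            prompt: str,
            stop: Optional[List[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
            **kwargs: Any,
        ) -> str:
            return prompt[::-1]


    llm = ReverseLLM()
    print(llm("hello"))                    # -> "olleh"
    result = llm.generate(["abc", "def"])  # LLMResult with one list of Generations per prompt
    print(result.generations[1][0].text)   # -> "fed"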
Source code for langchain.llms.clarifai """Wrapper around Clarifai's APIs.""" import logging from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class Clarifai(LLM): """Wrapper around Clarifai's large language models. To use, you should have an account on the Clarifai platform, the ``clarifai`` python package installed, and the environment variable ``CLARIFAI_PAT_KEY`` set with your PAT key, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Clarifai clarifai_llm = Clarifai(clarifai_pat_key=CLARIFAI_PAT_KEY, \ user_id=USER_ID, app_id=APP_ID, model_id=MODEL_ID) """ stub: Any #: :meta private: metadata: Any userDataObject: Any model_id: Optional[str] = None """Model id to use.""" model_version_id: Optional[str] = None """Model version id to use.""" app_id: Optional[str] = None """Clarifai application id to use.""" user_id: Optional[str] = None """Clarifai user id to use.""" clarifai_pat_key: Optional[str] = None api_base: str = "https://api.clarifai.com"
api_base: str = "https://api.clarifai.com" stop: Optional[List[str]] = None [docs] class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that we have all the required info to access the Clarifai platform and that the python package exists in the environment.""" values["clarifai_pat_key"] = get_from_dict_or_env( values, "clarifai_pat_key", "CLARIFAI_PAT_KEY" ) user_id = values.get("user_id") app_id = values.get("app_id") model_id = values.get("model_id") if values["clarifai_pat_key"] is None: raise ValueError("Please provide a clarifai_pat_key.") if user_id is None: raise ValueError("Please provide a user_id.") if app_id is None: raise ValueError("Please provide an app_id.") if model_id is None: raise ValueError("Please provide a model_id.") return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling the Clarifai API.""" return {} @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model_id": self.model_id}} @property def _llm_type(self) -> str: """Return type of llm.""" return "clarifai" def _call( self, prompt: str, stop: Optional[List[str]] = None,
prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any ) -> str: """Call out to Clarifai's PostModelOutputs endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = clarifai_llm("Tell me a joke.") """ try: from clarifai.auth.helper import ClarifaiAuthHelper from clarifai.client import create_stub from clarifai_grpc.grpc.api import ( resources_pb2, service_pb2, ) from clarifai_grpc.grpc.api.status import status_code_pb2 except ImportError: raise ImportError( "Could not import clarifai python package. " "Please install it with `pip install clarifai`." ) auth = ClarifaiAuthHelper( user_id=self.user_id, app_id=self.app_id, pat=self.clarifai_pat_key, base=self.api_base, ) self.userDataObject = auth.get_user_app_id_proto() self.stub = create_stub(auth) params = self._default_params if self.stop is not None and stop is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop is not None: params["stop_sequences"] = self.stop else: params["stop_sequences"] = stop # The userDataObject is created in the overview and
# The userDataObject is created in the overview and # is required when using a PAT # If version_id None, Defaults to the latest model version post_model_outputs_request = service_pb2.PostModelOutputsRequest( user_app_id=self.userDataObject, model_id=self.model_id, version_id=self.model_version_id, inputs=[ resources_pb2.Input( data=resources_pb2.Data(text=resources_pb2.Text(raw=prompt)) ) ], ) post_model_outputs_response = self.stub.PostModelOutputs( post_model_outputs_request ) if post_model_outputs_response.status.code != status_code_pb2.SUCCESS: logger.error(post_model_outputs_response.status) raise Exception( "Post model outputs failed, status: " + post_model_outputs_response.status.description ) text = post_model_outputs_response.outputs[0].data.text.raw # In order to make this consistent with other endpoints, we strip them. if stop is not None or self.stop is not None: text = enforce_stop_tokens(text, params["stop_sequences"]) return text
https://api.python.langchain.com/en/latest/_modules/langchain/llms/clarifai.html
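A usage sketch for the wrapper above. The PAT and the user/app/model identifiers are placeholders; they must refer to an existing model in your Clarifai account (or a public community model) for the ``PostModelOutputs`` call to succeed.

.. code-block:: python

    from langchain.llms import Clarifai

    llm = Clarifai(
        clarifai_pat_key="my-pat-key",        # placeholder personal access token
        user_id="my-user-id",                 # placeholder identifiers
        app_id="my-app-id",
        model_id="my-text-generation-model",
    )
    print(llm("Tell me a joke.", stop=["\n\n"]))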
Source code for langchain.llms.ai21 """Wrapper around AI21 APIs.""" from typing import Any, Dict, List, Optional import requests from pydantic import BaseModel, Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env [docs]class AI21PenaltyData(BaseModel): """Parameters for AI21 penalty data.""" scale: int = 0 applyToWhitespaces: bool = True applyToPunctuations: bool = True applyToNumbers: bool = True applyToStopwords: bool = True applyToEmojis: bool = True [docs]class AI21(LLM): """Wrapper around AI21 large language models. To use, you should have the environment variable ``AI21_API_KEY`` set with your API key. Example: .. code-block:: python from langchain.llms import AI21 ai21 = AI21(model="j2-jumbo-instruct") """ model: str = "j2-jumbo-instruct" """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" maxTokens: int = 256 """The maximum number of tokens to generate in the completion.""" minTokens: int = 0 """The minimum number of tokens to generate in the completion.""" topP: float = 1.0 """Total probability mass of tokens to consider at each step.""" presencePenalty: AI21PenaltyData = AI21PenaltyData() """Penalizes repeated tokens."""
"""Penalizes repeated tokens.""" countPenalty: AI21PenaltyData = AI21PenaltyData() """Penalizes repeated tokens according to count.""" frequencyPenalty: AI21PenaltyData = AI21PenaltyData() """Penalizes repeated tokens according to frequency.""" numResults: int = 1 """How many completions to generate for each prompt.""" logitBias: Optional[Dict[str, float]] = None """Adjust the probability of specific tokens being generated.""" ai21_api_key: Optional[str] = None stop: Optional[List[str]] = None base_url: Optional[str] = None """Base url to use, if None decides based on model name.""" [docs] class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" ai21_api_key = get_from_dict_or_env(values, "ai21_api_key", "AI21_API_KEY") values["ai21_api_key"] = ai21_api_key return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling AI21 API.""" return { "temperature": self.temperature, "maxTokens": self.maxTokens, "minTokens": self.minTokens, "topP": self.topP, "presencePenalty": self.presencePenalty.dict(), "countPenalty": self.countPenalty.dict(), "frequencyPenalty": self.frequencyPenalty.dict(), "numResults": self.numResults,
"numResults": self.numResults, "logitBias": self.logitBias, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "ai21" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to AI21's complete endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = ai21("Tell me a joke.") """ if self.stop is not None and stop is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop is not None: stop = self.stop elif stop is None: stop = [] if self.base_url is not None: base_url = self.base_url else: if self.model in ("j1-grande-instruct",): base_url = "https://api.ai21.com/studio/v1/experimental" else: base_url = "https://api.ai21.com/studio/v1" params = {**self._default_params, **kwargs} response = requests.post(
response = requests.post( url=f"{base_url}/{self.model}/complete", headers={"Authorization": f"Bearer {self.ai21_api_key}"}, json={"prompt": prompt, "stopSequences": stop, **params}, ) if response.status_code != 200: optional_detail = response.json().get("error") raise ValueError( f"AI21 /complete call failed with status code {response.status_code}." f" Details: {optional_detail}" ) response_json = response.json() return response_json["completions"][0]["data"]["text"]
https://api.python.langchain.com/en/latest/_modules/langchain/llms/ai21.html
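A usage sketch for the class above, including one of the structured penalty parameters. The API key is a placeholder (it can also be supplied via ``AI21_API_KEY``), and the parameter names keep the camelCase spelling the wrapper uses.

.. code-block:: python

    from langchain.llms import AI21
    from langchain.llms.ai21 import AI21PenaltyData

    llm = AI21(
        ai21_api_key="my-api-key",            # placeholder
        model="j2-jumbo-instruct",
        temperature=0.3,
        maxTokens=128,
        presencePenalty=AI21PenaltyData(scale=1),
    )
    print(llm("Write a tagline for a bakery.", stop=["\n"]))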
Source code for langchain.llms.loading """Base interface for loading large language models apis.""" import json from pathlib import Path from typing import Union import yaml from langchain.llms import type_to_cls_dict from langchain.llms.base import BaseLLM [docs]def load_llm_from_config(config: dict) -> BaseLLM: """Load LLM from Config Dict.""" if "_type" not in config: raise ValueError("Must specify an LLM Type in config") config_type = config.pop("_type") if config_type not in type_to_cls_dict: raise ValueError(f"Loading {config_type} LLM not supported") llm_cls = type_to_cls_dict[config_type] return llm_cls(**config) [docs]def load_llm(file: Union[str, Path]) -> BaseLLM: """Load LLM from file.""" # Convert file to Path object. if isinstance(file, str): file_path = Path(file) else: file_path = file # Load from either json or yaml. if file_path.suffix == ".json": with open(file_path) as f: config = json.load(f) elif file_path.suffix == ".yaml": with open(file_path, "r") as f: config = yaml.safe_load(f) else: raise ValueError("File type must be json or yaml") # Load the LLM from the config now. return load_llm_from_config(config)
https://api.python.langchain.com/en/latest/_modules/langchain/llms/loading.html
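``load_llm`` is the counterpart of ``BaseLLM.save``: the saved file carries the ``_type`` key emitted by ``dict()`` and is dispatched through ``type_to_cls_dict``. A short round-trip sketch follows; note that secrets such as API keys are not part of ``_identifying_params``, so they must still be available (for example as environment variables) when the LLM is re-created.

.. code-block:: python

    from langchain.llms import OpenAI
    from langchain.llms.loading import load_llm

    # Assumes OPENAI_API_KEY is set in the environment; the key itself is never serialized.
    llm = OpenAI(model_name="text-davinci-003", temperature=0.2)
    llm.save("llm.yaml")             # writes {"_type": "openai", "model_name": ..., ...}

    restored = load_llm("llm.yaml")  # looks up type_to_cls_dict["openai"] and rebuilds it
    assert restored.temperature == 0.2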
Source code for langchain.llms.fake """Fake LLM wrapper for testing purposes.""" from typing import Any, List, Mapping, Optional from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.llms.base import LLM [docs]class FakeListLLM(LLM): """Fake LLM wrapper for testing purposes.""" responses: List i: int = 0 @property def _llm_type(self) -> str: """Return type of llm.""" return "fake-list" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Return next response""" response = self.responses[self.i] self.i += 1 return response async def _acall( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Return next response""" response = self.responses[self.i] self.i += 1 return response @property def _identifying_params(self) -> Mapping[str, Any]: return {"responses": self.responses}
https://api.python.langchain.com/en/latest/_modules/langchain/llms/fake.html
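``FakeListLLM`` simply replays the ``responses`` list in order, which makes it handy for unit-testing chains and agents without hitting a real provider:

.. code-block:: python

    from langchain.llms.fake import FakeListLLM

    llm = FakeListLLM(responses=["first canned reply", "second canned reply"])

    assert llm("any prompt") == "first canned reply"
    assert llm("another prompt") == "second canned reply"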
Source code for langchain.llms.openai """Wrapper around OpenAI APIs.""" from __future__ import annotations import logging import sys import warnings from typing import ( AbstractSet, Any, Callable, Collection, Dict, Generator, List, Literal, Mapping, Optional, Set, Tuple, Union, ) from pydantic import Field, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.llms.base import BaseLLM from langchain.schema import Generation, LLMResult from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]def update_token_usage( keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any] ) -> None: """Update token usage.""" _keys_to_use = keys.intersection(response["usage"]) for _key in _keys_to_use: if _key not in token_usage: token_usage[_key] = response["usage"][_key] else: token_usage[_key] += response["usage"][_key] def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None: """Update response from the stream response.""" response["choices"][0]["text"] += stream_response["choices"][0]["text"] response["choices"][0]["finish_reason"] = stream_response["choices"][0][ "finish_reason"
"finish_reason" ] response["choices"][0]["logprobs"] = stream_response["choices"][0]["logprobs"] def _streaming_response_template() -> Dict[str, Any]: return { "choices": [ { "text": "", "finish_reason": None, "logprobs": None, } ] } def _create_retry_decorator(llm: Union[BaseOpenAI, OpenAIChat]) -> Callable[[Any], Any]: import openai min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(llm.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) [docs]def completion_with_retry(llm: Union[BaseOpenAI, OpenAIChat], **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return llm.client.create(**kwargs)
return llm.client.create(**kwargs) return _completion_with_retry(**kwargs) async def acompletion_with_retry( llm: Union[BaseOpenAI, OpenAIChat], **kwargs: Any ) -> Any: """Use tenacity to retry the async completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator async def _completion_with_retry(**kwargs: Any) -> Any: # Use OpenAI's async api https://github.com/openai/openai-python#async-api return await llm.client.acreate(**kwargs) return await _completion_with_retry(**kwargs) [docs]class BaseOpenAI(BaseLLM): """Wrapper around OpenAI large language models.""" @property def lc_secrets(self) -> Dict[str, str]: return {"openai_api_key": "OPENAI_API_KEY"} @property def lc_serializable(self) -> bool: return True client: Any #: :meta private: model_name: str = Field("text-davinci-003", alias="model") """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" max_tokens: int = 256 """The maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the models maximal context size.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" frequency_penalty: float = 0 """Penalizes repeated tokens according to frequency.""" presence_penalty: float = 0 """Penalizes repeated tokens.""" n: int = 1
"""Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt.""" best_of: int = 1 """Generates best_of completions server-side and returns the "best".""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" openai_api_key: Optional[str] = None openai_api_base: Optional[str] = None openai_organization: Optional[str] = None # to support explicit proxy for OpenAI openai_proxy: Optional[str] = None batch_size: int = 20 """Batch size to use when passing multiple documents to generate.""" request_timeout: Optional[Union[float, Tuple[float, float]]] = None """Timeout for requests to OpenAI completion API. Default is 600 seconds.""" logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" max_retries: int = 6 """Maximum number of retries to make when generating.""" streaming: bool = False """Whether to stream the results or not.""" allowed_special: Union[Literal["all"], AbstractSet[str]] = set() """Set of special tokens that are allowed。""" disallowed_special: Union[Literal["all"], Collection[str]] = "all" """Set of special tokens that are not allowed。""" tiktoken_model_name: Optional[str] = None """The model name to pass to tiktoken when using this class. Tiktoken is used to count the number of tokens in documents to constrain
Tiktoken is used to count the number of tokens in documents to constrain them to be under a certain limit. By default, when set to None, this will be the same as the embedding model name. However, there are some cases where you may want to use this Embedding class with a model name not supported by tiktoken. This can include when using Azure embeddings or when using one of the many model providers that expose an OpenAI-like API but with different models. In those cases, in order to avoid erroring when tiktoken is called, you can specify a model name to use here.""" def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]: # type: ignore """Initialize the OpenAI object.""" model_name = data.get("model_name", "") if model_name.startswith("gpt-3.5-turbo") or model_name.startswith("gpt-4"): warnings.warn( "You are trying to use a chat model. This way of initializing it is " "no longer supported. Instead, please use: " "`from langchain.chat_models import ChatOpenAI`" ) return OpenAIChat(**data) return super().__new__(cls) [docs] class Config: """Configuration for this pydantic object.""" allow_population_by_field_name = True [docs] @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = cls.all_required_field_names() extra = values.get("model_kwargs", {}) for field_name in list(values):
extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") if field_name not in all_required_field_names: logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) if invalid_model_kwargs: raise ValueError( f"Parameters {invalid_model_kwargs} should be specified explicitly. " f"Instead they were passed in as part of `model_kwargs` parameter." ) values["model_kwargs"] = extra return values [docs] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) values["openai_api_base"] = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", default="", ) values["openai_proxy"] = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) values["openai_organization"] = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) try: import openai
default="", ) try: import openai values["client"] = openai.Completion except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) if values["streaming"] and values["n"] > 1: raise ValueError("Cannot stream results when n > 1.") if values["streaming"] and values["best_of"] > 1: raise ValueError("Cannot stream results when best_of > 1.") return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" normal_params = { "temperature": self.temperature, "max_tokens": self.max_tokens, "top_p": self.top_p, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "n": self.n, "request_timeout": self.request_timeout, "logit_bias": self.logit_bias, } # Azure gpt-35-turbo doesn't support best_of # don't specify best_of if it is 1 if self.best_of > 1: normal_params["best_of"] = self.best_of return {**normal_params, **self.model_kwargs} def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Call out to OpenAI's endpoint with k unique prompts. Args:
"""Call out to OpenAI's endpoint with k unique prompts. Args: prompts: The prompts to pass into the model. stop: Optional list of stop words to use when generating. Returns: The full LLM output. Example: .. code-block:: python response = openai.generate(["Tell me a joke."]) """ # TODO: write a unit test for this params = self._invocation_params params = {**params, **kwargs} sub_prompts = self.get_sub_prompts(params, prompts, stop) choices = [] token_usage: Dict[str, int] = {} # Get the token usage from the response. # Includes prompt, completion, and total tokens used. _keys = {"completion_tokens", "prompt_tokens", "total_tokens"} for _prompts in sub_prompts: if self.streaming: if len(_prompts) > 1: raise ValueError("Cannot stream results with multiple prompts.") params["stream"] = True response = _streaming_response_template() for stream_resp in completion_with_retry( self, prompt=_prompts, **params ): if run_manager: run_manager.on_llm_new_token( stream_resp["choices"][0]["text"], verbose=self.verbose, logprobs=stream_resp["choices"][0]["logprobs"], ) _update_response(response, stream_resp) choices.extend(response["choices"]) else: response = completion_with_retry(self, prompt=_prompts, **params) choices.extend(response["choices"]) if not self.streaming: # Can't update token usage if streaming
if not self.streaming: # Can't update token usage if streaming update_token_usage(_keys, response, token_usage) return self.create_llm_result(choices, prompts, token_usage) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Call out to OpenAI's endpoint async with k unique prompts.""" params = self._invocation_params params = {**params, **kwargs} sub_prompts = self.get_sub_prompts(params, prompts, stop) choices = [] token_usage: Dict[str, int] = {} # Get the token usage from the response. # Includes prompt, completion, and total tokens used. _keys = {"completion_tokens", "prompt_tokens", "total_tokens"} for _prompts in sub_prompts: if self.streaming: if len(_prompts) > 1: raise ValueError("Cannot stream results with multiple prompts.") params["stream"] = True response = _streaming_response_template() async for stream_resp in await acompletion_with_retry( self, prompt=_prompts, **params ): if run_manager: await run_manager.on_llm_new_token( stream_resp["choices"][0]["text"], verbose=self.verbose, logprobs=stream_resp["choices"][0]["logprobs"], ) _update_response(response, stream_resp) choices.extend(response["choices"]) else: response = await acompletion_with_retry(self, prompt=_prompts, **params)
response = await acompletion_with_retry(self, prompt=_prompts, **params) choices.extend(response["choices"]) if not self.streaming: # Can't update token usage if streaming update_token_usage(_keys, response, token_usage) return self.create_llm_result(choices, prompts, token_usage) [docs] def get_sub_prompts( self, params: Dict[str, Any], prompts: List[str], stop: Optional[List[str]] = None, ) -> List[List[str]]: """Get the sub prompts for llm call.""" if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop if params["max_tokens"] == -1: if len(prompts) != 1: raise ValueError( "max_tokens set to -1 not supported for multiple inputs." ) params["max_tokens"] = self.max_tokens_for_prompt(prompts[0]) sub_prompts = [ prompts[i : i + self.batch_size] for i in range(0, len(prompts), self.batch_size) ] return sub_prompts [docs] def create_llm_result( self, choices: Any, prompts: List[str], token_usage: Dict[str, int] ) -> LLMResult: """Create the LLMResult from the choices and prompts.""" generations = [] for i, _ in enumerate(prompts): sub_choices = choices[i * self.n : (i + 1) * self.n] generations.append( [ Generation( text=choice["text"],
[docs]    def create_llm_result(
        self, choices: Any, prompts: List[str], token_usage: Dict[str, int]
    ) -> LLMResult:
        """Create the LLMResult from the choices and prompts."""
        generations = []
        for i, _ in enumerate(prompts):
            sub_choices = choices[i * self.n : (i + 1) * self.n]
            generations.append(
                [
                    Generation(
                        text=choice["text"],
                        generation_info=dict(
                            finish_reason=choice.get("finish_reason"),
                            logprobs=choice.get("logprobs"),
                        ),
                    )
                    for choice in sub_choices
                ]
            )
        llm_output = {"token_usage": token_usage, "model_name": self.model_name}
        return LLMResult(generations=generations, llm_output=llm_output)

[docs]    def stream(self, prompt: str, stop: Optional[List[str]] = None) -> Generator:
        """Call OpenAI with streaming flag and return the resulting generator.

        BETA: this is a beta feature while we figure out the right abstraction.
        Once that happens, this interface could change.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            A generator representing the stream of tokens from OpenAI.

        Example:
            .. code-block:: python

                generator = openai.stream("Tell me a joke.")
                for token in generator:
                    yield token
        """
        params = self.prep_streaming_params(stop)
        generator = self.client.create(prompt=prompt, **params)
        return generator

[docs]    def prep_streaming_params(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:
        """Prepare the params for streaming."""
        params = self._invocation_params
        if "best_of" in params and params["best_of"] != 1:
            raise ValueError("OpenAI only supports best_of == 1 for streaming")
        if stop is not None:
            if "stop" in params:
                raise ValueError("`stop` found in both the input and default params.")
            params["stop"] = stop
        params["stream"] = True
        return params
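``stream`` hands back the raw generator from the OpenAI client, so each item is a completion chunk in the OpenAI SDK's dict format (the same ``choices[0]["text"]`` shape consumed by ``_generate`` above). A usage sketch, assuming ``OPENAI_API_KEY`` is set:

    .. code-block:: python

        from langchain.llms import OpenAI

        llm = OpenAI(model_name="text-davinci-003", max_tokens=64)
        for chunk in llm.stream("Tell me a joke."):
            # Each chunk is a raw OpenAI completion delta.
            print(chunk["choices"][0]["text"], end="", flush=True)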
    @property
    def _invocation_params(self) -> Dict[str, Any]:
        """Get the parameters used to invoke the model."""
        openai_creds: Dict[str, Any] = {
            "api_key": self.openai_api_key,
            "api_base": self.openai_api_base,
            "organization": self.openai_organization,
        }
        if self.openai_proxy:
            import openai

            openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy}  # type: ignore[assignment]  # noqa: E501
        return {**openai_creds, **self._default_params}

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {**{"model_name": self.model_name}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "openai"

[docs]    def get_token_ids(self, text: str) -> List[int]:
        """Get the token IDs using the tiktoken package."""
        # tiktoken NOT supported for Python < 3.8
        if sys.version_info[1] < 8:
            return super().get_token_ids(text)
        try:
            import tiktoken
        except ImportError:
            raise ImportError(
                "Could not import tiktoken python package. "
                "This is needed in order to calculate get_num_tokens. "
                "Please install it with `pip install tiktoken`."
            )

        model_name = self.tiktoken_model_name or self.model_name
        try:
            enc = tiktoken.encoding_for_model(model_name)
        except KeyError:
            logger.warning("Warning: model not found. Using cl100k_base encoding.")
            model = "cl100k_base"
            enc = tiktoken.get_encoding(model)

        return enc.encode(
            text,
            allowed_special=self.allowed_special,
            disallowed_special=self.disallowed_special,
        )
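``get_token_ids`` defers to ``tiktoken``, so token counts can be reproduced outside LangChain. A sketch of the equivalent direct call:

    .. code-block:: python

        import tiktoken

        # Same lookup the method performs; it falls back to cl100k_base on KeyError.
        enc = tiktoken.encoding_for_model("text-davinci-003")
        token_ids = enc.encode("Tell me a joke.")
        print(len(token_ids))  # number of tokens the prompt will consume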
"This is needed in order to calculate get_num_tokens. " "Please install it with `pip install tiktoken`." ) model_name = self.tiktoken_model_name or self.model_name try: enc = tiktoken.encoding_for_model(model_name) except KeyError: logger.warning("Warning: model not found. Using cl100k_base encoding.") model = "cl100k_base" enc = tiktoken.get_encoding(model) return enc.encode( text, allowed_special=self.allowed_special, disallowed_special=self.disallowed_special, ) [docs] @staticmethod def modelname_to_contextsize(modelname: str) -> int: """Calculate the maximum number of tokens possible to generate for a model. Args: modelname: The modelname we want to know the context size for. Returns: The maximum context size Example: .. code-block:: python max_tokens = openai.modelname_to_contextsize("text-davinci-003") """ model_token_mapping = { "gpt-4": 8192, "gpt-4-0314": 8192, "gpt-4-0613": 8192, "gpt-4-32k": 32768, "gpt-4-32k-0314": 32768, "gpt-4-32k-0613": 32768, "gpt-3.5-turbo": 4096, "gpt-3.5-turbo-0301": 4096, "gpt-3.5-turbo-0613": 4096,
"gpt-3.5-turbo-0613": 4096, "gpt-3.5-turbo-16k": 16385, "gpt-3.5-turbo-16k-0613": 16385, "text-ada-001": 2049, "ada": 2049, "text-babbage-001": 2040, "babbage": 2049, "text-curie-001": 2049, "curie": 2049, "davinci": 2049, "text-davinci-003": 4097, "text-davinci-002": 4097, "code-davinci-002": 8001, "code-davinci-001": 8001, "code-cushman-002": 2048, "code-cushman-001": 2048, } # handling finetuned models if "ft-" in modelname: modelname = modelname.split(":")[0] context_size = model_token_mapping.get(modelname, None) if context_size is None: raise ValueError( f"Unknown model: {modelname}. Please provide a valid OpenAI model name." "Known models are: " + ", ".join(model_token_mapping.keys()) ) return context_size @property def max_context_size(self) -> int: """Get max context size for this model.""" return self.modelname_to_contextsize(self.model_name) [docs] def max_tokens_for_prompt(self, prompt: str) -> int:
[docs]class OpenAI(BaseOpenAI):
    """Wrapper around OpenAI large language models.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set with your API key.

    Any parameters that are valid to be passed to the openai.create call can be passed
    in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.llms import OpenAI

            openai = OpenAI(model_name="text-davinci-003")
    """

    @property
    def _invocation_params(self) -> Dict[str, Any]:
        return {**{"model": self.model_name}, **super()._invocation_params}
[docs]class AzureOpenAI(BaseOpenAI):
    """Wrapper around Azure-specific OpenAI large language models.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set with your API key.

    Any parameters that are valid to be passed to the openai.create call can be passed
    in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.llms import AzureOpenAI

            openai = AzureOpenAI(model_name="text-davinci-003")
    """

    deployment_name: str = ""
    """Deployment name to use."""
    openai_api_type: str = "azure"
    openai_api_version: str = ""

[docs]    @root_validator()
    def validate_azure_settings(cls, values: Dict) -> Dict:
        values["openai_api_version"] = get_from_dict_or_env(
            values,
            "openai_api_version",
            "OPENAI_API_VERSION",
        )
        values["openai_api_type"] = get_from_dict_or_env(
            values,
            "openai_api_type",
            "OPENAI_API_TYPE",
        )
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        return {
            **{"deployment_name": self.deployment_name},
            **super()._identifying_params,
        }

    @property
    def _invocation_params(self) -> Dict[str, Any]:
        openai_params = {
            "engine": self.deployment_name,
            "api_type": self.openai_api_type,
            "api_version": self.openai_api_version,
        }
        return {**openai_params, **super()._invocation_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "azure"
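A usage sketch for the Azure wrapper; the deployment name below is hypothetical, and the Azure-specific settings (``OPENAI_API_KEY``, ``OPENAI_API_BASE``, ``OPENAI_API_VERSION``, ``OPENAI_API_TYPE``) are assumed to be set for your resource:

    .. code-block:: python

        from langchain.llms import AzureOpenAI

        llm = AzureOpenAI(
            deployment_name="my-davinci-deployment",  # hypothetical deployment
            model_name="text-davinci-003",
        )
        print(llm("Tell me a joke."))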
[docs]class OpenAIChat(BaseLLM):
    """Wrapper around OpenAI Chat large language models.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set with your API key.

    Any parameters that are valid to be passed to the openai.create call can be passed
    in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.llms import OpenAIChat

            openaichat = OpenAIChat(model_name="gpt-3.5-turbo")
    """

    client: Any  #: :meta private:
    model_name: str = "gpt-3.5-turbo"
    """Model name to use."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""
    openai_api_key: Optional[str] = None
    openai_api_base: Optional[str] = None
    # to support explicit proxy for OpenAI
    openai_proxy: Optional[str] = None
    max_retries: int = 6
    """Maximum number of retries to make when generating."""
    prefix_messages: List = Field(default_factory=list)
    """Series of messages for Chat input."""
    streaming: bool = False
    """Whether to stream the results or not."""
    allowed_special: Union[Literal["all"], AbstractSet[str]] = set()
    """Set of special tokens that are allowed."""
    disallowed_special: Union[Literal["all"], Collection[str]] = "all"
    """Set of special tokens that are not allowed."""

[docs]    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = {field.alias for field in cls.__fields__.values()}
        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name not in all_required_field_names:
                if field_name in extra:
                    raise ValueError(f"Found {field_name} supplied twice.")
                extra[field_name] = values.pop(field_name)
        values["model_kwargs"] = extra
        return values
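``build_extra`` is what lets arbitrary OpenAI parameters be passed straight to the constructor: anything that is not a declared field is folded into ``model_kwargs``. A sketch:

    .. code-block:: python

        from langchain.llms import OpenAIChat

        # top_p and presence_penalty are not declared fields, so build_extra
        # moves them into model_kwargs and they are forwarded to the API call.
        llm = OpenAIChat(model_name="gpt-3.5-turbo", top_p=0.9, presence_penalty=0.5)
        assert llm.model_kwargs == {"top_p": 0.9, "presence_penalty": 0.5}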
"""Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values [docs] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) openai_api_base = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", default="", ) openai_proxy = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) openai_organization = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="" ) try: import openai openai.api_key = openai_api_key if openai_api_base: openai.api_base = openai_api_base if openai_organization: openai.organization = openai_organization if openai_proxy: openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501 except ImportError: raise ImportError(
    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling OpenAI API."""
        return self.model_kwargs

    def _get_chat_params(
        self, prompts: List[str], stop: Optional[List[str]] = None
    ) -> Tuple:
        if len(prompts) > 1:
            raise ValueError(
                f"OpenAIChat currently only supports single prompt, got {prompts}"
            )
        messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}]
        params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params}
        if stop is not None:
            if "stop" in params:
                raise ValueError("`stop` found in both the input and default params.")
            params["stop"] = stop
        if params.get("max_tokens") == -1:
            # for ChatGPT api, omitting max_tokens is equivalent to having no limit
            del params["max_tokens"]
        return messages, params
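``_get_chat_params`` adapts the single-prompt completion interface to the chat API by prepending ``prefix_messages`` and wrapping the prompt as a user message. An illustration of the resulting payload shape:

    .. code-block:: python

        # Standalone illustration of the message construction above.
        prefix_messages = [{"role": "system", "content": "You are a terse assistant."}]
        prompt = "Tell me a joke."
        messages = prefix_messages + [{"role": "user", "content": prompt}]
        # messages is what ultimately reaches openai.ChatCompletion.create(...)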
    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        messages, params = self._get_chat_params(prompts, stop)
        params = {**params, **kwargs}
        if self.streaming:
            response = ""
            params["stream"] = True
            for stream_resp in completion_with_retry(self, messages=messages, **params):
                token = stream_resp["choices"][0]["delta"].get("content", "")
                response += token
                if run_manager:
                    run_manager.on_llm_new_token(
                        token,
                    )
            return LLMResult(
                generations=[[Generation(text=response)]],
            )
        else:
            full_response = completion_with_retry(self, messages=messages, **params)
            llm_output = {
                "token_usage": full_response["usage"],
                "model_name": self.model_name,
            }
            return LLMResult(
                generations=[
                    [Generation(text=full_response["choices"][0]["message"]["content"])]
                ],
                llm_output=llm_output,
            )
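A usage sketch for the chat wrapper (note the deprecation warning in ``validate_environment`` points to ``ChatOpenAI`` as the preferred interface); assumes ``OPENAI_API_KEY`` is set:

    .. code-block:: python

        from langchain.llms import OpenAIChat

        llm = OpenAIChat(
            model_name="gpt-3.5-turbo",
            prefix_messages=[{"role": "system", "content": "You speak like a pirate."}],
        )
        print(llm("Tell me a joke."))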
    async def _agenerate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        messages, params = self._get_chat_params(prompts, stop)
        params = {**params, **kwargs}
        if self.streaming:
            response = ""
            params["stream"] = True
            async for stream_resp in await acompletion_with_retry(
                self, messages=messages, **params
            ):
                token = stream_resp["choices"][0]["delta"].get("content", "")
                response += token
                if run_manager:
                    await run_manager.on_llm_new_token(
                        token,
                    )
            return LLMResult(
                generations=[[Generation(text=response)]],
            )
        else:
            full_response = await acompletion_with_retry(
                self, messages=messages, **params
            )
            llm_output = {
                "token_usage": full_response["usage"],
                "model_name": self.model_name,
            }
            return LLMResult(
                generations=[
                    [Generation(text=full_response["choices"][0]["message"]["content"])]
                ],
                llm_output=llm_output,
            )

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {**{"model_name": self.model_name}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "openai-chat"
[docs]    def get_token_ids(self, text: str) -> List[int]:
        """Get the token IDs using the tiktoken package."""
        # tiktoken NOT supported for Python < 3.8
        if sys.version_info[1] < 8:
            return super().get_token_ids(text)
        try:
            import tiktoken
        except ImportError:
            raise ImportError(
                "Could not import tiktoken python package. "
                "This is needed in order to calculate get_num_tokens. "
                "Please install it with `pip install tiktoken`."
            )
        enc = tiktoken.encoding_for_model(self.model_name)
        return enc.encode(
            text,
            allowed_special=self.allowed_special,
            disallowed_special=self.disallowed_special,
        )