index | repo_id | file_path | content |
|---|---|---|---|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/titan_takeoff.py | from enum import Enum
from typing import Any, Iterator, List, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from pydantic import BaseModel, ConfigDict
from langchain_community.llms.utils import enforce_stop_tokens
class Device(str, Enum):
"""The device to use for inference, cuda or cpu"""
cuda = "cuda"
cpu = "cpu"
class ReaderConfig(BaseModel):
"""Configuration for the reader to be deployed in Titan Takeoff API."""
model_config = ConfigDict(
protected_namespaces=(),
)
model_name: str
"""The name of the model to use"""
device: Device = Device.cuda
"""The device to use for inference, cuda or cpu"""
consumer_group: str = "primary"
"""The consumer group to place the reader into"""
tensor_parallel: Optional[int] = None
"""The number of gpus you would like your model to be split across"""
max_seq_length: int = 512
"""The maximum sequence length to use for inference, defaults to 512"""
max_batch_size: int = 4
"""The max batch size for continuous batching of requests"""
class TitanTakeoff(LLM):
"""Titan Takeoff API LLMs.
Titan Takeoff is a wrapper to interface with the Takeoff Inference API for
generative text-to-text language models.
You can use this wrapper to send requests to a generative language model
and to deploy readers with Takeoff.
Examples:
This is an example of how to deploy a generative language model and send
requests.
.. code-block:: python
# Import the TitanTakeoff class from community package
import time
from langchain_community.llms import TitanTakeoff
# Specify the generative model reader you'd like to deploy
reader_1 = {
"model_name": "TheBloke/Llama-2-7b-Chat-AWQ",
"device": "cuda",
"tensor_parallel": 1,
"consumer_group": "llama"
}
# For every reader you pass in via the models arg, Takeoff will spin
# up a reader according to the specs you provide. If you don't
# specify the arg, no models are spun up and it is assumed you have
# already done this separately.
llm = TitanTakeoff(models=[reader_1])
# Wait for the reader to be deployed, time needed depends on the
# model size and your internet speed
time.sleep(60)
# Send the request to the `llama` consumer group, where we just spun
# up the Llama 2 7B chat model; the generated text is returned as a string
print(llm.invoke(
"Where can I see football?", consumer_group="llama"
))
# You can also send generation parameters to the model, any of the
# following can be passed in as kwargs:
# https://docs.titanml.co/docs/next/apis/Takeoff%20inference_REST_API/generate#request
# for instance:
print(llm.invoke(
"Where can I see football?", consumer_group="llama", max_new_tokens=100
))
"""
base_url: str = "http://localhost"
"""The base URL of the Titan Takeoff (Pro) server. Default = "http://localhost"."""
port: int = 3000
"""The port of the Titan Takeoff (Pro) server. Default = 3000."""
mgmt_port: int = 3001
"""The management port of the Titan Takeoff (Pro) server. Default = 3001."""
streaming: bool = False
"""Whether to stream the output. Default = False."""
client: Any = None
"""Takeoff Client Python SDK used to interact with Takeoff API"""
def __init__(
self,
base_url: str = "http://localhost",
port: int = 3000,
mgmt_port: int = 3001,
streaming: bool = False,
models: List[ReaderConfig] = [],
):
"""Initialize the Titan Takeoff language wrapper.
Args:
base_url (str, optional): The base URL where the Takeoff
Inference Server is listening. Defaults to `http://localhost`.
port (int, optional): What port is Takeoff Inference API
listening on. Defaults to 3000.
mgmt_port (int, optional): What port is Takeoff Management API
listening on. Defaults to 3001.
streaming (bool, optional): Whether to use the generate_stream
endpoint rather than generate by default, streaming the response.
Defaults to False. In practice the result is not significantly
different, as the streamed response is buffered and returned much
like the non-streamed response, but the run manager is invoked per
generated token.
models (List[ReaderConfig], optional): Any readers you'd like to
spin up on Takeoff. Defaults to [].
Raises:
ImportError: If you haven't installed takeoff-client, you will
get an ImportError. To remedy run `pip install 'takeoff-client>=0.4.0'`
"""
super().__init__( # type: ignore[call-arg]
base_url=base_url, port=port, mgmt_port=mgmt_port, streaming=streaming
)
try:
from takeoff_client import TakeoffClient
except ImportError:
raise ImportError(
"takeoff-client is required for TitanTakeoff. "
"Please install it with `pip install 'takeoff-client>=0.4.0'`."
)
self.client = TakeoffClient(
self.base_url, port=self.port, mgmt_port=self.mgmt_port
)
for model in models:
self.client.create_reader(model)
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "titan_takeoff"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Titan Takeoff (Pro) generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: Optional callback manager to use when streaming.
Returns:
The string generated by the model.
Example:
.. code-block:: python
model = TitanTakeoff()
prompt = "What is the capital of the United Kingdom?"
# Use of model(prompt), i.e. `__call__`, was deprecated in LangChain 0.1.7;
# use model.invoke(prompt) instead.
response = model.invoke(prompt)
"""
if self.streaming:
text_output = ""
for chunk in self._stream(
prompt=prompt,
stop=stop,
run_manager=run_manager,
):
text_output += chunk.text
return text_output
response = self.client.generate(prompt, **kwargs)
text = response["text"]
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Call out to Titan Takeoff (Pro) stream endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: Optional callback manager to use when streaming.
Yields:
GenerationChunk objects, each containing a string token.
Example:
.. code-block:: python
model = TitanTakeoff()
prompt = "What is the capital of the United Kingdom?"
response = model.stream(prompt)
# OR
model = TitanTakeoff(streaming=True)
response = model.invoke(prompt)
"""
response = self.client.generate_stream(prompt, **kwargs)
buffer = ""
for text in response:
buffer += text.data
if "data:" in buffer:
# Remove the first instance of "data:" from the buffer.
if buffer.startswith("data:"):
buffer = ""
if len(buffer.split("data:", 1)) == 2:
content, _ = buffer.split("data:", 1)
buffer = content.rstrip("\n")
# Trim the buffer to only have content after the "data:" part.
if buffer: # Ensure that there's content to process.
chunk = GenerationChunk(text=buffer)
buffer = "" # Reset buffer for the next set of data.
if run_manager:
run_manager.on_llm_new_token(token=chunk.text)
yield chunk
# Yield any remaining content in the buffer.
if buffer:
chunk = GenerationChunk(text=buffer.replace("</s>", ""))
if run_manager:
run_manager.on_llm_new_token(token=chunk.text)
yield chunk
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/human.py | from typing import Any, Callable, List, Mapping, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from pydantic import Field
from langchain_community.llms.utils import enforce_stop_tokens
def _display_prompt(prompt: str) -> None:
"""Displays the given prompt to the user."""
print(f"\n{prompt}") # noqa: T201
def _collect_user_input(
separator: Optional[str] = None, stop: Optional[List[str]] = None
) -> str:
"""Collects and returns user input as a single string."""
separator = separator or "\n"
lines = []
while True:
line = input()
if not line:
break
lines.append(line)
if stop and any(seq in line for seq in stop):
break
# Combine all lines into a single string
multi_line_input = separator.join(lines)
return multi_line_input
class HumanInputLLM(LLM):
"""User input as the response."""
input_func: Callable = Field(default_factory=lambda: _collect_user_input)
prompt_func: Callable[[str], None] = Field(default_factory=lambda: _display_prompt)
separator: str = "\n"
input_kwargs: Mapping[str, Any] = {}
prompt_kwargs: Mapping[str, Any] = {}
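# Illustrative usage -- the prompt is printed to stdout and whatever the
# user types (until a blank line or a stop sequence) is returned as the
# "model" response:
#   from langchain_community.llms import HumanInputLLM
#   llm = HumanInputLLM()
#   answer = llm.invoke("Please summarise the ticket below:")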
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""
Returns an empty dictionary as there are no identifying parameters.
"""
return {}
@property
def _llm_type(self) -> str:
"""Returns the type of LLM."""
return "human-input"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""
Displays the prompt to the user and returns their input as a response.
Args:
prompt (str): The prompt to be displayed to the user.
stop (Optional[List[str]]): A list of stop strings.
run_manager (Optional[CallbackManagerForLLMRun]): Currently not used.
Returns:
str: The user's input as a response.
"""
self.prompt_func(prompt, **self.prompt_kwargs)
user_input = self.input_func(
separator=self.separator, stop=stop, **self.input_kwargs
)
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the human themselves
user_input = enforce_stop_tokens(user_input, stop)
return user_input
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/azureml_endpoint.py | import json
import urllib.request
import warnings
from abc import abstractmethod
from enum import Enum
from typing import Any, Dict, List, Mapping, Optional
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, LLMResult
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, SecretStr, model_validator, validator
DEFAULT_TIMEOUT = 50
class AzureMLEndpointClient(object):
"""AzureML Managed Endpoint client."""
def __init__(
self,
endpoint_url: str,
endpoint_api_key: str,
deployment_name: str = "",
timeout: int = DEFAULT_TIMEOUT,
) -> None:
"""Initialize the class."""
if not endpoint_api_key or not endpoint_url:
raise ValueError(
"""A key/token and REST endpoint should
be provided to invoke the endpoint"""
)
self.endpoint_url = endpoint_url
self.endpoint_api_key = endpoint_api_key
self.deployment_name = deployment_name
self.timeout = timeout
def call(
self,
body: bytes,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> bytes:
"""call."""
# The azureml-model-deployment header will force the request to go to a
# specific deployment. Remove this header to have the request observe the
# endpoint traffic rules.
headers = {
"Content-Type": "application/json",
"Authorization": ("Bearer " + self.endpoint_api_key),
}
if self.deployment_name != "":
headers["azureml-model-deployment"] = self.deployment_name
req = urllib.request.Request(self.endpoint_url, body, headers)
response = urllib.request.urlopen(
req, timeout=kwargs.get("timeout", self.timeout)
)
result = response.read()
return result
class AzureMLEndpointApiType(str, Enum):
"""Azure ML endpoints API types. Use `dedicated` for models deployed in hosted
infrastructure (also known as Online Endpoints in Azure Machine Learning),
or `serverless` for models deployed as a service with
pay-as-you-go or PTU billing.
"""
dedicated = "dedicated"
realtime = "realtime" #: Deprecated
serverless = "serverless"
class ContentFormatterBase:
"""Transform request and response of AzureML endpoint to match with
required schema.
"""
"""
Example:
.. code-block:: python
class ContentFormatter(ContentFormatterBase):
content_type = "application/json"
accepts = "application/json"
def format_request_payload(
self,
prompt: str,
model_kwargs: Dict,
api_type: AzureMLEndpointApiType,
) -> bytes:
input_str = json.dumps(
{
"inputs": {"input_string": [prompt]},
"parameters": model_kwargs,
}
)
return str.encode(input_str)
def format_response_payload(
self, output: str, api_type: AzureMLEndpointApiType
) -> str:
response_json = json.loads(output)
return response_json[0]["0"]
"""
content_type: Optional[str] = "application/json"
"""The MIME type of the input data passed to the endpoint"""
accepts: Optional[str] = "application/json"
"""The MIME type of the response data returned from the endpoint"""
format_error_msg: str = (
"Error while formatting response payload for chat model of type "
"`{api_type}`. Are you using the right formatter for the deployed "
"model and endpoint type?"
)
@staticmethod
def escape_special_characters(prompt: str) -> str:
"""Escapes any special characters in `prompt`"""
escape_map = {
"\\": "\\\\",
'"': '\\"',
"\b": "\\b",
"\f": "\\f",
"\n": "\\n",
"\r": "\\r",
"\t": "\\t",
}
# Replace each occurrence of the specified characters with escaped versions
for escape_sequence, escaped_sequence in escape_map.items():
prompt = prompt.replace(escape_sequence, escaped_sequence)
return prompt
@property
def supported_api_types(self) -> List[AzureMLEndpointApiType]:
"""Supported APIs for the given formatter. Azure ML supports
deploying models using different hosting methods. Each method may have
a different API structure."""
return [AzureMLEndpointApiType.dedicated]
def format_request_payload(
self,
prompt: str,
model_kwargs: Dict,
api_type: AzureMLEndpointApiType = AzureMLEndpointApiType.dedicated,
) -> Any:
"""Formats the request body according to the input schema of
the model. Returns bytes or seekable file like object in the
format specified in the content_type request header.
"""
raise NotImplementedError()
@abstractmethod
def format_response_payload(
self,
output: bytes,
api_type: AzureMLEndpointApiType = AzureMLEndpointApiType.dedicated,
) -> Generation:
"""Formats the response body according to the output
schema of the model. Returns the data type that is
received from the response.
"""
class GPT2ContentFormatter(ContentFormatterBase):
"""Content handler for GPT2"""
@property
def supported_api_types(self) -> List[AzureMLEndpointApiType]:
return [AzureMLEndpointApiType.dedicated]
def format_request_payload( # type: ignore[override]
self, prompt: str, model_kwargs: Dict, api_type: AzureMLEndpointApiType
) -> bytes:
prompt = ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps(
{"inputs": {"input_string": [f'"{prompt}"']}, "parameters": model_kwargs}
)
return str.encode(request_payload)
def format_response_payload( # type: ignore[override]
self, output: bytes, api_type: AzureMLEndpointApiType
) -> Generation:
try:
choice = json.loads(output)[0]["0"]
except (KeyError, IndexError, TypeError) as e:
raise ValueError(self.format_error_msg.format(api_type=api_type)) from e # type: ignore[union-attr]
return Generation(text=choice)
class OSSContentFormatter(GPT2ContentFormatter):
"""Deprecated: Kept for backwards compatibility
Content handler for LLMs from the OSS catalog."""
content_formatter: Any = None
def __init__(self) -> None:
super().__init__()
warnings.warn(
"""`OSSContentFormatter` will be deprecated in the future.
Please use `GPT2ContentFormatter` instead.
"""
)
class HFContentFormatter(ContentFormatterBase):
"""Content handler for LLMs from the HuggingFace catalog."""
@property
def supported_api_types(self) -> List[AzureMLEndpointApiType]:
return [AzureMLEndpointApiType.dedicated]
def format_request_payload( # type: ignore[override]
self, prompt: str, model_kwargs: Dict, api_type: AzureMLEndpointApiType
) -> bytes:
prompt = ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps(
{"inputs": [f'"{prompt}"'], "parameters": model_kwargs}
)
return str.encode(request_payload)
def format_response_payload( # type: ignore[override]
self, output: bytes, api_type: AzureMLEndpointApiType
) -> Generation:
try:
choice = json.loads(output)[0]["0"]["generated_text"]
except (KeyError, IndexError, TypeError) as e:
raise ValueError(self.format_error_msg.format(api_type=api_type)) from e # type: ignore[union-attr]
return Generation(text=choice)
class DollyContentFormatter(ContentFormatterBase):
"""Content handler for the Dolly-v2-12b model"""
@property
def supported_api_types(self) -> List[AzureMLEndpointApiType]:
return [AzureMLEndpointApiType.dedicated]
def format_request_payload( # type: ignore[override]
self, prompt: str, model_kwargs: Dict, api_type: AzureMLEndpointApiType
) -> bytes:
prompt = ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps(
{
"input_data": {"input_string": [f'"{prompt}"']},
"parameters": model_kwargs,
}
)
return str.encode(request_payload)
def format_response_payload( # type: ignore[override]
self, output: bytes, api_type: AzureMLEndpointApiType
) -> Generation:
try:
choice = json.loads(output)[0]
except (KeyError, IndexError, TypeError) as e:
raise ValueError(self.format_error_msg.format(api_type=api_type)) from e # type: ignore[union-attr]
return Generation(text=choice)
class CustomOpenAIContentFormatter(ContentFormatterBase):
"""Content formatter for models that use the OpenAI like API scheme."""
@property
def supported_api_types(self) -> List[AzureMLEndpointApiType]:
return [AzureMLEndpointApiType.dedicated, AzureMLEndpointApiType.serverless]
def format_request_payload( # type: ignore[override]
self, prompt: str, model_kwargs: Dict, api_type: AzureMLEndpointApiType
) -> bytes:
"""Formats the request according to the chosen api"""
prompt = ContentFormatterBase.escape_special_characters(prompt)
if api_type in [
AzureMLEndpointApiType.dedicated,
AzureMLEndpointApiType.realtime,
]:
request_payload = json.dumps(
{
"input_data": {
"input_string": [f'"{prompt}"'],
"parameters": model_kwargs,
}
}
)
elif api_type == AzureMLEndpointApiType.serverless:
request_payload = json.dumps({"prompt": prompt, **model_kwargs})
else:
raise ValueError(
f"`api_type` {api_type} is not supported by this formatter"
)
return str.encode(request_payload)
def format_response_payload( # type: ignore[override]
self, output: bytes, api_type: AzureMLEndpointApiType
) -> Generation:
"""Formats response"""
if api_type in [
AzureMLEndpointApiType.dedicated,
AzureMLEndpointApiType.realtime,
]:
try:
choice = json.loads(output)[0]["0"]
except (KeyError, IndexError, TypeError) as e:
raise ValueError(self.format_error_msg.format(api_type=api_type)) from e # type: ignore[union-attr]
return Generation(text=choice)
if api_type == AzureMLEndpointApiType.serverless:
try:
choice = json.loads(output)["choices"][0]
if not isinstance(choice, dict):
raise TypeError(
"Endpoint response is not well formed for a chat "
"model. Expected `dict` but `{type(choice)}` was "
"received."
)
except (KeyError, IndexError, TypeError) as e:
raise ValueError(self.format_error_msg.format(api_type=api_type)) from e # type: ignore[union-attr]
return Generation(
text=choice["text"].strip(),
generation_info=dict(
finish_reason=choice.get("finish_reason"),
logprobs=choice.get("logprobs"),
),
)
raise ValueError(f"`api_type` {api_type} is not supported by this formatter")
class LlamaContentFormatter(CustomOpenAIContentFormatter):
"""Deprecated: Kept for backwards compatibility
Content formatter for Llama."""
content_formatter: Any = None
def __init__(self) -> None:
super().__init__()
warnings.warn(
"""`LlamaContentFormatter` will be deprecated in the future.
Please use `CustomOpenAIContentFormatter` instead.
"""
)
class AzureMLBaseEndpoint(BaseModel):
"""Azure ML Online Endpoint models."""
endpoint_url: str = ""
"""URL of pre-existing Endpoint. Should be passed to constructor or specified as
env var `AZUREML_ENDPOINT_URL`."""
endpoint_api_type: AzureMLEndpointApiType = AzureMLEndpointApiType.dedicated
"""Type of the endpoint being consumed. Possible values are `serverless` for
pay-as-you-go and `dedicated` for dedicated endpoints. """
endpoint_api_key: SecretStr = convert_to_secret_str("")
"""Authentication Key for Endpoint. Should be passed to constructor or specified as
env var `AZUREML_ENDPOINT_API_KEY`."""
deployment_name: str = ""
"""Deployment Name for Endpoint. NOT REQUIRED to call endpoint. Should be passed
to constructor or specified as env var `AZUREML_DEPLOYMENT_NAME`."""
timeout: int = DEFAULT_TIMEOUT
"""Request timeout for calls to the endpoint"""
http_client: Any = None #: :meta private:
max_retries: int = 1
content_formatter: Any = None
"""The content formatter that provides an input and output
transform function to handle formats between the LLM and
the endpoint"""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
model_config = ConfigDict(protected_namespaces=())
@model_validator(mode="before")
@classmethod
def validate_environ(cls, values: Dict) -> Any:
values["endpoint_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "endpoint_api_key", "AZUREML_ENDPOINT_API_KEY")
)
values["endpoint_url"] = get_from_dict_or_env(
values, "endpoint_url", "AZUREML_ENDPOINT_URL"
)
values["deployment_name"] = get_from_dict_or_env(
values, "deployment_name", "AZUREML_DEPLOYMENT_NAME", ""
)
values["endpoint_api_type"] = get_from_dict_or_env(
values,
"endpoint_api_type",
"AZUREML_ENDPOINT_API_TYPE",
AzureMLEndpointApiType.dedicated,
)
values["timeout"] = get_from_dict_or_env(
values,
"timeout",
"AZUREML_TIMEOUT",
str(DEFAULT_TIMEOUT),
)
return values
@validator("content_formatter")
def validate_content_formatter(
cls, field_value: Any, values: Dict
) -> ContentFormatterBase:
"""Validate that content formatter is supported by endpoint type."""
endpoint_api_type = values.get("endpoint_api_type")
if endpoint_api_type not in field_value.supported_api_types:
raise ValueError(
f"Content formatter f{type(field_value)} is not supported by this "
f"endpoint. Supported types are {field_value.supported_api_types} "
f"but endpoint is {endpoint_api_type}."
)
return field_value
@validator("endpoint_url")
def validate_endpoint_url(cls, field_value: Any) -> str:
"""Validate that endpoint url is complete."""
if field_value.endswith("/"):
field_value = field_value[:-1]
if field_value.endswith("inference.ml.azure.com"):
raise ValueError(
"`endpoint_url` should contain the full invocation URL including "
"`/score` for `endpoint_api_type='dedicated'` or `/completions` "
"or `/chat/completions` for `endpoint_api_type='serverless'`"
)
return field_value
@validator("endpoint_api_type")
def validate_endpoint_api_type(
cls, field_value: Any, values: Dict
) -> AzureMLEndpointApiType:
"""Validate that endpoint api type is compatible with the URL format."""
endpoint_url = values.get("endpoint_url")
if (
(
field_value == AzureMLEndpointApiType.dedicated
or field_value == AzureMLEndpointApiType.realtime
)
and not endpoint_url.endswith("/score") # type: ignore[union-attr]
):
raise ValueError(
"Endpoints of type `dedicated` should follow the format "
"`https://<your-endpoint>.<your_region>.inference.ml.azure.com/score`."
" If your endpoint URL ends with `/completions` or"
"`/chat/completions`, use `endpoint_api_type='serverless'` instead."
)
if field_value == AzureMLEndpointApiType.serverless and not (
endpoint_url.endswith("/completions") # type: ignore[union-attr]
or endpoint_url.endswith("/chat/completions") # type: ignore[union-attr]
):
raise ValueError(
"Endpoints of type `serverless` should follow the format "
"`https://<your-endpoint>.<your_region>.inference.ml.azure.com/chat/completions`"
" or `https://<your-endpoint>.<your_region>.inference.ml.azure.com/chat/completions`"
)
return field_value
@validator("http_client", always=True)
def validate_client(cls, field_value: Any, values: Dict) -> AzureMLEndpointClient:
"""Validate that api key and python package exists in environment."""
endpoint_url = values.get("endpoint_url")
endpoint_key = values.get("endpoint_api_key")
deployment_name = values.get("deployment_name")
timeout = values.get("timeout", DEFAULT_TIMEOUT)
http_client = AzureMLEndpointClient(
endpoint_url, # type: ignore
endpoint_key.get_secret_value(), # type: ignore
deployment_name, # type: ignore
timeout, # type: ignore
)
return http_client
class AzureMLOnlineEndpoint(BaseLLM, AzureMLBaseEndpoint):
"""Azure ML Online Endpoint models.
Example:
.. code-block:: python
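# e.g. one of the content formatters defined in this module (illustrative)
content_formatter = GPT2ContentFormatter()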
azure_llm = AzureMLOnlineEndpoint(
endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/score",
endpoint_api_type=AzureMLEndpointApiType.dedicated,
endpoint_api_key="my-api-key",
timeout=120,
content_formatter=content_formatter,
)
"""
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"deployment_name": self.deployment_name},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "azureml_endpoint"
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompts.
Args:
prompts: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
An LLMResult containing a generation for each prompt.
Example:
.. code-block:: python
response = azureml_model.invoke("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
_model_kwargs.update(kwargs)
if stop:
_model_kwargs["stop"] = stop
generations = []
for prompt in prompts:
request_payload = self.content_formatter.format_request_payload(
prompt, _model_kwargs, self.endpoint_api_type
)
response_payload = self.http_client.call(
body=request_payload, run_manager=run_manager
)
generated_text = self.content_formatter.format_response_payload(
response_payload, self.endpoint_api_type
)
generations.append([generated_text])
return LLMResult(generations=generations)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/yandex.py | from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional, Sequence
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from langchain_core.load.serializable import Serializable
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import SecretStr
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class _BaseYandexGPT(Serializable):
iam_token: SecretStr = "" # type: ignore[assignment]
"""Yandex Cloud IAM token for service or user account
with the `ai.languageModels.user` role"""
api_key: SecretStr = "" # type: ignore[assignment]
"""Yandex Cloud Api Key for service account
with the `ai.languageModels.user` role"""
folder_id: str = ""
"""Yandex Cloud folder ID"""
model_uri: str = ""
"""Model uri to use."""
model_name: str = "yandexgpt-lite"
"""Model name to use."""
model_version: str = "latest"
"""Model version to use."""
temperature: float = 0.6
"""What sampling temperature to use.
Should be a double number between 0 (inclusive) and 1 (inclusive)."""
max_tokens: int = 7400
"""Sets the maximum limit on the total number of tokens
used for both the input prompt and the generated response.
Must be greater than zero and not exceed 7400 tokens."""
stop: Optional[List[str]] = None
"""Sequences when completion generation will stop."""
url: str = "llm.api.cloud.yandex.net:443"
"""The url of the API."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
sleep_interval: float = 1.0
"""Delay between API requests"""
disable_request_logging: bool = False
"""YandexGPT API logs all request data by default.
If you provide personal or confidential information, disable logging."""
grpc_metadata: Optional[Sequence] = None
@property
def _llm_type(self) -> str:
return "yandex_gpt"
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"model_uri": self.model_uri,
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"stop": self.stop,
"max_retries": self.max_retries,
}
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that iam token exists in environment."""
iam_token = convert_to_secret_str(
get_from_dict_or_env(values, "iam_token", "YC_IAM_TOKEN", "")
)
values["iam_token"] = iam_token
api_key = convert_to_secret_str(
get_from_dict_or_env(values, "api_key", "YC_API_KEY", "")
)
values["api_key"] = api_key
folder_id = get_from_dict_or_env(values, "folder_id", "YC_FOLDER_ID", "")
values["folder_id"] = folder_id
if api_key.get_secret_value() == "" and iam_token.get_secret_value() == "":
raise ValueError("Either 'YC_API_KEY' or 'YC_IAM_TOKEN' must be provided.")
if values["iam_token"]:
values["grpc_metadata"] = [
("authorization", f"Bearer {values['iam_token'].get_secret_value()}")
]
if values["folder_id"]:
values["grpc_metadata"].append(("x-folder-id", values["folder_id"]))
else:
values["grpc_metadata"] = [
("authorization", f"Api-Key {values['api_key'].get_secret_value()}"),
]
if values["model_uri"] == "" and values["folder_id"] == "":
raise ValueError("Either 'model_uri' or 'folder_id' must be provided.")
if not values["model_uri"]:
values["model_uri"] = (
f"gpt://{values['folder_id']}/{values['model_name']}/{values['model_version']}"
)
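# Resulting URI looks like e.g. "gpt://<folder_id>/yandexgpt-lite/latest"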
if values["disable_request_logging"]:
values["grpc_metadata"].append(
(
"x-data-logging-enabled",
"false",
)
)
return values
class YandexGPT(_BaseYandexGPT, LLM):
"""Yandex large language models.
To use, you should have the ``yandexcloud`` python package installed.
There are two authentication options for the service account
with the ``ai.languageModels.user`` role:
- You can specify the token in a constructor parameter `iam_token`
or in an environment variable `YC_IAM_TOKEN`.
- You can specify the key in a constructor parameter `api_key`
or in an environment variable `YC_API_KEY`.
To use the default model specify the folder ID in a parameter `folder_id`
or in an environment variable `YC_FOLDER_ID`.
Or specify the model URI in a constructor parameter `model_uri`
Example:
.. code-block:: python
from langchain_community.llms import YandexGPT
yandex_gpt = YandexGPT(iam_token="t1.9eu...", folder_id="b1g...")
"""
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the Yandex GPT model and return the output.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = yandex_gpt.invoke("Tell me a joke.")
"""
text = completion_with_retry(self, prompt=prompt)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Async call the Yandex GPT model and return the output.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
"""
text = await acompletion_with_retry(self, prompt=prompt)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
def _make_request(
self: YandexGPT,
prompt: str,
) -> str:
try:
import grpc
from google.protobuf.wrappers_pb2 import DoubleValue, Int64Value
try:
from yandex.cloud.ai.foundation_models.v1.text_common_pb2 import (
CompletionOptions,
Message,
)
from yandex.cloud.ai.foundation_models.v1.text_generation.text_generation_service_pb2 import ( # noqa: E501
CompletionRequest,
)
from yandex.cloud.ai.foundation_models.v1.text_generation.text_generation_service_pb2_grpc import ( # noqa: E501
TextGenerationServiceStub,
)
except ModuleNotFoundError:
from yandex.cloud.ai.foundation_models.v1.foundation_models_pb2 import (
CompletionOptions,
Message,
)
from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2 import ( # noqa: E501
CompletionRequest,
)
from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2_grpc import ( # noqa: E501
TextGenerationServiceStub,
)
except ImportError as e:
raise ImportError(
"Please install YandexCloud SDK with `pip install yandexcloud` \
or upgrade it to a recent version."
) from e
channel_credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(self.url, channel_credentials)
request = CompletionRequest(
model_uri=self.model_uri,
completion_options=CompletionOptions(
temperature=DoubleValue(value=self.temperature),
max_tokens=Int64Value(value=self.max_tokens),
),
messages=[Message(role="user", text=prompt)],
)
stub = TextGenerationServiceStub(channel)
res = stub.Completion(request, metadata=self.grpc_metadata) # type: ignore[attr-defined]
return list(res)[0].alternatives[0].message.text
async def _amake_request(self: YandexGPT, prompt: str) -> str:
try:
import asyncio
import grpc
from google.protobuf.wrappers_pb2 import DoubleValue, Int64Value
try:
from yandex.cloud.ai.foundation_models.v1.text_common_pb2 import (
CompletionOptions,
Message,
)
from yandex.cloud.ai.foundation_models.v1.text_generation.text_generation_service_pb2 import ( # noqa: E501
CompletionRequest,
CompletionResponse,
)
from yandex.cloud.ai.foundation_models.v1.text_generation.text_generation_service_pb2_grpc import ( # noqa: E501
TextGenerationAsyncServiceStub,
)
except ModuleNotFoundError:
from yandex.cloud.ai.foundation_models.v1.foundation_models_pb2 import (
CompletionOptions,
Message,
)
from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2 import ( # noqa: E501
CompletionRequest,
CompletionResponse,
)
from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2_grpc import ( # noqa: E501
TextGenerationAsyncServiceStub,
)
from yandex.cloud.operation.operation_service_pb2 import GetOperationRequest
from yandex.cloud.operation.operation_service_pb2_grpc import (
OperationServiceStub,
)
except ImportError as e:
raise ImportError(
"Please install YandexCloud SDK with `pip install yandexcloud` \
or upgrade it to a recent version."
) from e
operation_api_url = "operation.api.cloud.yandex.net:443"
channel_credentials = grpc.ssl_channel_credentials()
async with grpc.aio.secure_channel(self.url, channel_credentials) as channel:
request = CompletionRequest(
model_uri=self.model_uri,
completion_options=CompletionOptions(
temperature=DoubleValue(value=self.temperature),
max_tokens=Int64Value(value=self.max_tokens),
),
messages=[Message(role="user", text=prompt)],
)
stub = TextGenerationAsyncServiceStub(channel)
operation = await stub.Completion(request, metadata=self.grpc_metadata) # type: ignore[attr-defined]
async with grpc.aio.secure_channel(
operation_api_url, channel_credentials
) as operation_channel:
operation_stub = OperationServiceStub(operation_channel)
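# The async Completion call returns a long-running Operation; poll the
# Operation service until it completes, then unpack the CompletionResponse.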
while not operation.done:
await asyncio.sleep(1)
operation_request = GetOperationRequest(operation_id=operation.id)
operation = await operation_stub.Get(
operation_request,
metadata=self.grpc_metadata, # type: ignore[attr-defined]
)
completion_response = CompletionResponse()
operation.response.Unpack(completion_response)
return completion_response.alternatives[0].message.text
def _create_retry_decorator(llm: YandexGPT) -> Callable[[Any], Any]:
from grpc import RpcError
min_seconds = llm.sleep_interval
max_seconds = 60
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(retry_if_exception_type((RpcError))),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def completion_with_retry(llm: YandexGPT, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _completion_with_retry(**_kwargs: Any) -> Any:
return _make_request(llm, **_kwargs)
return _completion_with_retry(**kwargs)
async def acompletion_with_retry(llm: YandexGPT, **kwargs: Any) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
async def _completion_with_retry(**_kwargs: Any) -> Any:
return await _amake_request(llm, **_kwargs)
return await _completion_with_retry(**kwargs)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/openllm.py | from __future__ import annotations
import copy
import json
import logging
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Literal,
Optional,
TypedDict,
Union,
overload,
)
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from pydantic import ConfigDict, PrivateAttr
if TYPE_CHECKING:
import openllm
ServerType = Literal["http", "grpc"]
class IdentifyingParams(TypedDict):
"""Parameters for identifying a model as a typed dict."""
model_name: str
model_id: Optional[str]
server_url: Optional[str]
server_type: Optional[ServerType]
embedded: bool
llm_kwargs: Dict[str, Any]
logger = logging.getLogger(__name__)
class OpenLLM(LLM):
"""OpenLLM, supporting both in-process model
instance and remote OpenLLM servers.
To use, you should have the openllm library installed:
.. code-block:: bash
pip install openllm
Learn more at: https://github.com/bentoml/openllm
Example running an LLM model locally managed by OpenLLM:
.. code-block:: python
from langchain_community.llms import OpenLLM
llm = OpenLLM(
model_name='flan-t5',
model_id='google/flan-t5-large',
)
llm.invoke("What is the difference between a duck and a goose?")
For all available supported models, you can run 'openllm models'.
If you have an OpenLLM server running, you can also use it remotely:
.. code-block:: python
from langchain_community.llms import OpenLLM
llm = OpenLLM(server_url='http://localhost:3000')
llm.invoke("What is the difference between a duck and a goose?")
"""
model_name: Optional[str] = None
"""Model name to use. See 'openllm models' for all available models."""
model_id: Optional[str] = None
"""Model Id to use. If not provided, will use the default model for the model name.
See 'openllm models' for all available model variants."""
server_url: Optional[str] = None
"""Optional server URL that currently runs a LLMServer with 'openllm start'."""
timeout: int = 30
""""Time out for the openllm client"""
server_type: ServerType = "http"
"""Optional server type. Either 'http' or 'grpc'."""
embedded: bool = True
"""Initialize this LLM instance in current process by default. Should
only set to False when using in conjunction with BentoML Service."""
llm_kwargs: Dict[str, Any]
"""Keyword arguments to be passed to openllm.LLM"""
_runner: Optional[openllm.LLMRunner] = PrivateAttr(default=None)
_client: Union[openllm.client.HTTPClient, openllm.client.GrpcClient, None] = (
PrivateAttr(default=None)
)
model_config = ConfigDict(
extra="forbid",
)
@overload
def __init__(
self,
model_name: Optional[str] = ...,
*,
model_id: Optional[str] = ...,
embedded: Literal[True, False] = ...,
**llm_kwargs: Any,
) -> None: ...
@overload
def __init__(
self,
*,
server_url: str = ...,
server_type: Literal["grpc", "http"] = ...,
**llm_kwargs: Any,
) -> None: ...
def __init__(
self,
model_name: Optional[str] = None,
*,
model_id: Optional[str] = None,
server_url: Optional[str] = None,
timeout: int = 30,
server_type: Literal["grpc", "http"] = "http",
embedded: bool = True,
**llm_kwargs: Any,
):
try:
import openllm
except ImportError as e:
raise ImportError(
"Could not import openllm. Make sure to install it with "
"'pip install openllm.'"
) from e
llm_kwargs = llm_kwargs or {}
if server_url is not None:
logger.debug("'server_url' is provided, returning a openllm.Client")
assert (
model_id is None and model_name is None
), "'server_url' and {'model_id', 'model_name'} are mutually exclusive"
client_cls = (
openllm.client.HTTPClient
if server_type == "http"
else openllm.client.GrpcClient
)
client = client_cls(server_url, timeout)
super().__init__(
**{ # type: ignore[arg-type]
"server_url": server_url,
"timeout": timeout,
"server_type": server_type,
"llm_kwargs": llm_kwargs,
}
)
self._runner = None # type: ignore
self._client = client
else:
assert model_name is not None, "Must provide 'model_name' or 'server_url'"
# Since LLMs are relatively huge, we don't actually want to convert the
# Runner with embedded when running the server. Instead, we only set
# init_local here so that LangChain users can still use the LLM
# in-process. For BentoML users, setting embedded=False is the expected
# behaviour to invoke the runners remotely.
# We also need to enable ensure_available to download and set up the model.
runner = openllm.Runner(
model_name=model_name,
model_id=model_id,
init_local=embedded,
ensure_available=True,
**llm_kwargs,
)
super().__init__(
**{ # type: ignore[arg-type]
"model_name": model_name,
"model_id": model_id,
"embedded": embedded,
"llm_kwargs": llm_kwargs,
}
)
self._client = None # type: ignore
self._runner = runner
@property
def runner(self) -> openllm.LLMRunner:
"""
Get the underlying openllm.LLMRunner instance for integration with BentoML.
Example:
.. code-block:: python
llm = OpenLLM(
model_name='flan-t5',
model_id='google/flan-t5-large',
embedded=False,
)
tools = load_tools(["serpapi", "llm-math"], llm=llm)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION
)
svc = bentoml.Service("langchain-openllm", runners=[llm.runner])
@svc.api(input=Text(), output=Text())
def chat(input_text: str):
return agent.run(input_text)
"""
if self._runner is None:
raise ValueError("OpenLLM must be initialized locally with 'model_name'")
return self._runner
@property
def _identifying_params(self) -> IdentifyingParams:
"""Get the identifying parameters."""
if self._client is not None:
self.llm_kwargs.update(self._client._config)
model_name = self._client._metadata.model_dump()["model_name"]
model_id = self._client._metadata.model_dump()["model_id"]
else:
if self._runner is None:
raise ValueError("Runner must be initialized.")
model_name = self.model_name
model_id = self.model_id
try:
self.llm_kwargs.update(
json.loads(self._runner.identifying_params["configuration"])
)
except (TypeError, json.JSONDecodeError):
pass
return IdentifyingParams(
server_url=self.server_url,
server_type=self.server_type,
embedded=self.embedded,
llm_kwargs=self.llm_kwargs,
model_name=model_name,
model_id=model_id,
)
@property
def _llm_type(self) -> str:
return "openllm_client" if self._client else "openllm"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
try:
import openllm
except ImportError as e:
raise ImportError(
"Could not import openllm. Make sure to install it with "
"'pip install openllm'."
) from e
copied = copy.deepcopy(self.llm_kwargs)
copied.update(kwargs)
config = openllm.AutoConfig.for_model(
self._identifying_params["model_name"], **copied
)
if self._client:
res = (
self._client.generate(prompt, **config.model_dump(flatten=True))
.outputs[0]
.text
)
else:
assert self._runner is not None
res = self._runner(prompt, **config.model_dump(flatten=True))
if isinstance(res, dict) and "text" in res:
return res["text"]
elif isinstance(res, str):
return res
else:
raise ValueError(
"Expected result to be a dict with key 'text' or a string. "
f"Received {res}"
)
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
try:
import openllm
except ImportError as e:
raise ImportError(
"Could not import openllm. Make sure to install it with "
"'pip install openllm'."
) from e
copied = copy.deepcopy(self.llm_kwargs)
copied.update(kwargs)
config = openllm.AutoConfig.for_model(
self._identifying_params["model_name"], **copied
)
if self._client:
async_client = openllm.client.AsyncHTTPClient(self.server_url, self.timeout)
res = (
(await async_client.generate(prompt, **config.model_dump(flatten=True)))
.outputs[0]
.text
)
else:
assert self._runner is not None
(
prompt,
generate_kwargs,
postprocess_kwargs,
) = self._runner.llm.sanitize_parameters(prompt, **kwargs)
generated_result = await self._runner.generate.async_run(
prompt, **generate_kwargs
)
res = self._runner.llm.postprocess_generate(
prompt, generated_result, **postprocess_kwargs
)
if isinstance(res, dict) and "text" in res:
return res["text"]
elif isinstance(res, str):
return res
else:
raise ValueError(
"Expected result to be a dict with key 'text' or a string. "
f"Received {res}"
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/predictionguard.py | import logging
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import get_from_dict_or_env, pre_init
from pydantic import ConfigDict
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class PredictionGuard(LLM):
"""Prediction Guard large language models.
To use, you should have the ``predictionguard`` python package installed, and the
environment variable ``PREDICTIONGUARD_TOKEN`` set with your access token, or pass
it as a named parameter to the constructor. To use Prediction Guard's API along
with OpenAI models, set the environment variable ``OPENAI_API_KEY`` with your
OpenAI API key as well.
Example:
.. code-block:: python
pgllm = PredictionGuard(model="MPT-7B-Instruct",
token="my-access-token",
output={
"type": "boolean"
})
"""
client: Any = None #: :meta private:
model: Optional[str] = "MPT-7B-Instruct"
"""Model name to use."""
output: Optional[Dict[str, Any]] = None
"""The output type or structure for controlling the LLM output."""
max_tokens: int = 256
"""Denotes the number of tokens to predict per generation."""
temperature: float = 0.75
"""A non-negative float that tunes the degree of randomness in generation."""
token: Optional[str] = None
"""Your Prediction Guard access token."""
stop: Optional[List[str]] = None
model_config = ConfigDict(
extra="forbid",
)
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the access token and python package exists in environment."""
token = get_from_dict_or_env(values, "token", "PREDICTIONGUARD_TOKEN")
try:
import predictionguard as pg
values["client"] = pg.Client(token=token)
except ImportError:
raise ImportError(
"Could not import predictionguard python package. "
"Please install it with `pip install predictionguard`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling the Prediction Guard API."""
return {
"max_tokens": self.max_tokens,
"temperature": self.temperature,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "predictionguard"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Prediction Guard's model API.
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = pgllm.invoke("Tell me a joke.")
"""
import predictionguard as pg
params = self._default_params
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
params["stop_sequences"] = self.stop
else:
params["stop_sequences"] = stop
response = pg.Completion.create(
model=self.model,
prompt=prompt,
output=self.output,
temperature=params["temperature"],
max_tokens=params["max_tokens"],
**kwargs,
)
text = response["choices"][0]["text"]
# If stop tokens are provided, Prediction Guard's endpoint returns them.
# In order to make this consistent with other endpoints, we strip them.
if stop is not None or self.stop is not None:
text = enforce_stop_tokens(text, params["stop_sequences"])
return text
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/beam.py | import base64
import json
import logging
import subprocess
import textwrap
import time
from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import get_from_dict_or_env, pre_init
from langchain_core.utils.pydantic import get_fields
from pydantic import ConfigDict, Field, model_validator
logger = logging.getLogger(__name__)
DEFAULT_NUM_TRIES = 10
DEFAULT_SLEEP_TIME = 4
class Beam(LLM): # type: ignore[override, override, override, override]
"""Beam API for gpt2 large language model.
To use, you should have the ``beam-sdk`` python package installed,
and the environment variable ``BEAM_CLIENT_ID`` set with your client id
and ``BEAM_CLIENT_SECRET`` set with your client secret. Information on how
to get this is available here: https://docs.beam.cloud/account/api-keys.
The wrapper can then be called as follows, where the name, cpu, memory, gpu,
python version, and python packages can be updated accordingly. Once deployed,
the instance can be called.
Example:
.. code-block:: python
llm = Beam(model_name="gpt2",
name="langchain-gpt2",
cpu=8,
memory="32Gi",
gpu="A10G",
python_version="python3.8",
python_packages=[
"diffusers[torch]>=0.10",
"transformers",
"torch",
"pillow",
"accelerate",
"safetensors",
"xformers",],
max_length=50)
llm._deploy()
call_result = llm._call(input)
"""
model_name: str = ""
name: str = ""
cpu: str = ""
memory: str = ""
gpu: str = ""
python_version: str = ""
python_packages: List[str] = []
max_length: str = ""
url: str = ""
"""model endpoint to use"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
beam_client_id: str = ""
beam_client_secret: str = ""
app_id: Optional[str] = None
model_config = ConfigDict(
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in get_fields(cls).values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
beam_client_id = get_from_dict_or_env(
values, "beam_client_id", "BEAM_CLIENT_ID"
)
beam_client_secret = get_from_dict_or_env(
values, "beam_client_secret", "BEAM_CLIENT_SECRET"
)
values["beam_client_id"] = beam_client_id
values["beam_client_secret"] = beam_client_secret
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model_name": self.model_name,
"name": self.name,
"cpu": self.cpu,
"memory": self.memory,
"gpu": self.gpu,
"python_version": self.python_version,
"python_packages": self.python_packages,
"max_length": self.max_length,
"model_kwargs": self.model_kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "beam"
def app_creation(self) -> None:
"""Creates a Python file which will contain your Beam app definition."""
script = textwrap.dedent(
"""\
import beam
# The environment your code will run on
app = beam.App(
name="{name}",
cpu={cpu},
memory="{memory}",
gpu="{gpu}",
python_version="{python_version}",
python_packages={python_packages},
)
app.Trigger.RestAPI(
inputs={{"prompt": beam.Types.String(), "max_length": beam.Types.String()}},
outputs={{"text": beam.Types.String()}},
handler="run.py:beam_langchain",
)
"""
)
script_name = "app.py"
with open(script_name, "w") as file:
file.write(
script.format(
name=self.name,
cpu=self.cpu,
memory=self.memory,
gpu=self.gpu,
python_version=self.python_version,
python_packages=self.python_packages,
)
)
def run_creation(self) -> None:
"""Creates a Python file which will be deployed on beam."""
script = textwrap.dedent(
"""
import os
import transformers
from transformers import GPT2LMHeadModel, GPT2Tokenizer
model_name = "{model_name}"
def beam_langchain(**inputs):
prompt = inputs["prompt"]
length = inputs["max_length"]
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)
encodedPrompt = tokenizer.encode(prompt, return_tensors='pt')
outputs = model.generate(encodedPrompt, max_length=int(length),
do_sample=True, pad_token_id=tokenizer.eos_token_id)
output = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(output) # noqa: T201
return {{"text": output}}
"""
)
script_name = "run.py"
with open(script_name, "w") as file:
file.write(script.format(model_name=self.model_name))
def _deploy(self) -> str:
"""Call to Beam."""
try:
import beam # type: ignore
if beam.__path__ == "":
raise ImportError
except ImportError:
raise ImportError(
"Could not import beam python package. "
"Please install it with `curl "
"https://raw.githubusercontent.com/slai-labs"
"/get-beam/main/get-beam.sh -sSfL | sh`."
)
self.app_creation()
self.run_creation()
process = subprocess.run(
"beam deploy app.py", shell=True, capture_output=True, text=True
)
if process.returncode == 0:
output = process.stdout
logger.info(output)
lines = output.split("\n")
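# The Beam CLI prints the deployed endpoint on a line of the form
# "  i Send requests to: https://apps.beam.cloud/<app_id>"; parse it out.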
for line in lines:
if line.startswith(" i Send requests to: https://apps.beam.cloud/"):
self.app_id = line.split("/")[-1]
self.url = line.split(":")[1].strip()
return self.app_id
raise ValueError(
f"""Failed to retrieve the appID from the deployment output.
Deployment output: {output}"""
)
else:
raise ValueError(f"Deployment failed. Error: {process.stderr}")
@property
def authorization(self) -> str:
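"""Base64-encoded credentials for the HTTP Basic auth header
(client_id:client_secret, or just the client secret if no id is set)."""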
if self.beam_client_id:
credential_str = self.beam_client_id + ":" + self.beam_client_secret
else:
credential_str = self.beam_client_secret
return base64.b64encode(credential_str.encode()).decode()
def _call(
self,
prompt: str,
stop: Optional[list] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to Beam."""
url = "https://apps.beam.cloud/" + self.app_id if self.app_id else self.url
payload = {"prompt": prompt, "max_length": self.max_length}
payload.update(kwargs)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Authorization": "Basic " + self.authorization,
"Connection": "keep-alive",
"Content-Type": "application/json",
}
for _ in range(DEFAULT_NUM_TRIES):
request = requests.post(url, headers=headers, data=json.dumps(payload))
if request.status_code == 200:
return request.json()["text"]
time.sleep(DEFAULT_SLEEP_TIME)
logger.warning("Unable to successfully call model.")
return ""
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/gigachat.py | from __future__ import annotations
import logging
from functools import cached_property
from typing import TYPE_CHECKING, Any, AsyncIterator, Dict, Iterator, List, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import BaseLLM
from langchain_core.load.serializable import Serializable
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from langchain_core.utils import pre_init
from langchain_core.utils.pydantic import get_fields
from pydantic import ConfigDict
if TYPE_CHECKING:
import gigachat
import gigachat.models as gm
logger = logging.getLogger(__name__)
class _BaseGigaChat(Serializable):
base_url: Optional[str] = None
""" Base API URL """
auth_url: Optional[str] = None
""" Auth URL """
credentials: Optional[str] = None
""" Auth Token """
scope: Optional[str] = None
""" Permission scope for access token """
access_token: Optional[str] = None
""" Access token for GigaChat """
model: Optional[str] = None
"""Model name to use."""
user: Optional[str] = None
""" Username for authenticate """
password: Optional[str] = None
""" Password for authenticate """
timeout: Optional[float] = None
""" Timeout for request """
verify_ssl_certs: Optional[bool] = None
""" Check certificates for all requests """
ca_bundle_file: Optional[str] = None
cert_file: Optional[str] = None
key_file: Optional[str] = None
key_file_password: Optional[str] = None
# Support for connection to GigaChat through SSL certificates
profanity: bool = True
""" DEPRECATED: Check for profanity """
profanity_check: Optional[bool] = None
""" Check for profanity """
streaming: bool = False
""" Whether to stream the results or not. """
temperature: Optional[float] = None
""" What sampling temperature to use. """
max_tokens: Optional[int] = None
""" Maximum number of tokens to generate """
use_api_for_tokens: bool = False
""" Use GigaChat API for tokens count """
verbose: bool = False
""" Verbose logging """
top_p: Optional[float] = None
""" top_p value to use for nucleus sampling. Must be between 0.0 and 1.0 """
repetition_penalty: Optional[float] = None
""" The penalty applied to repeated tokens """
update_interval: Optional[float] = None
""" Minimum interval in seconds that elapses between sending tokens """
@property
def _llm_type(self) -> str:
return "giga-chat-model"
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"credentials": "GIGACHAT_CREDENTIALS",
"access_token": "GIGACHAT_ACCESS_TOKEN",
"password": "GIGACHAT_PASSWORD",
"key_file_password": "GIGACHAT_KEY_FILE_PASSWORD",
}
@property
def lc_serializable(self) -> bool:
return True
@cached_property
def _client(self) -> gigachat.GigaChat:
"""Returns GigaChat API client"""
import gigachat
return gigachat.GigaChat(
base_url=self.base_url,
auth_url=self.auth_url,
credentials=self.credentials,
scope=self.scope,
access_token=self.access_token,
model=self.model,
profanity_check=self.profanity_check,
user=self.user,
password=self.password,
timeout=self.timeout,
verify_ssl_certs=self.verify_ssl_certs,
ca_bundle_file=self.ca_bundle_file,
cert_file=self.cert_file,
key_file=self.key_file,
key_file_password=self.key_file_password,
verbose=self.verbose,
)
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate authenticate data in environment and python package is installed."""
try:
import gigachat # noqa: F401
except ImportError:
raise ImportError(
"Could not import gigachat python package. "
"Please install it with `pip install gigachat`."
)
fields = set(get_fields(cls).keys())
diff = set(values.keys()) - fields
if diff:
logger.warning(f"Extra fields {diff} in GigaChat class")
if "profanity" in fields and values.get("profanity") is False:
logger.warning(
"'profanity' field is deprecated. Use 'profanity_check' instead."
)
if values.get("profanity_check") is None:
values["profanity_check"] = values.get("profanity")
return values
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"temperature": self.temperature,
"model": self.model,
"profanity": self.profanity_check,
"streaming": self.streaming,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"repetition_penalty": self.repetition_penalty,
}
def tokens_count(
self, input_: List[str], model: Optional[str] = None
) -> List[gm.TokensCount]:
"""Get tokens of string list"""
return self._client.tokens_count(input_, model)
async def atokens_count(
self, input_: List[str], model: Optional[str] = None
) -> List[gm.TokensCount]:
"""Get tokens of strings list (async)"""
return await self._client.atokens_count(input_, model)
def get_models(self) -> gm.Models:
"""Get available models of Gigachat"""
return self._client.get_models()
async def aget_models(self) -> gm.Models:
"""Get available models of Gigachat (async)"""
return await self._client.aget_models()
def get_model(self, model: str) -> gm.Model:
"""Get info about model"""
return self._client.get_model(model)
async def aget_model(self, model: str) -> gm.Model:
"""Get info about model (async)"""
return await self._client.aget_model(model)
def get_num_tokens(self, text: str) -> int:
"""Count approximate number of tokens"""
if self.use_api_for_tokens:
return self.tokens_count([text])[0].tokens # type: ignore
else:
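            # Rough local heuristic used by this class: assume ~4.6 characters per token.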
return round(len(text) / 4.6)
class GigaChat(_BaseGigaChat, BaseLLM):
"""`GigaChat` large language models API.
To use, you should pass login and password to access GigaChat API or use token.
Example:
.. code-block:: python
from langchain_community.llms import GigaChat
giga = GigaChat(credentials=..., scope=..., verify_ssl_certs=False)
"""
payload_role: str = "user"
def _build_payload(self, messages: List[str]) -> Dict[str, Any]:
payload: Dict[str, Any] = {
"messages": [{"role": self.payload_role, "content": m} for m in messages],
}
if self.model:
payload["model"] = self.model
if self.profanity_check is not None:
payload["profanity_check"] = self.profanity_check
if self.temperature is not None:
payload["temperature"] = self.temperature
if self.top_p is not None:
payload["top_p"] = self.top_p
if self.max_tokens is not None:
payload["max_tokens"] = self.max_tokens
if self.repetition_penalty is not None:
payload["repetition_penalty"] = self.repetition_penalty
if self.update_interval is not None:
payload["update_interval"] = self.update_interval
if self.verbose:
logger.info("Giga request: %s", payload)
return payload
def _create_llm_result(self, response: Any) -> LLMResult:
generations = []
for res in response.choices:
finish_reason = res.finish_reason
gen = Generation(
text=res.message.content,
generation_info={"finish_reason": finish_reason},
)
generations.append([gen])
if finish_reason != "stop":
logger.warning(
"Giga generation stopped with reason: %s",
finish_reason,
)
if self.verbose:
logger.info("Giga response: %s", res.message.content)
token_usage = response.usage
llm_output = {"token_usage": token_usage, "model_name": response.model}
return LLMResult(generations=generations, llm_output=llm_output)
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> LLMResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
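            # Streaming mode only processes the first prompt and accumulates its chunks into one generation.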
generation: Optional[GenerationChunk] = None
stream_iter = self._stream(
prompts[0], stop=stop, run_manager=run_manager, **kwargs
)
for chunk in stream_iter:
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
return LLMResult(generations=[[generation]])
payload = self._build_payload(prompts)
response = self._client.chat(payload)
return self._create_llm_result(response)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> LLMResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
generation: Optional[GenerationChunk] = None
stream_iter = self._astream(
prompts[0], stop=stop, run_manager=run_manager, **kwargs
)
async for chunk in stream_iter:
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
return LLMResult(generations=[[generation]])
payload = self._build_payload(prompts)
response = await self._client.achat(payload)
return self._create_llm_result(response)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
payload = self._build_payload([prompt])
for chunk in self._client.stream(payload):
if chunk.choices:
content = chunk.choices[0].delta.content
if run_manager:
run_manager.on_llm_new_token(content)
yield GenerationChunk(text=content)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
payload = self._build_payload([prompt])
async for chunk in self._client.astream(payload):
if chunk.choices:
content = chunk.choices[0].delta.content
if run_manager:
await run_manager.on_llm_new_token(content)
yield GenerationChunk(text=content)
model_config = ConfigDict(
extra="allow",
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/yuan2.py | import json
import logging
from typing import Any, Dict, List, Mapping, Optional, Set
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from pydantic import Field
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class Yuan2(LLM):
"""Yuan2.0 language models.
Example:
.. code-block:: python
yuan_llm = Yuan2(
infer_api="http://127.0.0.1:8000/yuan",
max_tokens=1024,
temp=1.0,
top_p=0.9,
top_k=40,
)
print(yuan_llm)
print(yuan_llm.invoke("你是谁?"))
"""
infer_api: str = "http://127.0.0.1:8000/yuan"
"""Yuan2.0 inference api"""
max_tokens: int = Field(1024, alias="max_token")
"""Token context window."""
temp: Optional[float] = 0.7
"""The temperature to use for sampling."""
top_p: Optional[float] = 0.9
"""The top-p value to use for sampling."""
top_k: Optional[int] = 0
"""The top-k value to use for sampling."""
do_sample: bool = False
"""The do_sample is a Boolean value that determines whether
to use the sampling method during text generation.
"""
echo: Optional[bool] = False
"""Whether to echo the prompt."""
stop: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
repeat_last_n: Optional[int] = 64
"Last n tokens to penalize"
repeat_penalty: Optional[float] = 1.18
"""The penalty to apply to repeated tokens."""
streaming: bool = False
"""Whether to stream the results or not."""
history: List[str] = []
"""History of the conversation"""
use_history: bool = False
"""Whether to use history or not"""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the Yuan2 class."""
super().__init__(**kwargs)
if (self.top_p or 0) > 0 and (self.top_k or 0) > 0:
logger.warning(
"top_p and top_k cannot be set simultaneously. "
"set top_k to 0 instead..."
)
self.top_k = 0
@property
def _llm_type(self) -> str:
return "Yuan2.0"
@staticmethod
def _model_param_names() -> Set[str]:
return {
"max_tokens",
"temp",
"top_k",
"top_p",
"do_sample",
}
def _default_params(self) -> Dict[str, Any]:
return {
"do_sample": self.do_sample,
"infer_api": self.infer_api,
"max_tokens": self.max_tokens,
"repeat_penalty": self.repeat_penalty,
"temp": self.temp,
"top_k": self.top_k,
"top_p": self.top_p,
"use_history": self.use_history,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model": self._llm_type,
**self._default_params(),
**{
k: v for k, v in self.__dict__.items() if k in self._model_param_names()
},
}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to a Yuan2.0 LLM inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = yuan_llm.invoke("你能做什么?")
"""
if self.use_history:
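            # Multi-turn mode: append the prompt and join the accumulated turns with the "<n>" separator.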
self.history.append(prompt)
input = "<n>".join(self.history)
else:
input = prompt
headers = {"Content-Type": "application/json"}
data = json.dumps(
{
"ques_list": [{"id": "000", "ques": input}],
"tokens_to_generate": self.max_tokens,
"temperature": self.temp,
"top_p": self.top_p,
"top_k": self.top_k,
"do_sample": self.do_sample,
}
)
logger.debug("Yuan2.0 prompt:", input)
# call api
try:
response = requests.put(self.infer_api, headers=headers, data=data)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference api: {e}")
logger.debug(f"Yuan2.0 response: {response}")
if response.status_code != 200:
raise ValueError(f"Failed with response: {response}")
try:
resp = response.json()
if resp["errCode"] != "0":
raise ValueError(
f"Failed with error code [{resp['errCode']}], "
f"error message: [{resp['exceptionMsg']}]"
)
if "resData" in resp:
if len(resp["resData"]["output"]) >= 0:
generate_text = resp["resData"]["output"][0]["ans"]
else:
raise ValueError("No output found in response.")
else:
raise ValueError("No resData found in response.")
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised during decoding response from inference api: {e}."
f"\nResponse: {response.text}"
)
if stop is not None:
generate_text = enforce_stop_tokens(generate_text, stop)
# support multi-turn chat
if self.use_history:
self.history.append(generate_text)
logger.debug(f"history: {self.history}")
return generate_text
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/volcengine_maas.py | from __future__ import annotations
from typing import Any, Dict, Iterator, List, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import BaseModel, ConfigDict, Field, SecretStr
class VolcEngineMaasBase(BaseModel):
"""Base class for VolcEngineMaas models."""
model_config = ConfigDict(protected_namespaces=())
client: Any = None
volc_engine_maas_ak: Optional[SecretStr] = None
"""access key for volc engine"""
volc_engine_maas_sk: Optional[SecretStr] = None
"""secret key for volc engine"""
endpoint: Optional[str] = "maas-api.ml-platform-cn-beijing.volces.com"
"""Endpoint of the VolcEngineMaas LLM."""
region: Optional[str] = "Region"
"""Region of the VolcEngineMaas LLM."""
model: str = "skylark-lite-public"
"""Model name. you could check this model details here
https://www.volcengine.com/docs/82379/1133187
and you could choose other models by change this field"""
model_version: Optional[str] = None
"""Model version. Only used in moonshot large language model.
you could check details here https://www.volcengine.com/docs/82379/1158281"""
top_p: Optional[float] = 0.8
"""Total probability mass of tokens to consider at each step."""
temperature: Optional[float] = 0.95
"""A non-negative float that tunes the degree of randomness in generation."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""model special arguments, you could check detail on model page"""
streaming: bool = False
"""Whether to stream the results."""
connect_timeout: Optional[int] = 60
"""Timeout for connect to volc engine maas endpoint. Default is 60 seconds."""
read_timeout: Optional[int] = 60
"""Timeout for read response from volc engine maas endpoint.
Default is 60 seconds."""
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
volc_engine_maas_ak = convert_to_secret_str(
get_from_dict_or_env(values, "volc_engine_maas_ak", "VOLC_ACCESSKEY")
)
volc_engine_maas_sk = convert_to_secret_str(
get_from_dict_or_env(values, "volc_engine_maas_sk", "VOLC_SECRETKEY")
)
endpoint = values["endpoint"]
if values["endpoint"] is not None and values["endpoint"] != "":
endpoint = values["endpoint"]
try:
from volcengine.maas import MaasService
maas = MaasService(
endpoint,
values["region"],
connection_timeout=values["connect_timeout"],
socket_timeout=values["read_timeout"],
)
maas.set_ak(volc_engine_maas_ak.get_secret_value())
maas.set_sk(volc_engine_maas_sk.get_secret_value())
values["volc_engine_maas_ak"] = volc_engine_maas_ak
values["volc_engine_maas_sk"] = volc_engine_maas_sk
values["client"] = maas
except ImportError:
raise ImportError(
"volcengine package not found, please install it with "
"`pip install volcengine`"
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling VolcEngineMaas API."""
normal_params = {
"top_p": self.top_p,
"temperature": self.temperature,
}
return {**normal_params, **self.model_kwargs}
class VolcEngineMaasLLM(LLM, VolcEngineMaasBase):
"""volc engine maas hosts a plethora of models.
You can utilize these models through this class.
To use, you should have the ``volcengine`` python package installed.
and set access key and secret key by environment variable or direct pass those to
this class.
access key, secret key are required parameters which you could get help
https://www.volcengine.com/docs/6291/65568
In order to use them, it is necessary to install the 'volcengine' Python package.
The access key and secret key must be set either via environment variables or
passed directly to this class.
access key and secret key are mandatory parameters for which assistance can be
sought at https://www.volcengine.com/docs/6291/65568.
Example:
.. code-block:: python
from langchain_community.llms import VolcEngineMaasLLM
model = VolcEngineMaasLLM(model="skylark-lite-public",
volc_engine_maas_ak="your_ak",
volc_engine_maas_sk="your_sk")
"""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "volc-engine-maas-llm"
def _convert_prompt_msg_params(
self,
prompt: str,
**kwargs: Any,
) -> dict:
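        # Wrap the prompt as a single user chat message and attach the model name/version plus generation parameters.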
model_req = {
"model": {
"name": self.model,
}
}
if self.model_version is not None:
model_req["model"]["version"] = self.model_version
return {
**model_req,
"messages": [{"role": "user", "content": prompt}],
"parameters": {**self._default_params, **kwargs},
}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
if self.streaming:
completion = ""
for chunk in self._stream(prompt, stop, run_manager, **kwargs):
completion += chunk.text
return completion
params = self._convert_prompt_msg_params(prompt, **kwargs)
response = self.client.chat(params)
return response.get("choice", {}).get("message", {}).get("content", "")
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
params = self._convert_prompt_msg_params(prompt, **kwargs)
for res in self.client.stream_chat(params):
if res:
chunk = GenerationChunk(
text=res.get("choice", {}).get("message", {}).get("content", "")
)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/gooseai.py | import logging
from typing import Any, Dict, List, Mapping, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import (
convert_to_secret_str,
get_from_dict_or_env,
get_pydantic_field_names,
)
from pydantic import ConfigDict, Field, SecretStr, model_validator
logger = logging.getLogger(__name__)
class GooseAI(LLM):
"""GooseAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``GOOSEAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.llms import GooseAI
gooseai = GooseAI(model_name="gpt-neo-20b")
"""
client: Any = None
model_name: str = "gpt-neo-20b"
"""Model name to use"""
temperature: float = 0.7
"""What sampling temperature to use"""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion.
-1 returns as many tokens as possible given the prompt and
    the model's maximal context size."""
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
min_tokens: int = 1
"""The minimum number of tokens to generate in the completion."""
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: float = 0
"""Penalizes repeated tokens."""
n: int = 1
"""How many completions to generate for each prompt."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) # type: ignore[arg-type]
"""Adjust the probability of specific tokens being generated."""
gooseai_api_key: Optional[SecretStr] = None
model_config = ConfigDict(
extra="ignore",
)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
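        # Move any constructor arguments that are not declared fields into model_kwargs so they reach the API call.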
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
gooseai_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "gooseai_api_key", "GOOSEAI_API_KEY")
)
values["gooseai_api_key"] = gooseai_api_key
try:
import openai
openai.api_key = gooseai_api_key.get_secret_value()
openai.api_base = "https://api.goose.ai/v1" # type: ignore[attr-defined]
values["client"] = openai.Completion # type: ignore[attr-defined]
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling GooseAI API."""
normal_params = {
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"min_tokens": self.min_tokens,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"n": self.n,
"logit_bias": self.logit_bias,
}
return {**normal_params, **self.model_kwargs}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "gooseai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the GooseAI API."""
params = self._default_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
params = {**params, **kwargs}
response = self.client.create(engine=self.model_name, prompt=prompt, **params)
text = response.choices[0].text
return text
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/huggingface_pipeline.py | from __future__ import annotations
import importlib.util
import logging
from typing import Any, Iterator, List, Mapping, Optional
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from pydantic import ConfigDict
DEFAULT_MODEL_ID = "gpt2"
DEFAULT_TASK = "text-generation"
VALID_TASKS = (
"text2text-generation",
"text-generation",
"summarization",
"translation",
)
DEFAULT_BATCH_SIZE = 4
logger = logging.getLogger(__name__)
@deprecated(
since="0.0.37",
removal="1.0",
alternative_import="langchain_huggingface.HuggingFacePipeline",
)
class HuggingFacePipeline(BaseLLM):
"""HuggingFace Pipeline API.
To use, you should have the ``transformers`` python package installed.
Only supports `text-generation`, `text2text-generation`, `summarization` and
`translation` for now.
Example using from_model_id:
.. code-block:: python
from langchain_community.llms import HuggingFacePipeline
hf = HuggingFacePipeline.from_model_id(
model_id="gpt2",
task="text-generation",
pipeline_kwargs={"max_new_tokens": 10},
)
Example passing pipeline in directly:
.. code-block:: python
from langchain_community.llms import HuggingFacePipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline(
"text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10
)
hf = HuggingFacePipeline(pipeline=pipe)
"""
pipeline: Any = None #: :meta private:
model_id: str = DEFAULT_MODEL_ID
"""Model name to use."""
model_kwargs: Optional[dict] = None
"""Keyword arguments passed to the model."""
pipeline_kwargs: Optional[dict] = None
"""Keyword arguments passed to the pipeline."""
batch_size: int = DEFAULT_BATCH_SIZE
"""Batch size to use when passing multiple documents to generate."""
model_config = ConfigDict(
extra="forbid",
)
@classmethod
def from_model_id(
cls,
model_id: str,
task: str,
backend: str = "default",
device: Optional[int] = -1,
device_map: Optional[str] = None,
model_kwargs: Optional[dict] = None,
pipeline_kwargs: Optional[dict] = None,
batch_size: int = DEFAULT_BATCH_SIZE,
**kwargs: Any,
) -> HuggingFacePipeline:
"""Construct the pipeline object from model_id and task."""
try:
from transformers import (
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
AutoTokenizer,
)
from transformers import pipeline as hf_pipeline
except ImportError:
raise ImportError(
"Could not import transformers python package. "
"Please install it with `pip install transformers`."
)
_model_kwargs = model_kwargs or {}
tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs)
try:
if task == "text-generation":
if backend == "openvino":
try:
from optimum.intel.openvino import OVModelForCausalLM
except ImportError:
raise ImportError(
"Could not import optimum-intel python package. "
"Please install it with: "
"pip install 'optimum[openvino,nncf]' "
)
try:
# use local model
model = OVModelForCausalLM.from_pretrained(
model_id, **_model_kwargs
)
except Exception:
# use remote model
model = OVModelForCausalLM.from_pretrained(
model_id, export=True, **_model_kwargs
)
else:
model = AutoModelForCausalLM.from_pretrained(
model_id, **_model_kwargs
)
elif task in ("text2text-generation", "summarization", "translation"):
if backend == "openvino":
try:
from optimum.intel.openvino import OVModelForSeq2SeqLM
except ImportError:
raise ImportError(
"Could not import optimum-intel python package. "
"Please install it with: "
"pip install 'optimum[openvino,nncf]' "
)
try:
# use local model
model = OVModelForSeq2SeqLM.from_pretrained(
model_id, **_model_kwargs
)
except Exception:
# use remote model
model = OVModelForSeq2SeqLM.from_pretrained(
model_id, export=True, **_model_kwargs
)
else:
model = AutoModelForSeq2SeqLM.from_pretrained(
model_id, **_model_kwargs
)
else:
raise ValueError(
f"Got invalid task {task}, "
f"currently only {VALID_TASKS} are supported"
)
except ImportError as e:
raise ImportError(
f"Could not load the {task} model due to missing dependencies."
) from e
if tokenizer.pad_token is None:
tokenizer.pad_token_id = model.config.eos_token_id
if (
(
getattr(model, "is_loaded_in_4bit", False)
or getattr(model, "is_loaded_in_8bit", False)
)
and device is not None
and backend == "default"
):
logger.warning(
f"Setting the `device` argument to None from {device} to avoid "
"the error caused by attempting to move the model that was already "
"loaded on the GPU using the Accelerate module to the same or "
"another device."
)
device = None
if (
device is not None
and importlib.util.find_spec("torch") is not None
and backend == "default"
):
import torch
cuda_device_count = torch.cuda.device_count()
if device < -1 or (device >= cuda_device_count):
raise ValueError(
f"Got device=={device}, "
f"device is required to be within [-1, {cuda_device_count})"
)
if device_map is not None and device < 0:
device = None
if device is not None and device < 0 and cuda_device_count > 0:
logger.warning(
"Device has %d GPUs available. "
"Provide device={deviceId} to `from_model_id` to use available"
"GPUs for execution. deviceId is -1 (default) for CPU and "
"can be a positive integer associated with CUDA device id.",
cuda_device_count,
)
if device is not None and device_map is not None and backend == "openvino":
logger.warning("Please set device for OpenVINO through: `model_kwargs`")
if "trust_remote_code" in _model_kwargs:
_model_kwargs = {
k: v for k, v in _model_kwargs.items() if k != "trust_remote_code"
}
_pipeline_kwargs = pipeline_kwargs or {}
pipeline = hf_pipeline(
task=task,
model=model,
tokenizer=tokenizer,
device=device,
device_map=device_map,
batch_size=batch_size,
model_kwargs=_model_kwargs,
**_pipeline_kwargs,
)
if pipeline.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
return cls(
pipeline=pipeline,
model_id=model_id,
model_kwargs=_model_kwargs,
pipeline_kwargs=_pipeline_kwargs,
batch_size=batch_size,
**kwargs,
)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model_id": self.model_id,
"model_kwargs": self.model_kwargs,
"pipeline_kwargs": self.pipeline_kwargs,
}
@property
def _llm_type(self) -> str:
return "huggingface_pipeline"
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
# List to hold all results
text_generations: List[str] = []
default_pipeline_kwargs = self.pipeline_kwargs if self.pipeline_kwargs else {}
pipeline_kwargs = kwargs.get("pipeline_kwargs", default_pipeline_kwargs)
skip_prompt = kwargs.get("skip_prompt", False)
for i in range(0, len(prompts), self.batch_size):
batch_prompts = prompts[i : i + self.batch_size]
# Process batch of prompts
responses = self.pipeline(
batch_prompts,
**pipeline_kwargs,
)
# Process each response in the batch
for j, response in enumerate(responses):
if isinstance(response, list):
# if model returns multiple generations, pick the top one
response = response[0]
if self.pipeline.task == "text-generation":
text = response["generated_text"]
elif self.pipeline.task == "text2text-generation":
text = response["generated_text"]
elif self.pipeline.task == "summarization":
text = response["summary_text"]
elif self.pipeline.task in "translation":
text = response["translation_text"]
else:
raise ValueError(
f"Got invalid task {self.pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
if skip_prompt:
text = text[len(batch_prompts[j]) :]
# Append the processed text to results
text_generations.append(text)
return LLMResult(
generations=[[Generation(text=text)] for text in text_generations]
)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
from threading import Thread
import torch
from transformers import (
StoppingCriteria,
StoppingCriteriaList,
TextIteratorStreamer,
)
pipeline_kwargs = kwargs.get("pipeline_kwargs", {})
skip_prompt = kwargs.get("skip_prompt", True)
if stop is not None:
stop = self.pipeline.tokenizer.convert_tokens_to_ids(stop)
stopping_ids_list = stop or []
class StopOnTokens(StoppingCriteria):
def __call__(
self,
input_ids: torch.LongTensor,
scores: torch.FloatTensor,
**kwargs: Any,
) -> bool:
for stop_id in stopping_ids_list:
if input_ids[0][-1] == stop_id:
return True
return False
stopping_criteria = StoppingCriteriaList([StopOnTokens()])
inputs = self.pipeline.tokenizer(prompt, return_tensors="pt")
streamer = TextIteratorStreamer(
self.pipeline.tokenizer,
timeout=60.0,
skip_prompt=skip_prompt,
skip_special_tokens=True,
)
generation_kwargs = dict(
inputs,
streamer=streamer,
stopping_criteria=stopping_criteria,
**pipeline_kwargs,
)
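        # Generation runs in a background thread; TextIteratorStreamer yields decoded text pieces as they are produced.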
t1 = Thread(target=self.pipeline.model.generate, kwargs=generation_kwargs)
t1.start()
for char in streamer:
chunk = GenerationChunk(text=char)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/nlpcloud.py | from typing import Any, Dict, List, Mapping, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import ConfigDict, SecretStr
class NLPCloud(LLM):
"""NLPCloud large language models.
To use, you should have the ``nlpcloud`` python package installed, and the
environment variable ``NLPCLOUD_API_KEY`` set with your API key.
Example:
.. code-block:: python
from langchain_community.llms import NLPCloud
nlpcloud = NLPCloud(model="finetuned-gpt-neox-20b")
"""
client: Any = None #: :meta private:
model_name: str = "finetuned-gpt-neox-20b"
"""Model name to use."""
gpu: bool = True
"""Whether to use a GPU or not"""
lang: str = "en"
"""Language to use (multilingual addon)"""
temperature: float = 0.7
"""What sampling temperature to use."""
max_length: int = 256
"""The maximum number of tokens to generate in the completion."""
length_no_input: bool = True
"""Whether min_length and max_length should include the length of the input."""
remove_input: bool = True
"""Remove input text from API response"""
remove_end_sequence: bool = True
"""Whether or not to remove the end sequence token."""
bad_words: List[str] = []
"""List of tokens not allowed to be generated."""
top_p: float = 1.0
"""Total probability mass of tokens to consider at each step."""
top_k: int = 50
"""The number of highest probability tokens to keep for top-k filtering."""
repetition_penalty: float = 1.0
"""Penalizes repeated tokens. 1.0 means no penalty."""
num_beams: int = 1
"""Number of beams for beam search."""
num_return_sequences: int = 1
"""How many completions to generate for each prompt."""
nlpcloud_api_key: Optional[SecretStr] = None
model_config = ConfigDict(
extra="forbid",
)
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["nlpcloud_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "nlpcloud_api_key", "NLPCLOUD_API_KEY")
)
try:
import nlpcloud
values["client"] = nlpcloud.Client(
values["model_name"],
values["nlpcloud_api_key"].get_secret_value(),
gpu=values["gpu"],
lang=values["lang"],
)
except ImportError:
raise ImportError(
"Could not import nlpcloud python package. "
"Please install it with `pip install nlpcloud`."
)
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling NLPCloud API."""
return {
"temperature": self.temperature,
"max_length": self.max_length,
"length_no_input": self.length_no_input,
"remove_input": self.remove_input,
"remove_end_sequence": self.remove_end_sequence,
"bad_words": self.bad_words,
"top_p": self.top_p,
"top_k": self.top_k,
"repetition_penalty": self.repetition_penalty,
"num_beams": self.num_beams,
"num_return_sequences": self.num_return_sequences,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_name": self.model_name},
**{"gpu": self.gpu},
**{"lang": self.lang},
**self._default_params,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "nlpcloud"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to NLPCloud's create endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Not supported by this interface (pass in init method)
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = nlpcloud("Tell me a joke.")
"""
if stop and len(stop) > 1:
raise ValueError(
"NLPCloud only supports a single stop sequence per generation."
"Pass in a list of length 1."
)
elif stop and len(stop) == 1:
end_sequence = stop[0]
else:
end_sequence = None
params = {**self._default_params, **kwargs}
response = self.client.generation(prompt, end_sequence=end_sequence, **params)
return response["generated_text"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/predibase.py | import os
from typing import Any, Dict, List, Mapping, Optional, Union
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from pydantic import Field, SecretStr
class Predibase(LLM):
"""Use your Predibase models with Langchain.
To use, you should have the ``predibase`` python package installed,
and have your Predibase API key.
The `model` parameter is the Predibase "serverless" base_model ID
(see https://docs.predibase.com/user-guide/inference/models for the catalog).
An optional `adapter_id` parameter is the Predibase ID or HuggingFace ID of a
fine-tuned LLM adapter, whose base model is the `model` parameter; the
fine-tuned adapter must be compatible with its base model;
otherwise, an error is raised. If the fine-tuned adapter is hosted at Predibase,
then `adapter_version` in the adapter repository must be specified.
An optional `predibase_sdk_version` parameter defaults to latest SDK version.
"""
model: str
predibase_api_key: SecretStr
predibase_sdk_version: Optional[str] = None
adapter_id: Optional[str] = None
adapter_version: Optional[int] = None
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
default_options_for_generation: dict = Field(
{
"max_new_tokens": 256,
"temperature": 0.1,
}
)
@property
def _llm_type(self) -> str:
return "predibase"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
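        # Merge generation options: per-call kwargs override model_kwargs, which override the defaults.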
options: Dict[str, Union[str, float]] = {
**self.default_options_for_generation,
**(self.model_kwargs or {}),
**(kwargs or {}),
}
if self._is_deprecated_sdk_version():
try:
from predibase import PredibaseClient
from predibase.pql import get_session
from predibase.pql.api import (
ServerResponseError,
Session,
)
from predibase.resource.llm.interface import (
HuggingFaceLLM,
LLMDeployment,
)
from predibase.resource.llm.response import GeneratedResponse
from predibase.resource.model import Model
session: Session = get_session(
token=self.predibase_api_key.get_secret_value(),
gateway="https://api.app.predibase.com/v1",
serving_endpoint="serving.app.predibase.com",
)
pc: PredibaseClient = PredibaseClient(session=session)
except ImportError as e:
raise ImportError(
"Could not import Predibase Python package. "
"Please install it with `pip install predibase`."
) from e
except ValueError as e:
raise ValueError("Your API key is not correct. Please try again") from e
base_llm_deployment: LLMDeployment = pc.LLM(
uri=f"pb://deployments/{self.model}"
)
result: GeneratedResponse
if self.adapter_id:
"""
Attempt to retrieve the fine-tuned adapter from a Predibase
repository. If absent, then load the fine-tuned adapter
from a HuggingFace repository.
"""
adapter_model: Union[Model, HuggingFaceLLM]
try:
adapter_model = pc.get_model(
name=self.adapter_id,
version=self.adapter_version,
model_id=None,
)
except ServerResponseError:
# Predibase does not recognize the adapter ID (query HuggingFace).
adapter_model = pc.LLM(uri=f"hf://{self.adapter_id}")
result = base_llm_deployment.with_adapter(model=adapter_model).generate(
prompt=prompt,
options=options,
)
else:
result = base_llm_deployment.generate(
prompt=prompt,
options=options,
)
return result.response
from predibase import Predibase
os.environ["PREDIBASE_GATEWAY"] = "https://api.app.predibase.com"
predibase: Predibase = Predibase(
api_token=self.predibase_api_key.get_secret_value()
)
import requests
from lorax.client import Client as LoraxClient
from lorax.errors import GenerationError
from lorax.types import Response
lorax_client: LoraxClient = predibase.deployments.client(
deployment_ref=self.model
)
response: Response
if self.adapter_id:
"""
Attempt to retrieve the fine-tuned adapter from a Predibase repository.
If absent, then load the fine-tuned adapter from a HuggingFace repository.
"""
if self.adapter_version:
# Since the adapter version is provided, query the Predibase repository.
pb_adapter_id: str = f"{self.adapter_id}/{self.adapter_version}"
options.pop(
"api_token", None
) # The "api_token" is not used for Predibase-hosted models.
try:
response = lorax_client.generate(
prompt=prompt,
adapter_id=pb_adapter_id,
**options,
)
except GenerationError as ge:
raise ValueError(
f"""An adapter with the ID "{pb_adapter_id}" cannot be \
found in the Predibase repository of fine-tuned adapters."""
) from ge
else:
# The adapter version is omitted,
# hence look for the adapter ID in the HuggingFace repository.
try:
response = lorax_client.generate(
prompt=prompt,
adapter_id=self.adapter_id,
adapter_source="hub",
**options,
)
except GenerationError as ge:
raise ValueError(
f"""Either an adapter with the ID "{self.adapter_id}" \
cannot be found in a HuggingFace repository, or it is incompatible with the \
base model (please make sure that the adapter configuration is consistent).
"""
) from ge
else:
try:
response = lorax_client.generate(
prompt=prompt,
**options,
)
except requests.JSONDecodeError as jde:
raise ValueError(
f"""An LLM with the deployment ID "{self.model}" cannot be found \
at Predibase (please refer to \
"https://docs.predibase.com/user-guide/inference/models" for the list of \
supported models).
"""
) from jde
response_text = response.generated_text
return response_text
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_kwargs": self.model_kwargs},
}
def _is_deprecated_sdk_version(self) -> bool:
try:
import semantic_version
from predibase.version import __version__ as current_version
from semantic_version.base import Version
sdk_semver_deprecated: Version = semantic_version.Version(
version_string="2024.4.8"
)
actual_current_version: str = self.predibase_sdk_version or current_version
sdk_semver_current: Version = semantic_version.Version(
version_string=actual_current_version
)
return not (
(sdk_semver_current > sdk_semver_deprecated)
or ("+dev" in actual_current_version)
)
except ImportError as e:
raise ImportError(
"Could not import Predibase Python package. "
"Please install it with `pip install semantic_version predibase`."
) from e
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/textgen.py | import json
import logging
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional
import requests
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from pydantic import Field
logger = logging.getLogger(__name__)
class TextGen(LLM):
"""Text generation models from WebUI.
To use, you should have the text-generation-webui installed, a model loaded,
and --api added as a command-line option.
Suggested installation, use one-click installer for your OS:
https://github.com/oobabooga/text-generation-webui#one-click-installers
Parameters below taken from text-generation-webui api example:
https://github.com/oobabooga/text-generation-webui/blob/main/api-examples/api-example.py
Example:
.. code-block:: python
from langchain_community.llms import TextGen
llm = TextGen(model_url="http://localhost:8500")
"""
model_url: str
"""The full URL to the textgen webui including http[s]://host:port """
preset: Optional[str] = None
"""The preset to use in the textgen webui """
max_new_tokens: Optional[int] = 250
"""The maximum number of tokens to generate."""
do_sample: bool = Field(True, alias="do_sample")
"""Do sample"""
temperature: Optional[float] = 1.3
"""Primary factor to control randomness of outputs. 0 = deterministic
(only the most likely token is used). Higher value = more randomness."""
top_p: Optional[float] = 0.1
"""If not set to 1, select tokens with probabilities adding up to less than this
number. Higher value = higher range of possible random results."""
typical_p: Optional[float] = 1
"""If not set to 1, select only tokens that are at least this much more likely to
appear than random tokens, given the prior text."""
epsilon_cutoff: Optional[float] = 0 # In units of 1e-4
"""Epsilon cutoff"""
eta_cutoff: Optional[float] = 0 # In units of 1e-4
"""ETA cutoff"""
repetition_penalty: Optional[float] = 1.18
"""Exponential penalty factor for repeating prior tokens. 1 means no penalty,
higher value = less repetition, lower value = more repetition."""
top_k: Optional[float] = 40
"""Similar to top_p, but select instead only the top_k most likely tokens.
Higher value = higher range of possible random results."""
min_length: Optional[int] = 0
"""Minimum generation length in tokens."""
no_repeat_ngram_size: Optional[int] = 0
"""If not set to 0, specifies the length of token sets that are completely blocked
from repeating at all. Higher values = blocks larger phrases,
lower values = blocks words or letters from repeating.
Only 0 or high values are a good idea in most cases."""
num_beams: Optional[int] = 1
"""Number of beams"""
penalty_alpha: Optional[float] = 0
"""Penalty Alpha"""
length_penalty: Optional[float] = 1
"""Length Penalty"""
early_stopping: bool = Field(False, alias="early_stopping")
"""Early stopping"""
seed: int = Field(-1, alias="seed")
"""Seed (-1 for random)"""
add_bos_token: bool = Field(True, alias="add_bos_token")
"""Add the bos_token to the beginning of prompts.
Disabling this can make the replies more creative."""
truncation_length: Optional[int] = 2048
"""Truncate the prompt up to this length. The leftmost tokens are removed if
the prompt exceeds this length. Most models require this to be at most 2048."""
ban_eos_token: bool = Field(False, alias="ban_eos_token")
"""Ban the eos_token. Forces the model to never end the generation prematurely."""
skip_special_tokens: bool = Field(True, alias="skip_special_tokens")
"""Skip special tokens. Some specific models need this unset."""
stopping_strings: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
streaming: bool = False
"""Whether to stream the results, token by token."""
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling textgen."""
return {
"max_new_tokens": self.max_new_tokens,
"do_sample": self.do_sample,
"temperature": self.temperature,
"top_p": self.top_p,
"typical_p": self.typical_p,
"epsilon_cutoff": self.epsilon_cutoff,
"eta_cutoff": self.eta_cutoff,
"repetition_penalty": self.repetition_penalty,
"top_k": self.top_k,
"min_length": self.min_length,
"no_repeat_ngram_size": self.no_repeat_ngram_size,
"num_beams": self.num_beams,
"penalty_alpha": self.penalty_alpha,
"length_penalty": self.length_penalty,
"early_stopping": self.early_stopping,
"seed": self.seed,
"add_bos_token": self.add_bos_token,
"truncation_length": self.truncation_length,
"ban_eos_token": self.ban_eos_token,
"skip_special_tokens": self.skip_special_tokens,
"stopping_strings": self.stopping_strings,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model_url": self.model_url}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "textgen"
def _get_parameters(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:
"""
Performs sanity check, preparing parameters in format needed by textgen.
Args:
stop (Optional[List[str]]): List of stop sequences for textgen.
Returns:
Dictionary containing the combined parameters.
"""
# Raise error if stop sequences are in both input and default params
# if self.stop and stop is not None:
if self.stopping_strings and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
if self.preset is None:
params = self._default_params
else:
params = {"preset": self.preset}
        # Use the configured stopping_strings if set, otherwise the stop argument, otherwise an empty list.
params["stopping_strings"] = self.stopping_strings or stop or []
return params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the textgen web API and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain_community.llms import TextGen
llm = TextGen(model_url="http://localhost:5000")
llm.invoke("Write a story about llamas.")
"""
if self.streaming:
combined_text_output = ""
for chunk in self._stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
combined_text_output += chunk.text
result = combined_text_output
else:
url = f"{self.model_url}/api/v1/generate"
params = self._get_parameters(stop)
request = params.copy()
request["prompt"] = prompt
response = requests.post(url, json=request)
if response.status_code == 200:
result = response.json()["results"][0]["text"]
else:
print(f"ERROR: response: {response}") # noqa: T201
result = ""
return result
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the textgen web API and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain_community.llms import TextGen
llm = TextGen(model_url="http://localhost:5000")
llm.invoke("Write a story about llamas.")
"""
if self.streaming:
combined_text_output = ""
async for chunk in self._astream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
combined_text_output += chunk.text
result = combined_text_output
else:
url = f"{self.model_url}/api/v1/generate"
params = self._get_parameters(stop)
request = params.copy()
request["prompt"] = prompt
response = requests.post(url, json=request)
if response.status_code == 200:
result = response.json()["results"][0]["text"]
else:
print(f"ERROR: response: {response}") # noqa: T201
result = ""
return result
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Yields results objects as they are generated in real time.
It also calls the callback manager's on_llm_new_token event with
similar parameters to the OpenAI LLM class method of the same name.
Args:
prompt: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens being generated.
Yields:
A dictionary like objects containing a string token and metadata.
See text-generation-webui docs and below for more.
Example:
.. code-block:: python
from langchain_community.llms import TextGen
llm = TextGen(
model_url = "ws://localhost:5005"
streaming=True
)
for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
stop=["'","\n"]):
print(chunk, end='', flush=True) # noqa: T201
"""
try:
import websocket
except ImportError:
raise ImportError(
"The `websocket-client` package is required for streaming."
)
params = {**self._get_parameters(stop), **kwargs}
url = f"{self.model_url}/api/v1/stream"
request = params.copy()
request["prompt"] = prompt
websocket_client = websocket.WebSocket()
websocket_client.connect(url)
websocket_client.send(json.dumps(request))
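        # Read streamed events until the server signals "stream_end".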
while True:
result = websocket_client.recv()
result = json.loads(result)
if result["event"] == "text_stream": # type: ignore[call-overload, index]
chunk = GenerationChunk(
text=result["text"], # type: ignore[call-overload, index]
generation_info=None,
)
if run_manager:
run_manager.on_llm_new_token(token=chunk.text)
yield chunk
elif result["event"] == "stream_end": # type: ignore[call-overload, index]
websocket_client.close()
return
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
"""Yields results objects as they are generated in real time.
It also calls the callback manager's on_llm_new_token event with
similar parameters to the OpenAI LLM class method of the same name.
Args:
prompt: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens being generated.
Yields:
A dictionary like objects containing a string token and metadata.
See text-generation-webui docs and below for more.
Example:
.. code-block:: python
from langchain_community.llms import TextGen
llm = TextGen(
model_url = "ws://localhost:5005"
streaming=True
)
for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
stop=["'","\n"]):
print(chunk, end='', flush=True) # noqa: T201
"""
try:
import websocket
except ImportError:
raise ImportError(
"The `websocket-client` package is required for streaming."
)
params = {**self._get_parameters(stop), **kwargs}
url = f"{self.model_url}/api/v1/stream"
request = params.copy()
request["prompt"] = prompt
websocket_client = websocket.WebSocket()
websocket_client.connect(url)
websocket_client.send(json.dumps(request))
while True:
result = websocket_client.recv()
result = json.loads(result)
if result["event"] == "text_stream": # type: ignore[call-overload, index]
chunk = GenerationChunk(
text=result["text"], # type: ignore[call-overload, index]
generation_info=None,
)
if run_manager:
await run_manager.on_llm_new_token(token=chunk.text)
yield chunk
elif result["event"] == "stream_end": # type: ignore[call-overload, index]
websocket_client.close()
return
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/koboldai.py | import logging
from typing import Any, Dict, List, Optional
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
logger = logging.getLogger(__name__)
def clean_url(url: str) -> str:
"""Remove trailing slash and /api from url if present."""
if url.endswith("/api"):
return url[:-4]
elif url.endswith("/"):
return url[:-1]
else:
return url
class KoboldApiLLM(LLM):
"""Kobold API language model.
It includes several fields that can be used to control the text generation process.
To use this class, instantiate it with the required parameters and call it with a
prompt to generate text. For example:
    .. code-block:: python

        kobold = KoboldApiLLM(endpoint="http://localhost:5000")
        result = kobold.invoke("Write a story about a dragon.")
This will send a POST request to the Kobold API with the provided prompt and
generate text.
"""
endpoint: str
"""The API endpoint to use for generating text."""
use_story: Optional[bool] = False
""" Whether or not to use the story from the KoboldAI GUI when generating text. """
use_authors_note: Optional[bool] = False
"""Whether to use the author's note from the KoboldAI GUI when generating text.
This has no effect unless use_story is also enabled.
"""
use_world_info: Optional[bool] = False
"""Whether to use the world info from the KoboldAI GUI when generating text."""
use_memory: Optional[bool] = False
"""Whether to use the memory from the KoboldAI GUI when generating text."""
max_context_length: Optional[int] = 1600
"""Maximum number of tokens to send to the model.
minimum: 1
"""
max_length: Optional[int] = 80
"""Number of tokens to generate.
maximum: 512
minimum: 1
"""
rep_pen: Optional[float] = 1.12
"""Base repetition penalty value.
minimum: 1
"""
rep_pen_range: Optional[int] = 1024
"""Repetition penalty range.
minimum: 0
"""
rep_pen_slope: Optional[float] = 0.9
"""Repetition penalty slope.
minimum: 0
"""
temperature: Optional[float] = 0.6
"""Temperature value.
exclusiveMinimum: 0
"""
tfs: Optional[float] = 0.9
"""Tail free sampling value.
maximum: 1
minimum: 0
"""
top_a: Optional[float] = 0.9
"""Top-a sampling value.
minimum: 0
"""
top_p: Optional[float] = 0.95
"""Top-p sampling value.
maximum: 1
minimum: 0
"""
top_k: Optional[int] = 0
"""Top-k sampling value.
minimum: 0
"""
typical: Optional[float] = 0.5
"""Typical sampling value.
maximum: 1
minimum: 0
"""
@property
def _llm_type(self) -> str:
return "koboldai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the API and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain_community.llms import KoboldApiLLM
llm = KoboldApiLLM(endpoint="http://localhost:5000")
llm.invoke("Write a story about dragons.")
"""
data: Dict[str, Any] = {
"prompt": prompt,
"use_story": self.use_story,
"use_authors_note": self.use_authors_note,
"use_world_info": self.use_world_info,
"use_memory": self.use_memory,
"max_context_length": self.max_context_length,
"max_length": self.max_length,
"rep_pen": self.rep_pen,
"rep_pen_range": self.rep_pen_range,
"rep_pen_slope": self.rep_pen_slope,
"temperature": self.temperature,
"tfs": self.tfs,
"top_a": self.top_a,
"top_p": self.top_p,
"top_k": self.top_k,
"typical": self.typical,
}
if stop is not None:
data["stop_sequence"] = stop
response = requests.post(
f"{clean_url(self.endpoint)}/api/v1/generate", json=data
)
response.raise_for_status()
json_response = response.json()
if (
"results" in json_response
and len(json_response["results"]) > 0
and "text" in json_response["results"][0]
):
text = json_response["results"][0]["text"].strip()
if stop is not None:
for sequence in stop:
if text.endswith(sequence):
text = text[: -len(sequence)].rstrip()
return text
else:
raise ValueError(
f"Unexpected response format from Kobold API: {json_response}"
)
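# Demo sketch (added for illustration; not part of the original module). It
# assumes a KoboldAI server is reachable at http://localhost:5000.
if __name__ == "__main__":
    demo_llm = KoboldApiLLM(endpoint="http://localhost:5000", temperature=0.7)
    # Stop strings are forwarded as `stop_sequence` in the request payload and
    # trimmed from the end of the returned text.
    print(demo_llm.invoke("Write a haiku about dragons.", stop=["\n\n"]))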
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/ctranslate2.py | from typing import Any, Dict, List, Optional, Union
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, LLMResult
from langchain_core.utils import pre_init
from pydantic import Field
class CTranslate2(BaseLLM):
"""CTranslate2 language model."""
model_path: str = ""
"""Path to the CTranslate2 model directory."""
tokenizer_name: str = ""
"""Name of the original Hugging Face model needed to load the proper tokenizer."""
device: str = "cpu"
"""Device to use (possible values are: cpu, cuda, auto)."""
device_index: Union[int, List[int]] = 0
"""Device IDs where to place this generator on."""
compute_type: Union[str, Dict[str, str]] = "default"
"""
Model computation type or a dictionary mapping a device name to the computation type
(possible values are: default, auto, int8, int8_float32, int8_float16,
int8_bfloat16, int16, float16, bfloat16, float32).
"""
max_length: int = 512
"""Maximum generation length."""
sampling_topk: int = 1
"""Randomly sample predictions from the top K candidates."""
sampling_topp: float = 1
"""Keep the most probable tokens whose cumulative probability exceeds this value."""
sampling_temperature: float = 1
"""Sampling temperature to generate more random samples."""
client: Any = None #: :meta private:
tokenizer: Any = None #: :meta private:
ctranslate2_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""
Holds any model parameters valid for `ctranslate2.Generator` call not
explicitly specified.
"""
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
import ctranslate2
except ImportError:
raise ImportError(
"Could not import ctranslate2 python package. "
"Please install it with `pip install ctranslate2`."
)
try:
import transformers
except ImportError:
raise ImportError(
"Could not import transformers python package. "
"Please install it with `pip install transformers`."
)
values["client"] = ctranslate2.Generator(
model_path=values["model_path"],
device=values["device"],
device_index=values["device_index"],
compute_type=values["compute_type"],
**values["ctranslate2_kwargs"],
)
values["tokenizer"] = transformers.AutoTokenizer.from_pretrained(
values["tokenizer_name"]
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters."""
return {
"max_length": self.max_length,
"sampling_topk": self.sampling_topk,
"sampling_topp": self.sampling_topp,
"sampling_temperature": self.sampling_temperature,
}
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
# build sampling parameters
params = {**self._default_params, **kwargs}
# call the model
encoded_prompts = self.tokenizer(prompts)["input_ids"]
tokenized_prompts = [
self.tokenizer.convert_ids_to_tokens(encoded_prompt)
for encoded_prompt in encoded_prompts
]
results = self.client.generate_batch(tokenized_prompts, **params)
sequences = [result.sequences_ids[0] for result in results]
decoded_sequences = [self.tokenizer.decode(seq) for seq in sequences]
generations = []
for text in decoded_sequences:
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "ctranslate2"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/oci_data_science_model_deployment_endpoint.py | # Copyright (c) 2023, 2024, Oracle and/or its affiliates.
"""LLM for OCI data science model deployment endpoint."""
import json
import logging
import traceback
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Literal,
Optional,
Union,
)
import aiohttp
import requests
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import BaseLLM, create_base_retry_decorator
from langchain_core.load.serializable import Serializable
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from langchain_core.utils import get_from_dict_or_env
from pydantic import Field, model_validator
from langchain_community.utilities.requests import Requests
logger = logging.getLogger(__name__)
DEFAULT_TIME_OUT = 300
DEFAULT_CONTENT_TYPE_JSON = "application/json"
DEFAULT_MODEL_NAME = "odsc-llm"
class TokenExpiredError(Exception):
"""Raises when token expired."""
class ServerError(Exception):
"""Raises when encounter server error when making inference."""
def _create_retry_decorator(
llm: "BaseOCIModelDeployment",
*,
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
"""Create a retry decorator."""
errors = [requests.exceptions.ConnectTimeout, TokenExpiredError]
decorator = create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
)
return decorator
class BaseOCIModelDeployment(Serializable):
"""Base class for LLM deployed on OCI Data Science Model Deployment."""
auth: dict = Field(default_factory=dict, exclude=True)
"""ADS auth dictionary for OCI authentication:
https://accelerated-data-science.readthedocs.io/en/latest/user_guide/cli/authentication.html.
This can be generated by calling `ads.common.auth.api_keys()`
or `ads.common.auth.resource_principal()`. If this is not
provided then the `ads.common.default_signer()` will be used."""
endpoint: str = ""
"""The uri of the endpoint from the deployed Model Deployment model."""
streaming: bool = False
"""Whether to stream the results or not."""
max_retries: int = 3
"""Maximum number of retries to make when generating."""
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Dict:
"""Checks if oracle-ads is installed and
get credentials/endpoint from environment.
"""
try:
import ads
except ImportError as ex:
raise ImportError(
"Could not import ads python package. "
"Please install it with `pip install oracle_ads`."
) from ex
if not values.get("auth", None):
values["auth"] = ads.common.auth.default_signer()
values["endpoint"] = get_from_dict_or_env(
values,
"endpoint",
"OCI_LLM_ENDPOINT",
)
return values
def _headers(
self, is_async: Optional[bool] = False, body: Optional[dict] = None
) -> Dict:
"""Construct and return the headers for a request.
Args:
is_async (bool, optional): Indicates if the request is asynchronous.
Defaults to `False`.
body (optional): The request body to be included in the headers if
the request is asynchronous.
Returns:
Dict: A dictionary containing the appropriate headers for the request.
"""
if is_async:
signer = self.auth["signer"]
_req = requests.Request("POST", self.endpoint, json=body)
req = _req.prepare()
req = signer(req)
headers = {}
for key, value in req.headers.items():
headers[key] = value
if self.streaming:
headers.update(
{"enable-streaming": "true", "Accept": "text/event-stream"}
)
return headers
return (
{
"Content-Type": DEFAULT_CONTENT_TYPE_JSON,
"enable-streaming": "true",
"Accept": "text/event-stream",
}
if self.streaming
else {
"Content-Type": DEFAULT_CONTENT_TYPE_JSON,
}
)
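# Illustration (added for clarity; not part of the original module): headers
# produced by `_headers` for synchronous calls. Async calls additionally sign
# the request with the OCI signer.
#
#     streaming=False -> {"Content-Type": "application/json"}
#     streaming=True  -> {"Content-Type": "application/json",
#                         "enable-streaming": "true",
#                         "Accept": "text/event-stream"}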
def completion_with_retry(
self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(self, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
try:
request_timeout = kwargs.pop("request_timeout", DEFAULT_TIME_OUT)
data = kwargs.pop("data")
stream = kwargs.pop("stream", self.streaming)
request = Requests(
headers=self._headers(), auth=self.auth.get("signer")
)
response = request.post(
url=self.endpoint,
data=data,
timeout=request_timeout,
stream=stream,
**kwargs,
)
self._check_response(response)
return response
except TokenExpiredError as e:
raise e
except Exception as err:
traceback.print_exc()
logger.debug(
f"Requests payload: {data}. Requests arguments: "
f"url={self.endpoint},timeout={request_timeout},stream={stream}. "
f"Additional request kwargs={kwargs}."
)
raise RuntimeError(
f"Error occurs by inference endpoint: {str(err)}"
) from err
return _completion_with_retry(**kwargs)
async def acompletion_with_retry(
self,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(self, run_manager=run_manager)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
try:
request_timeout = kwargs.pop("request_timeout", DEFAULT_TIME_OUT)
data = kwargs.pop("data")
stream = kwargs.pop("stream", self.streaming)
request = Requests(headers=self._headers(is_async=True, body=data))
if stream:
response = request.apost(
url=self.endpoint,
data=data,
timeout=request_timeout,
)
return self._aiter_sse(response)
else:
async with request.apost(
url=self.endpoint,
data=data,
timeout=request_timeout,
) as resp:
self._check_response(resp)
data = await resp.json()
return data
except TokenExpiredError as e:
raise e
except Exception as err:
traceback.print_exc()
logger.debug(
f"Requests payload: `{data}`. "
f"Stream mode={stream}. "
f"Requests kwargs: url={self.endpoint}, timeout={request_timeout}."
)
raise RuntimeError(
f"Error occurs by inference endpoint: {str(err)}"
) from err
return await _completion_with_retry(**kwargs)
def _check_response(self, response: Any) -> None:
"""Handle server error by checking the response status.
Args:
response:
The response object from either `requests` or `aiohttp` library.
Raises:
TokenExpiredError:
If the response status code is 401 and the token refresh is successful.
ServerError:
If any other HTTP error occurs.
"""
try:
response.raise_for_status()
except requests.exceptions.HTTPError as http_err:
status_code = (
response.status_code
if hasattr(response, "status_code")
else response.status
)
if status_code == 401 and self._refresh_signer():
raise TokenExpiredError() from http_err
raise ServerError(
f"Server error: {str(http_err)}. \nMessage: {response.text}"
) from http_err
def _parse_stream(self, lines: Iterator[bytes]) -> Iterator[str]:
"""Parse a stream of byte lines and yield parsed string lines.
Args:
lines (Iterator[bytes]):
An iterator that yields lines in byte format.
Yields:
Iterator[str]:
An iterator that yields parsed lines as strings.
"""
for line in lines:
_line = self._parse_stream_line(line)
if _line is not None:
yield _line
async def _parse_stream_async(
self,
lines: aiohttp.StreamReader,
) -> AsyncIterator[str]:
"""
Asynchronously parse a stream of byte lines and yield parsed string lines.
Args:
lines (aiohttp.StreamReader):
An `aiohttp.StreamReader` object that yields lines in byte format.
Yields:
AsyncIterator[str]:
An asynchronous iterator that yields parsed lines as strings.
"""
async for line in lines:
_line = self._parse_stream_line(line)
if _line is not None:
yield _line
def _parse_stream_line(self, line: bytes) -> Optional[str]:
"""Parse a single byte line and return a processed string line if valid.
Args:
line (bytes): A single line in byte format.
Returns:
Optional[str]:
The processed line as a string if valid, otherwise `None`.
"""
line = line.strip()
if not line:
return None
_line = line.decode("utf-8")
if _line.lower().startswith("data:"):
_line = _line[5:].lstrip()
if _line.startswith("[DONE]"):
return None
return _line
return None
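# Illustration (added for clarity; not part of the original module): how a raw
# SSE byte line is reduced to its JSON payload by `_parse_stream_line`.
#
#     b'data: {"choices": [{"text": "Hi"}]}'  -> '{"choices": [{"text": "Hi"}]}'
#     b'data: [DONE]'                         -> None (end of stream)
#     b''                                     -> None (blank keep-alive line)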
async def _aiter_sse(
self,
async_cntx_mgr: Any,
) -> AsyncIterator[str]:
"""Asynchronously iterate over server-sent events (SSE).
Args:
async_cntx_mgr: An asynchronous context manager that yields a client
response object.
Yields:
AsyncIterator[str]: An asynchronous iterator that yields parsed server-sent
event lines as json string.
"""
async with async_cntx_mgr as client_resp:
self._check_response(client_resp)
async for line in self._parse_stream_async(client_resp.content):
yield line
def _refresh_signer(self) -> bool:
"""Attempt to refresh the security token using the signer.
Returns:
bool: `True` if the token was successfully refreshed, `False` otherwise.
"""
if self.auth.get("signer", None) and hasattr(
self.auth["signer"], "refresh_security_token"
):
self.auth["signer"].refresh_security_token()
return True
return False
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by LangChain."""
return True
class OCIModelDeploymentLLM(BaseLLM, BaseOCIModelDeployment):
"""LLM deployed on OCI Data Science Model Deployment.
To use, you must provide the model HTTP endpoint from your deployed
model, e.g. https://modeldeployment.<region>.oci.customer-oci.com/<md_ocid>/predict.
To authenticate, `oracle-ads` has been used to automatically load
credentials: https://accelerated-data-science.readthedocs.io/en/latest/user_guide/cli/authentication.html
Make sure to have the required policies to access the OCI Data
Science Model Deployment endpoint. See:
https://docs.oracle.com/en-us/iaas/data-science/using/model-dep-policies-auth.htm#model_dep_policies_auth__predict-endpoint
Example:
.. code-block:: python
from langchain_community.llms import OCIModelDeploymentLLM
llm = OCIModelDeploymentLLM(
endpoint="https://modeldeployment.us-ashburn-1.oci.customer-oci.com/<ocid>/predict",
model="odsc-llm",
streaming=True,
model_kwargs={"frequency_penalty": 1.0},
)
llm.invoke("tell me a joke.")
Customized Usage:
Users can inherit from our base class and override the `_process_response`, `_process_stream_response`,
and `_construct_json_body` methods to satisfy customized needs.
.. code-block:: python
from langchain_community.llms import OCIModelDeploymentLLM
class MyCustomizedModel(OCIModelDeploymentLLM):
def _process_stream_response(self, response_json:dict) -> GenerationChunk:
print("My customized output stream handler.")
return GenerationChunk()
def _process_response(self, response_json:dict) -> List[Generation]:
print("My customized output handler.")
return [Generation()]
def _construct_json_body(self, prompt: str, param:dict) -> dict:
print("My customized input handler.")
return {}
llm = MyCustomizedModel(
endpoint=f"https://modeldeployment.us-ashburn-1.oci.customer-oci.com/{ocid}/predict",
model="<model_name>",
)
llm.invoke("tell me a joke.")
""" # noqa: E501
model: str = DEFAULT_MODEL_NAME
"""The name of the model."""
max_tokens: int = 256
"""Denotes the number of tokens to predict per generation."""
temperature: float = 0.2
"""A non-negative float that tunes the degree of randomness in generation."""
k: int = -1
"""Number of most likely tokens to consider at each step."""
p: float = 0.75
"""Total probability mass of tokens to consider at each step."""
best_of: int = 1
"""Generates best_of completions server-side and returns the "best"
(the one with the highest log probability per token).
"""
stop: Optional[List[str]] = None
"""Stop words to use when generating. Model output is cut off
at the first occurrence of any of these substrings."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Keyword arguments to pass to the model."""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "oci_model_deployment_endpoint"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters."""
return {
"best_of": self.best_of,
"max_tokens": self.max_tokens,
"model": self.model,
"stop": self.stop,
"stream": self.streaming,
"temperature": self.temperature,
"top_k": self.k,
"top_p": self.p,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint": self.endpoint, "model_kwargs": _model_kwargs},
**self._default_params,
}
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to OCI Data Science Model Deployment endpoint with k unique prompts.
Args:
prompts: The prompts to pass into the service.
stop: Optional list of stop words to use when generating.
Returns:
The full LLM output.
Example:
.. code-block:: python
response = llm.invoke("Tell me a joke.")
response = llm.generate(["Tell me a joke."])
"""
generations: List[List[Generation]] = []
params = self._invocation_params(stop, **kwargs)
for prompt in prompts:
body = self._construct_json_body(prompt, params)
if self.streaming:
generation = GenerationChunk(text="")
for chunk in self._stream(
prompt, stop=stop, run_manager=run_manager, **kwargs
):
generation += chunk
generations.append([generation])
else:
res = self.completion_with_retry(
data=body,
run_manager=run_manager,
**kwargs,
)
generations.append(self._process_response(res.json()))
return LLMResult(generations=generations)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to OCI Data Science Model Deployment endpoint async with k unique prompts.
Args:
prompts: The prompts to pass into the service.
stop: Optional list of stop words to use when generating.
Returns:
The full LLM output.
Example:
.. code-block:: python
response = await llm.ainvoke("Tell me a joke.")
response = await llm.agenerate(["Tell me a joke."])
""" # noqa: E501
generations: List[List[Generation]] = []
params = self._invocation_params(stop, **kwargs)
for prompt in prompts:
body = self._construct_json_body(prompt, params)
if self.streaming:
generation = GenerationChunk(text="")
async for chunk in self._astream(
prompt, stop=stop, run_manager=run_manager, **kwargs
):
generation += chunk
generations.append([generation])
else:
res = await self.acompletion_with_retry(
data=body,
run_manager=run_manager,
**kwargs,
)
generations.append(self._process_response(res))
return LLMResult(generations=generations)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Stream OCI Data Science Model Deployment endpoint on given prompt.
Args:
prompt (str):
The prompt to pass into the model.
stop (List[str], Optional):
List of stop words to use when generating.
kwargs:
requests_kwargs:
Additional ``**kwargs`` to pass to requests.post
Returns:
An iterator of GenerationChunks.
Example:
.. code-block:: python
response = llm.stream("Tell me a joke.")
"""
requests_kwargs = kwargs.pop("requests_kwargs", {})
self.streaming = True
params = self._invocation_params(stop, **kwargs)
body = self._construct_json_body(prompt, params)
response = self.completion_with_retry(
data=body, run_manager=run_manager, stream=True, **requests_kwargs
)
for line in self._parse_stream(response.iter_lines()):
chunk = self._handle_sse_line(line)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
"""Stream OCI Data Science Model Deployment endpoint async on given prompt.
Args:
prompt (str):
The prompt to pass into the model.
stop (List[str], Optional):
List of stop words to use when generating.
kwargs:
requests_kwargs:
Additional ``**kwargs`` to pass to requests.post
Returns:
An iterator of GenerationChunks.
Example:
.. code-block:: python
async for chunk in llm.astream("Tell me a joke."):
print(chunk, end="", flush=True)
"""
requests_kwargs = kwargs.pop("requests_kwargs", {})
self.streaming = True
params = self._invocation_params(stop, **kwargs)
body = self._construct_json_body(prompt, params)
async for line in await self.acompletion_with_retry(
data=body, run_manager=run_manager, stream=True, **requests_kwargs
):
chunk = self._handle_sse_line(line)
if run_manager:
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
def _construct_json_body(self, prompt: str, params: dict) -> dict:
"""Constructs the request body as a dictionary (JSON)."""
return {
"prompt": prompt,
**params,
}
def _invocation_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> dict:
"""Combines the invocation parameters with default parameters."""
params = self._default_params
_model_kwargs = self.model_kwargs or {}
params["stop"] = stop or params.get("stop", [])
return {**params, **_model_kwargs, **kwargs}
def _process_stream_response(self, response_json: dict) -> GenerationChunk:
"""Formats streaming response for OpenAI spec into GenerationChunk."""
try:
choice = response_json["choices"][0]
if not isinstance(choice, dict):
raise TypeError("Endpoint response is not well formed.")
except (KeyError, IndexError, TypeError) as e:
raise ValueError("Error while formatting response payload.") from e
return GenerationChunk(text=choice.get("text", ""))
def _process_response(self, response_json: dict) -> List[Generation]:
"""Formats response in OpenAI spec.
Args:
response_json (dict): The JSON response from the chat model endpoint.
Returns:
ChatResult: An object containing the list of `ChatGeneration` objects
and additional LLM output information.
Raises:
ValueError: If the response JSON is not well-formed or does not
contain the expected structure.
"""
generations = []
try:
choices = response_json["choices"]
if not isinstance(choices, list):
raise TypeError("Endpoint response is not well formed.")
except (KeyError, TypeError) as e:
raise ValueError("Error while formatting response payload.") from e
for choice in choices:
gen = Generation(
text=choice.get("text"),
generation_info=self._generate_info(choice),
)
generations.append(gen)
return generations
def _generate_info(self, choice: dict) -> Any:
"""Extracts generation info from the response."""
gen_info = {}
finish_reason = choice.get("finish_reason", None)
logprobs = choice.get("logprobs", None)
index = choice.get("index", None)
if finish_reason:
gen_info.update({"finish_reason": finish_reason})
if logprobs is not None:
gen_info.update({"logprobs": logprobs})
if index is not None:
gen_info.update({"index": index})
return gen_info or None
def _handle_sse_line(self, line: str) -> GenerationChunk:
try:
obj = json.loads(line)
return self._process_stream_response(obj)
except Exception:
return GenerationChunk(text="")
class OCIModelDeploymentTGI(OCIModelDeploymentLLM):
"""OCI Data Science Model Deployment TGI Endpoint.
To use, you must provide the model HTTP endpoint from your deployed
model, e.g. https://modeldeployment.<region>.oci.customer-oci.com/<md_ocid>/predict.
To authenticate, `oracle-ads` has been used to automatically load
credentials: https://accelerated-data-science.readthedocs.io/en/latest/user_guide/cli/authentication.html
Make sure to have the required policies to access the OCI Data
Science Model Deployment endpoint. See:
https://docs.oracle.com/en-us/iaas/data-science/using/model-dep-policies-auth.htm#model_dep_policies_auth__predict-endpoint
Example:
.. code-block:: python
from langchain_community.llms import OCIModelDeploymentTGI
llm = OCIModelDeploymentTGI(
endpoint="https://modeldeployment.<region>.oci.customer-oci.com/<md_ocid>/predict",
api="/v1/completions",
streaming=True,
temperature=0.2,
seed=42,
# other model parameters ...
)
"""
api: Literal["/generate", "/v1/completions"] = "/v1/completions"
"""Api spec."""
frequency_penalty: float = 0.0
"""Penalizes repeated tokens according to frequency. Between 0 and 1."""
seed: Optional[int] = None
"""Random sampling seed"""
repetition_penalty: Optional[float] = None
"""The parameter for repetition penalty. 1.0 means no penalty."""
suffix: Optional[str] = None
"""The text to append to the prompt. """
do_sample: bool = True
"""If set to True, this parameter enables decoding strategies such as
multinomial sampling, beam-search multinomial sampling, Top-K
sampling and Top-p sampling.
"""
watermark: bool = True
"""Watermarking with `A Watermark for Large Language Models <https://arxiv.org/abs/2301.10226>`_.
Defaults to True."""
return_full_text: bool = False
"""Whether to prepend the prompt to the generated text. Defaults to False."""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "oci_model_deployment_tgi_endpoint"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for invoking OCI model deployment TGI endpoint."""
return (
{
"model": self.model, # can be any
"frequency_penalty": self.frequency_penalty,
"max_tokens": self.max_tokens,
"repetition_penalty": self.repetition_penalty,
"temperature": self.temperature,
"top_p": self.p,
"seed": self.seed,
"stream": self.streaming,
"suffix": self.suffix,
"stop": self.stop,
}
if self.api == "/v1/completions"
else {
"best_of": self.best_of,
"max_new_tokens": self.max_tokens,
"temperature": self.temperature,
"top_k": (
self.k if self.k > 0 else None
),  # `top_k` must be strictly positive
"top_p": self.p,
"do_sample": self.do_sample,
"return_full_text": self.return_full_text,
"watermark": self.watermark,
"stop": self.stop,
}
)
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{
"endpoint": self.endpoint,
"api": self.api,
"model_kwargs": _model_kwargs,
},
**self._default_params,
}
def _construct_json_body(self, prompt: str, params: dict) -> dict:
"""Construct request payload."""
if self.api == "/v1/completions":
return super()._construct_json_body(prompt, params)
return {
"inputs": prompt,
"parameters": params,
}
def _process_response(self, response_json: dict) -> List[Generation]:
"""Formats response."""
if self.api == "/v1/completions":
return super()._process_response(response_json)
try:
text = response_json["generated_text"]
except KeyError as e:
raise ValueError(
f"Error while formatting response payload.response_json={response_json}"
) from e
return [Generation(text=text)]
class OCIModelDeploymentVLLM(OCIModelDeploymentLLM):
"""VLLM deployed on OCI Data Science Model Deployment
To use, you must provide the model HTTP endpoint from your deployed
model, e.g. https://modeldeployment.<region>.oci.customer-oci.com/<md_ocid>/predict.
To authenticate, `oracle-ads` has been used to automatically load
credentials: https://accelerated-data-science.readthedocs.io/en/latest/user_guide/cli/authentication.html
Make sure to have the required policies to access the OCI Data
Science Model Deployment endpoint. See:
https://docs.oracle.com/en-us/iaas/data-science/using/model-dep-policies-auth.htm#model_dep_policies_auth__predict-endpoint
Example:
.. code-block:: python
from langchain_community.llms import OCIModelDeploymentVLLM
llm = OCIModelDeploymentVLLM(
endpoint="https://modeldeployment.<region>.oci.customer-oci.com/<md_ocid>/predict",
model="odsc-llm",
streaming=False,
temperature=0.2,
max_tokens=512,
n=3,
best_of=3,
# other model parameters
)
"""
n: int = 1
"""Number of output sequences to return for the given prompt."""
k: int = -1
"""Number of most likely tokens to consider at each step."""
frequency_penalty: float = 0.0
"""Penalizes repeated tokens according to frequency. Between 0 and 1."""
presence_penalty: float = 0.0
"""Penalizes repeated tokens. Between 0 and 1."""
use_beam_search: bool = False
"""Whether to use beam search instead of sampling."""
ignore_eos: bool = False
"""Whether to ignore the EOS token and continue generating tokens after
the EOS token is generated."""
logprobs: Optional[int] = None
"""Number of log probabilities to return per output token."""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "oci_model_deployment_vllm_endpoint"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling vllm."""
return {
"best_of": self.best_of,
"frequency_penalty": self.frequency_penalty,
"ignore_eos": self.ignore_eos,
"logprobs": self.logprobs,
"max_tokens": self.max_tokens,
"model": self.model,
"n": self.n,
"presence_penalty": self.presence_penalty,
"stop": self.stop,
"stream": self.streaming,
"temperature": self.temperature,
"top_k": self.k,
"top_p": self.p,
"use_beam_search": self.use_beam_search,
}
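# Demo sketch (added for illustration; not part of the original module). The
# endpoint below is a placeholder, and `oracle-ads` must be able to resolve
# credentials (e.g. via `ads.common.default_signer()`).
if __name__ == "__main__":
    demo_llm = OCIModelDeploymentVLLM(
        endpoint=(
            "https://modeldeployment.us-ashburn-1.oci.customer-oci.com"
            "/<md_ocid>/predict"  # placeholder OCID
        ),
        model="odsc-llm",
        temperature=0.2,
        max_tokens=128,
    )
    print(demo_llm.invoke("Tell me a joke."))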
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/opaqueprompts.py | import logging
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.language_models.llms import LLM
from langchain_core.messages import AIMessage
from langchain_core.utils import get_from_dict_or_env, pre_init
from pydantic import ConfigDict
logger = logging.getLogger(__name__)
class OpaquePrompts(LLM):
"""LLM that uses OpaquePrompts to sanitize prompts.
Wraps another LLM and sanitizes prompts before passing them to the LLM, then
de-sanitizes the response.
To use, you should have the ``opaqueprompts`` python package installed,
and the environment variable ``OPAQUEPROMPTS_API_KEY`` set with
your API key, or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.llms import OpaquePrompts
from langchain_community.chat_models import ChatOpenAI
op_llm = OpaquePrompts(base_llm=ChatOpenAI())
"""
base_llm: BaseLanguageModel
"""The base LLM to use."""
model_config = ConfigDict(
extra="forbid",
)
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validates that the OpaquePrompts API key and the Python package exist."""
try:
import opaqueprompts as op
except ImportError:
raise ImportError(
"Could not import the `opaqueprompts` Python package, "
"please install it with `pip install opaqueprompts`."
)
if op.__package__ is None:
raise ValueError(
"Could not properly import `opaqueprompts`, "
"opaqueprompts.__package__ is None."
)
api_key = get_from_dict_or_env(
values, "opaqueprompts_api_key", "OPAQUEPROMPTS_API_KEY", default=""
)
if not api_key:
raise ValueError(
"Could not find OPAQUEPROMPTS_API_KEY in the environment. "
"Please set it to your OpaquePrompts API key."
"You can get it by creating an account on the OpaquePrompts website: "
"https://opaqueprompts.opaque.co/ ."
)
return values
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call base LLM with sanitization before and de-sanitization after.
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = op_llm.invoke("Tell me a joke.")
"""
import opaqueprompts as op
_run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager()
# sanitize the prompt by replacing the sensitive information with a placeholder
sanitize_response: op.SanitizeResponse = op.sanitize([prompt])
sanitized_prompt_value_str = sanitize_response.sanitized_texts[0]
# TODO: Add in callbacks once child runs for LLMs are supported by LangSmith.
# call the LLM with the sanitized prompt and get the response
llm_response = self.base_llm.bind(stop=stop).invoke(
sanitized_prompt_value_str,
)
if isinstance(llm_response, AIMessage):
llm_response = llm_response.content
# desanitize the response by restoring the original sensitive information
desanitize_response: op.DesanitizeResponse = op.desanitize(
llm_response,
secure_context=sanitize_response.secure_context,
)
return desanitize_response.desanitized_text
@property
def _llm_type(self) -> str:
"""Return type of LLM.
This is an override of the base class method.
"""
return "opaqueprompts"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/ipex_llm.py | import logging
from typing import Any, List, Mapping, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from pydantic import ConfigDict
DEFAULT_MODEL_ID = "gpt2"
logger = logging.getLogger(__name__)
class IpexLLM(LLM):
"""IpexLLM model.
Example:
.. code-block:: python
from langchain_community.llms import IpexLLM
llm = IpexLLM.from_model_id(model_id="THUDM/chatglm-6b")
"""
model_id: str = DEFAULT_MODEL_ID
"""Model name or model path to use."""
model_kwargs: Optional[dict] = None
"""Keyword arguments passed to the model."""
model: Any = None #: :meta private:
"""IpexLLM model."""
tokenizer: Any = None #: :meta private:
"""Huggingface tokenizer model."""
streaming: bool = True
"""Whether to stream the results, token by token."""
model_config = ConfigDict(
extra="forbid",
)
@classmethod
def from_model_id(
cls,
model_id: str,
model_kwargs: Optional[dict] = None,
*,
tokenizer_id: Optional[str] = None,
load_in_4bit: bool = True,
load_in_low_bit: Optional[str] = None,
**kwargs: Any,
) -> LLM:
"""
Construct object from model_id
Args:
model_id: Path for the huggingface repo id to be downloaded or
the huggingface checkpoint folder.
tokenizer_id: Path for the huggingface repo id to be downloaded or
the huggingface checkpoint folder which contains the tokenizer.
load_in_4bit: "Whether to load model in 4bit.
Unused if `load_in_low_bit` is not None.
load_in_low_bit: Which low bit precisions to use when loading model.
Example values: 'sym_int4', 'asym_int4', 'fp4', 'nf4', 'fp8', etc.
Overrides `load_in_4bit` if specified.
model_kwargs: Keyword arguments to pass to the model and tokenizer.
kwargs: Extra arguments to pass to the model and tokenizer.
Returns:
An object of IpexLLM.
"""
return cls._load_model(
model_id=model_id,
tokenizer_id=tokenizer_id,
low_bit_model=False,
load_in_4bit=load_in_4bit,
load_in_low_bit=load_in_low_bit,
model_kwargs=model_kwargs,
kwargs=kwargs,
)
@classmethod
def from_model_id_low_bit(
cls,
model_id: str,
model_kwargs: Optional[dict] = None,
*,
tokenizer_id: Optional[str] = None,
**kwargs: Any,
) -> LLM:
"""
Construct low_bit object from model_id
Args:
model_id: Path for the ipex-llm transformers low-bit model folder.
tokenizer_id: Path for the huggingface repo id or local model folder
which contains the tokenizer.
model_kwargs: Keyword arguments to pass to the model and tokenizer.
kwargs: Extra arguments to pass to the model and tokenizer.
Returns:
An object of IpexLLM.
"""
return cls._load_model(
model_id=model_id,
tokenizer_id=tokenizer_id,
low_bit_model=True,
load_in_4bit=False, # not used for low-bit model
load_in_low_bit=None, # not used for low-bit model
model_kwargs=model_kwargs,
kwargs=kwargs,
)
@classmethod
def _load_model(
cls,
model_id: str,
tokenizer_id: Optional[str] = None,
load_in_4bit: bool = False,
load_in_low_bit: Optional[str] = None,
low_bit_model: bool = False,
model_kwargs: Optional[dict] = None,
kwargs: Optional[dict] = None,
) -> Any:
try:
from ipex_llm.transformers import (
AutoModel,
AutoModelForCausalLM,
)
from transformers import AutoTokenizer, LlamaTokenizer
except ImportError:
raise ImportError(
"Could not import ipex-llm. "
"Please install `ipex-llm` properly following installation guides: "
"https://github.com/intel-analytics/ipex-llm?tab=readme-ov-file#install-ipex-llm."
)
_model_kwargs = model_kwargs or {}
kwargs = kwargs or {}
_tokenizer_id = tokenizer_id or model_id
# Set "cpu" as default device
if "device" not in _model_kwargs:
_model_kwargs["device"] = "cpu"
if _model_kwargs["device"] not in ["cpu", "xpu"]:
raise ValueError(
"IpexLLMBgeEmbeddings currently only supports device to be "
f"'cpu' or 'xpu', but you have: {_model_kwargs['device']}."
)
device = _model_kwargs.pop("device")
try:
tokenizer = AutoTokenizer.from_pretrained(_tokenizer_id, **_model_kwargs)
except Exception:
tokenizer = LlamaTokenizer.from_pretrained(_tokenizer_id, **_model_kwargs)
# restore model_kwargs
if "trust_remote_code" in _model_kwargs:
_model_kwargs = {
k: v for k, v in _model_kwargs.items() if k != "trust_remote_code"
}
# Load the model with AutoModelForCausalLM, falling back to AutoModel on failure.
load_kwargs = {
"use_cache": True,
"trust_remote_code": True,
}
if not low_bit_model:
if load_in_low_bit is not None:
load_function_name = "from_pretrained"
load_kwargs["load_in_low_bit"] = load_in_low_bit # type: ignore
else:
load_function_name = "from_pretrained"
load_kwargs["load_in_4bit"] = load_in_4bit
else:
load_function_name = "load_low_bit"
try:
# Attempt to load with AutoModelForCausalLM
model = cls._load_model_general(
AutoModelForCausalLM,
load_function_name=load_function_name,
model_id=model_id,
load_kwargs=load_kwargs,
model_kwargs=_model_kwargs,
)
except Exception:
# Fallback to AutoModel if there's an exception
model = cls._load_model_general(
AutoModel,
load_function_name=load_function_name,
model_id=model_id,
load_kwargs=load_kwargs,
model_kwargs=_model_kwargs,
)
model.to(device)
return cls(
model_id=model_id,
model=model,
tokenizer=tokenizer,
model_kwargs=_model_kwargs,
**kwargs,
)
@staticmethod
def _load_model_general(
model_class: Any,
load_function_name: str,
model_id: str,
load_kwargs: dict,
model_kwargs: dict,
) -> Any:
"""General function to attempt to load a model."""
try:
load_function = getattr(model_class, load_function_name)
return load_function(model_id, **{**load_kwargs, **model_kwargs})
except Exception as e:
logger.error(
f"Failed to load model using "
f"{model_class.__name__}.{load_function_name}: {e}"
)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model_id": self.model_id,
"model_kwargs": self.model_kwargs,
}
@property
def _llm_type(self) -> str:
return "ipex-llm"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
if self.streaming:
from transformers import TextStreamer
input_ids = self.tokenizer.encode(prompt, return_tensors="pt")
input_ids = input_ids.to(self.model.device)
streamer = TextStreamer(
self.tokenizer, skip_prompt=True, skip_special_tokens=True
)
if stop is not None:
from transformers.generation.stopping_criteria import (
StoppingCriteriaList,
)
from transformers.tools.agents import StopSequenceCriteria
# stop generation when stop words are encountered
# TODO: stop generation when the following one is stop word
stopping_criteria = StoppingCriteriaList(
[StopSequenceCriteria(stop, self.tokenizer)]
)
else:
stopping_criteria = None
output = self.model.generate(
input_ids,
streamer=streamer,
stopping_criteria=stopping_criteria,
**kwargs,
)
text = self.tokenizer.decode(output[0], skip_special_tokens=True)
return text
else:
input_ids = self.tokenizer.encode(prompt, return_tensors="pt")
input_ids = input_ids.to(self.model.device)
if stop is not None:
from transformers.generation.stopping_criteria import (
StoppingCriteriaList,
)
from transformers.tools.agents import StopSequenceCriteria
stopping_criteria = StoppingCriteriaList(
[StopSequenceCriteria(stop, self.tokenizer)]
)
else:
stopping_criteria = None
output = self.model.generate(
input_ids, stopping_criteria=stopping_criteria, **kwargs
)
text = self.tokenizer.decode(output[0], skip_special_tokens=True)[
len(prompt) :
]
return text
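# Demo sketch (added for illustration; not part of the original module). The
# model id is a placeholder Hugging Face repo; `ipex-llm` and `transformers`
# must be installed.
if __name__ == "__main__":
    demo_llm = IpexLLM.from_model_id(
        model_id="lmsys/vicuna-7b-v1.5",
        model_kwargs={"temperature": 0, "max_length": 64, "trust_remote_code": True},
    )
    print(demo_llm.invoke("What is IPEX-LLM?"))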
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/bigdl_llm.py | import logging
from typing import Any, Optional
from langchain_core.language_models.llms import LLM
from langchain_community.llms.ipex_llm import IpexLLM
logger = logging.getLogger(__name__)
class BigdlLLM(IpexLLM):
"""Wrapper around the BigdlLLM model
Example:
.. code-block:: python
from langchain_community.llms import BigdlLLM
llm = BigdlLLM.from_model_id(model_id="THUDM/chatglm-6b")
"""
@classmethod
def from_model_id(
cls,
model_id: str,
model_kwargs: Optional[dict] = None,
*,
tokenizer_id: Optional[str] = None,
load_in_4bit: bool = True,
load_in_low_bit: Optional[str] = None,
**kwargs: Any,
) -> LLM:
"""
Construct object from model_id
Args:
model_id: Path for the huggingface repo id to be downloaded or
the huggingface checkpoint folder.
tokenizer_id: Path for the huggingface repo id to be downloaded or
the huggingface checkpoint folder which contains the tokenizer.
model_kwargs: Keyword arguments to pass to the model and tokenizer.
kwargs: Extra arguments to pass to the model and tokenizer.
Returns:
An object of BigdlLLM.
"""
logger.warning("BigdlLLM was deprecated. Please use IpexLLM instead.")
try:
from bigdl.llm.transformers import (
AutoModel,
AutoModelForCausalLM,
)
from transformers import AutoTokenizer, LlamaTokenizer
except ImportError:
raise ImportError(
"Could not import bigdl-llm or transformers. "
"Please install it with `pip install --pre --upgrade bigdl-llm[all]`."
)
if load_in_low_bit is not None:
logger.warning(
"""`load_in_low_bit` option is not supported in BigdlLLM and
is ignored. For more data types support with `load_in_low_bit`,
use IpexLLM instead."""
)
if not load_in_4bit:
raise ValueError(
"BigdlLLM only supports loading in 4-bit mode, "
"i.e. load_in_4bit = True. "
"Please install it with `pip install --pre --upgrade bigdl-llm[all]`."
)
_model_kwargs = model_kwargs or {}
_tokenizer_id = tokenizer_id or model_id
try:
tokenizer = AutoTokenizer.from_pretrained(_tokenizer_id, **_model_kwargs)
except Exception:
tokenizer = LlamaTokenizer.from_pretrained(_tokenizer_id, **_model_kwargs)
try:
model = AutoModelForCausalLM.from_pretrained(
model_id, load_in_4bit=True, **_model_kwargs
)
except Exception:
model = AutoModel.from_pretrained(
model_id, load_in_4bit=True, **_model_kwargs
)
if "trust_remote_code" in _model_kwargs:
_model_kwargs = {
k: v for k, v in _model_kwargs.items() if k != "trust_remote_code"
}
return cls(
model_id=model_id,
model=model,
tokenizer=tokenizer,
model_kwargs=_model_kwargs,
**kwargs,
)
@classmethod
def from_model_id_low_bit(
cls,
model_id: str,
model_kwargs: Optional[dict] = None,
*,
tokenizer_id: Optional[str] = None,
**kwargs: Any,
) -> LLM:
"""
Construct low_bit object from model_id
Args:
model_id: Path for the bigdl-llm transformers low-bit model folder.
tokenizer_id: Path for the huggingface repo id or local model folder
which contains the tokenizer.
model_kwargs: Keyword arguments to pass to the model and tokenizer.
kwargs: Extra arguments to pass to the model and tokenizer.
Returns:
An object of BigdlLLM.
"""
logger.warning("BigdlLLM was deprecated. Please use IpexLLM instead.")
try:
from bigdl.llm.transformers import (
AutoModel,
AutoModelForCausalLM,
)
from transformers import AutoTokenizer, LlamaTokenizer
except ImportError:
raise ImportError(
"Could not import bigdl-llm or transformers. "
"Please install it with `pip install --pre --upgrade bigdl-llm[all]`."
)
_model_kwargs = model_kwargs or {}
_tokenizer_id = tokenizer_id or model_id
try:
tokenizer = AutoTokenizer.from_pretrained(_tokenizer_id, **_model_kwargs)
except Exception:
tokenizer = LlamaTokenizer.from_pretrained(_tokenizer_id, **_model_kwargs)
try:
model = AutoModelForCausalLM.load_low_bit(model_id, **_model_kwargs)
except Exception:
model = AutoModel.load_low_bit(model_id, **_model_kwargs)
if "trust_remote_code" in _model_kwargs:
_model_kwargs = {
k: v for k, v in _model_kwargs.items() if k != "trust_remote_code"
}
return cls(
model_id=model_id,
model=model,
tokenizer=tokenizer,
model_kwargs=_model_kwargs,
**kwargs,
)
@property
def _llm_type(self) -> str:
return "bigdl-llm"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/vertexai.py | from __future__ import annotations
from concurrent.futures import Executor, ThreadPoolExecutor
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Iterator, List, Optional, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from langchain_core.utils import pre_init
from pydantic import BaseModel, ConfigDict, Field
from langchain_community.utilities.vertexai import (
create_retry_decorator,
get_client_info,
init_vertexai,
raise_vertex_import_error,
)
if TYPE_CHECKING:
from google.cloud.aiplatform.gapic import (
PredictionServiceAsyncClient,
PredictionServiceClient,
)
from google.cloud.aiplatform.models import Prediction
from google.protobuf.struct_pb2 import Value
from vertexai.language_models._language_models import (
TextGenerationResponse,
_LanguageModel,
)
from vertexai.preview.generative_models import Image
# This is for backwards compatibility
# We can remove after `langchain` stops importing it
_response_to_generation = None
stream_completion_with_retry = None
def is_codey_model(model_name: str) -> bool:
"""Return True if the model name is a Codey model."""
return "code" in model_name
def is_gemini_model(model_name: str) -> bool:
"""Return True if the model name is a Gemini model."""
return model_name is not None and "gemini" in model_name
def completion_with_retry( # type: ignore[no-redef]
llm: VertexAI,
prompt: List[Union[str, "Image"]],
stream: bool = False,
is_gemini: bool = False,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(
prompt: List[Union[str, "Image"]], is_gemini: bool = False, **kwargs: Any
) -> Any:
if is_gemini:
return llm.client.generate_content(
prompt, stream=stream, generation_config=kwargs
)
else:
if stream:
return llm.client.predict_streaming(prompt[0], **kwargs)
return llm.client.predict(prompt[0], **kwargs)
return _completion_with_retry(prompt, is_gemini, **kwargs)
async def acompletion_with_retry(
llm: VertexAI,
prompt: str,
is_gemini: bool = False,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
async def _acompletion_with_retry(
prompt: str, is_gemini: bool = False, **kwargs: Any
) -> Any:
if is_gemini:
return await llm.client.generate_content_async(
prompt, generation_config=kwargs
)
return await llm.client.predict_async(prompt, **kwargs)
return await _acompletion_with_retry(prompt, is_gemini, **kwargs)
class _VertexAIBase(BaseModel):
model_config = ConfigDict(protected_namespaces=())
project: Optional[str] = None
"The default GCP project to use when making Vertex API calls."
location: str = "us-central1"
"The default location to use when making API calls."
request_parallelism: int = 5
"The amount of parallelism allowed for requests issued to VertexAI models. "
"Default is 5."
max_retries: int = 6
"""The maximum number of retries to make when generating."""
task_executor: ClassVar[Optional[Executor]] = Field(default=None, exclude=True)
stop: Optional[List[str]] = None
"Optional list of stop words to use when generating."
model_name: Optional[str] = None
"Underlying model name."
@classmethod
def _get_task_executor(cls, request_parallelism: int = 5) -> Executor:
if cls.task_executor is None:
cls.task_executor = ThreadPoolExecutor(max_workers=request_parallelism)
return cls.task_executor
class _VertexAICommon(_VertexAIBase): # type: ignore[override]
client: "_LanguageModel" = None #: :meta private:
client_preview: "_LanguageModel" = None #: :meta private:
model_name: str
"Underlying model name."
temperature: float = 0.0
"Sampling temperature, it controls the degree of randomness in token selection."
max_output_tokens: int = 128
"Token limit determines the maximum amount of text output from one prompt."
top_p: float = 0.95
"Tokens are selected from most probable to least until the sum of their "
"probabilities equals the top-p value. Top-p is ignored for Codey models."
top_k: int = 40
"How the model selects tokens for output, the next token is selected from "
"among the top-k most probable tokens. Top-k is ignored for Codey models."
credentials: Any = Field(default=None, exclude=True)
"The default custom credentials (google.auth.credentials.Credentials) to use "
"when making API calls. If not provided, credentials will be ascertained from "
"the environment."
n: int = 1
"""How many completions to generate for each prompt."""
streaming: bool = False
"""Whether to stream the results or not."""
@property
def _llm_type(self) -> str:
return "vertexai"
@property
def is_codey_model(self) -> bool:
return is_codey_model(self.model_name)
@property
def _is_gemini_model(self) -> bool:
return is_gemini_model(self.model_name)
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Gets the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _default_params(self) -> Dict[str, Any]:
params = {
"temperature": self.temperature,
"max_output_tokens": self.max_output_tokens,
"candidate_count": self.n,
}
if not self.is_codey_model:
params.update(
{
"top_k": self.top_k,
"top_p": self.top_p,
}
)
return params
@classmethod
def _try_init_vertexai(cls, values: Dict) -> None:
allowed_params = ["project", "location", "credentials"]
params = {k: v for k, v in values.items() if k in allowed_params}
init_vertexai(**params)
return None
def _prepare_params(
self,
stop: Optional[List[str]] = None,
stream: bool = False,
**kwargs: Any,
) -> dict:
stop_sequences = stop or self.stop
params_mapping = {"n": "candidate_count"}
params = {params_mapping.get(k, k): v for k, v in kwargs.items()}
params = {**self._default_params, "stop_sequences": stop_sequences, **params}
if stream or self.streaming:
params.pop("candidate_count")
return params
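# Illustration (added for clarity; not part of the original module): what
# `_prepare_params` returns for a streaming call on a non-Codey model. `n` is
# renamed to `candidate_count` and then dropped because streaming only allows
# one candidate.
#
#     self._prepare_params(stop=["\n"], stream=True, n=2)
#     # -> {"temperature": 0.0, "max_output_tokens": 128, "top_k": 40,
#     #     "top_p": 0.95, "stop_sequences": ["\n"]}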
@deprecated(
since="0.0.12",
removal="1.0",
alternative_import="langchain_google_vertexai.VertexAI",
)
class VertexAI(_VertexAICommon, BaseLLM): # type: ignore[override]
"""Google Vertex AI large language models."""
model_name: str = "text-bison"
"The name of the Vertex AI large language model."
tuned_model_name: Optional[str] = None
"The name of a tuned model. If provided, model_name is ignored."
@classmethod
def is_lc_serializable(self) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "llms", "vertexai"]
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
tuned_model_name = values.get("tuned_model_name")
model_name = values["model_name"]
is_gemini = is_gemini_model(values["model_name"])
cls._try_init_vertexai(values)
try:
from vertexai.language_models import (
CodeGenerationModel,
TextGenerationModel,
)
from vertexai.preview.language_models import (
CodeGenerationModel as PreviewCodeGenerationModel,
)
from vertexai.preview.language_models import (
TextGenerationModel as PreviewTextGenerationModel,
)
if is_gemini:
from vertexai.preview.generative_models import (
GenerativeModel,
)
if is_codey_model(model_name):
model_cls = CodeGenerationModel
preview_model_cls = PreviewCodeGenerationModel
elif is_gemini:
model_cls = GenerativeModel
preview_model_cls = GenerativeModel
else:
model_cls = TextGenerationModel
preview_model_cls = PreviewTextGenerationModel
if tuned_model_name:
values["client"] = model_cls.get_tuned_model(tuned_model_name)
values["client_preview"] = preview_model_cls.get_tuned_model(
tuned_model_name
)
else:
if is_gemini:
values["client"] = model_cls(model_name=model_name)
values["client_preview"] = preview_model_cls(model_name=model_name)
else:
values["client"] = model_cls.from_pretrained(model_name)
values["client_preview"] = preview_model_cls.from_pretrained(
model_name
)
except ImportError:
raise_vertex_import_error()
if values["streaming"] and values["n"] > 1:
raise ValueError("Only one candidate can be generated with streaming!")
return values
def get_num_tokens(self, text: str) -> int:
"""Get the number of tokens present in the text.
Useful for checking if an input will fit in a model's context window.
Args:
text: The string input to tokenize.
Returns:
The integer number of tokens in the text.
"""
try:
result = self.client_preview.count_tokens([text])
except AttributeError:
raise_vertex_import_error()
return result.total_tokens
def _response_to_generation(
self, response: TextGenerationResponse
) -> GenerationChunk:
"""Converts a stream response to a generation chunk."""
try:
generation_info = {
"is_blocked": response.is_blocked,
"safety_attributes": response.safety_attributes,
}
except Exception:
generation_info = None
return GenerationChunk(text=response.text, generation_info=generation_info)
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> LLMResult:
should_stream = stream if stream is not None else self.streaming
params = self._prepare_params(stop=stop, stream=should_stream, **kwargs)
generations: List[List[Generation]] = []
for prompt in prompts:
if should_stream:
generation = GenerationChunk(text="")
for chunk in self._stream(
prompt, stop=stop, run_manager=run_manager, **kwargs
):
generation += chunk
generations.append([generation])
else:
res = completion_with_retry( # type: ignore[misc]
self,
[prompt],
stream=should_stream,
is_gemini=self._is_gemini_model,
run_manager=run_manager,
**params,
)
generations.append(
[self._response_to_generation(r) for r in res.candidates]
)
return LLMResult(generations=generations)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
params = self._prepare_params(stop=stop, **kwargs)
generations = []
for prompt in prompts:
res = await acompletion_with_retry(
self,
prompt,
is_gemini=self._is_gemini_model,
run_manager=run_manager,
**params,
)
generations.append(
[self._response_to_generation(r) for r in res.candidates]
)
return LLMResult(generations=generations) # type: ignore[arg-type]
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
params = self._prepare_params(stop=stop, stream=True, **kwargs)
for stream_resp in completion_with_retry( # type: ignore[misc]
self,
[prompt],
stream=True,
is_gemini=self._is_gemini_model,
run_manager=run_manager,
**params,
):
chunk = self._response_to_generation(stream_resp)
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=self.verbose,
)
yield chunk
@deprecated(
since="0.0.12",
removal="1.0",
alternative_import="langchain_google_vertexai.VertexAIModelGarden",
)
class VertexAIModelGarden(_VertexAIBase, BaseLLM):
"""Vertex AI Model Garden large language models."""
client: "PredictionServiceClient" = (
None #: :meta private: # type: ignore[assignment]
)
async_client: "PredictionServiceAsyncClient" = (
None #: :meta private: # type: ignore[assignment]
)
endpoint_id: str
"A name of an endpoint where the model has been deployed."
allowed_model_args: Optional[List[str]] = None
"Allowed optional args to be passed to the model."
prompt_arg: str = "prompt"
    result_arg: Optional[str] = "generated_text"
    "Set result_arg to None if output of the model is expected to be a string."
    "Otherwise, if it's a dict, provide the key that contains the result."
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
from google.api_core.client_options import ClientOptions
from google.cloud.aiplatform.gapic import (
PredictionServiceAsyncClient,
PredictionServiceClient,
)
except ImportError:
raise_vertex_import_error()
if not values["project"]:
raise ValueError(
"A GCP project should be provided to run inference on Model Garden!"
)
client_options = ClientOptions(
api_endpoint=f"{values['location']}-aiplatform.googleapis.com"
)
client_info = get_client_info(module="vertex-ai-model-garden")
values["client"] = PredictionServiceClient(
client_options=client_options, client_info=client_info
)
values["async_client"] = PredictionServiceAsyncClient(
client_options=client_options, client_info=client_info
)
return values
@property
def endpoint_path(self) -> str:
return self.client.endpoint_path(
project=self.project, # type: ignore[arg-type]
location=self.location,
endpoint=self.endpoint_id,
)
@property
def _llm_type(self) -> str:
return "vertexai_model_garden"
def _prepare_request(self, prompts: List[str], **kwargs: Any) -> List["Value"]:
try:
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value
except ImportError:
raise ImportError(
"protobuf package not found, please install it with"
" `pip install protobuf`"
)
instances = []
for prompt in prompts:
if self.allowed_model_args:
instance = {
k: v for k, v in kwargs.items() if k in self.allowed_model_args
}
else:
instance = {}
instance[self.prompt_arg] = prompt
instances.append(instance)
predict_instances = [
json_format.ParseDict(instance_dict, Value()) for instance_dict in instances
]
return predict_instances
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
instances = self._prepare_request(prompts, **kwargs)
response = self.client.predict(endpoint=self.endpoint_path, instances=instances)
return self._parse_response(response)
def _parse_response(self, predictions: "Prediction") -> LLMResult:
generations: List[List[Generation]] = []
for result in predictions.predictions:
generations.append(
[
Generation(text=self._parse_prediction(prediction))
for prediction in result
]
)
return LLMResult(generations=generations)
def _parse_prediction(self, prediction: Any) -> str:
if isinstance(prediction, str):
return prediction
        if self.result_arg:
            try:
                return prediction[self.result_arg]
            except TypeError as exc:
                error_desc = (
                    "Provided non-None `result_arg` (result_arg="
                    f"{self.result_arg}), but got a prediction of type "
                    f"{type(prediction)} instead of dict. Most probably, you "
                    "need to set `result_arg=None` during VertexAIModelGarden "
                    "initialization."
                )
                raise ValueError(error_desc) from exc
            except KeyError:
                raise ValueError(f"{self.result_arg} key not found in prediction!")
return prediction
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
instances = self._prepare_request(prompts, **kwargs)
response = await self.async_client.predict(
endpoint=self.endpoint_path, instances=instances
)
return self._parse_response(response)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/arcee.py | from typing import Any, Dict, List, Optional, Union, cast
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import ConfigDict, SecretStr, model_validator
from langchain_community.utilities.arcee import ArceeWrapper, DALMFilter
class Arcee(LLM):
"""Arcee's Domain Adapted Language Models (DALMs).
To use, set the ``ARCEE_API_KEY`` environment variable with your Arcee API key,
or pass ``arcee_api_key`` as a named parameter.
Example:
.. code-block:: python
from langchain_community.llms import Arcee
arcee = Arcee(
model="DALM-PubMed",
arcee_api_key="ARCEE-API-KEY"
)
response = arcee("AI-driven music therapy")
"""
_client: Optional[ArceeWrapper] = None #: :meta private:
"""Arcee _client."""
arcee_api_key: Union[SecretStr, str, None] = None
"""Arcee API Key"""
model: str
"""Arcee DALM name"""
arcee_api_url: str = "https://api.arcee.ai"
"""Arcee API URL"""
arcee_api_version: str = "v2"
"""Arcee API Version"""
arcee_app_url: str = "https://app.arcee.ai"
"""Arcee App URL"""
model_id: str = ""
"""Arcee Model ID"""
model_kwargs: Optional[Dict[str, Any]] = None
"""Keyword arguments to pass to the model."""
model_config = ConfigDict(
extra="forbid",
)
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "arcee"
def __init__(self, **data: Any) -> None:
"""Initializes private fields."""
super().__init__(**data)
api_key = cast(SecretStr, self.arcee_api_key)
self._client = ArceeWrapper(
arcee_api_key=api_key,
arcee_api_url=self.arcee_api_url,
arcee_api_version=self.arcee_api_version,
model_kwargs=self.model_kwargs,
model_name=self.model,
)
@model_validator(mode="before")
@classmethod
def validate_environments(cls, values: Dict) -> Any:
"""Validate Arcee environment variables."""
# validate env vars
values["arcee_api_key"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"arcee_api_key",
"ARCEE_API_KEY",
)
)
values["arcee_api_url"] = get_from_dict_or_env(
values,
"arcee_api_url",
"ARCEE_API_URL",
)
values["arcee_app_url"] = get_from_dict_or_env(
values,
"arcee_app_url",
"ARCEE_APP_URL",
)
values["arcee_api_version"] = get_from_dict_or_env(
values,
"arcee_api_version",
"ARCEE_API_VERSION",
)
# validate model kwargs
if values.get("model_kwargs"):
kw = values["model_kwargs"]
# validate size
if kw.get("size") is not None:
if not kw.get("size") >= 0:
                    raise ValueError("`size` must be non-negative")
# validate filters
if kw.get("filters") is not None:
if not isinstance(kw.get("filters"), List):
raise ValueError("`filters` must be a list")
for f in kw.get("filters"):
DALMFilter(**f)
return values
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Generate text from Arcee DALM.
Args:
prompt: Prompt to generate text from.
size: The max number of context results to retrieve.
Defaults to 3. (Can be less if filters are provided).
filters: Filters to apply to the context dataset.
"""
try:
if not self._client:
raise ValueError("Client is not initialized.")
return self._client.generate(prompt=prompt, **kwargs)
except Exception as e:
raise Exception(f"Failed to generate text: {e}") from e
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/huggingface_text_gen_inference.py | import logging
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import get_pydantic_field_names, pre_init
from pydantic import ConfigDict, Field, model_validator
logger = logging.getLogger(__name__)
@deprecated(
"0.0.21",
removal="1.0",
alternative_import="langchain_huggingface.HuggingFaceEndpoint",
)
class HuggingFaceTextGenInference(LLM):
"""
HuggingFace text generation API.
! This class is deprecated, you should use HuggingFaceEndpoint instead !
To use, you should have the `text-generation` python package installed and
a text-generation server running.
Example:
.. code-block:: python
# Basic Example (no streaming)
llm = HuggingFaceTextGenInference(
inference_server_url="http://localhost:8010/",
max_new_tokens=512,
top_k=10,
top_p=0.95,
typical_p=0.95,
temperature=0.01,
repetition_penalty=1.03,
)
print(llm.invoke("What is Deep Learning?")) # noqa: T201
# Streaming response example
from langchain_community.callbacks import streaming_stdout
callbacks = [streaming_stdout.StreamingStdOutCallbackHandler()]
llm = HuggingFaceTextGenInference(
inference_server_url="http://localhost:8010/",
max_new_tokens=512,
top_k=10,
top_p=0.95,
typical_p=0.95,
temperature=0.01,
repetition_penalty=1.03,
callbacks=callbacks,
streaming=True
)
print(llm.invoke("What is Deep Learning?")) # noqa: T201
"""
max_new_tokens: int = 512
"""Maximum number of generated tokens"""
top_k: Optional[int] = None
"""The number of highest probability vocabulary tokens to keep for
top-k-filtering."""
top_p: Optional[float] = 0.95
"""If set to < 1, only the smallest set of most probable tokens with probabilities
that add up to `top_p` or higher are kept for generation."""
typical_p: Optional[float] = 0.95
"""Typical Decoding mass. See [Typical Decoding for Natural Language
Generation](https://arxiv.org/abs/2202.00666) for more information."""
    temperature: Optional[float] = 0.8
    """The value used to modulate the logits distribution."""
repetition_penalty: Optional[float] = None
"""The parameter for repetition penalty. 1.0 means no penalty.
See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details."""
return_full_text: bool = False
"""Whether to prepend the prompt to the generated text"""
truncate: Optional[int] = None
"""Truncate inputs tokens to the given size"""
stop_sequences: List[str] = Field(default_factory=list)
"""Stop generating tokens if a member of `stop_sequences` is generated"""
seed: Optional[int] = None
"""Random sampling seed"""
inference_server_url: str = ""
"""text-generation-inference instance base url"""
timeout: int = 120
"""Timeout in seconds"""
streaming: bool = False
"""Whether to generate a stream of tokens asynchronously"""
do_sample: bool = False
"""Activate logits sampling"""
watermark: bool = False
"""Watermarking with [A Watermark for Large Language Models]
(https://arxiv.org/abs/2301.10226)"""
server_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any text-generation-inference server parameters not explicitly specified"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `call` not explicitly specified"""
client: Any = None
async_client: Any = None
model_config = ConfigDict(
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
                logger.warning(
                    f"""WARNING! {field_name} is not a default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
import text_generation
values["client"] = text_generation.Client(
values["inference_server_url"],
timeout=values["timeout"],
**values["server_kwargs"],
)
values["async_client"] = text_generation.AsyncClient(
values["inference_server_url"],
timeout=values["timeout"],
**values["server_kwargs"],
)
except ImportError:
raise ImportError(
"Could not import text_generation python package. "
"Please install it with `pip install text_generation`."
)
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_textgen_inference"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling text generation inference API."""
return {
"max_new_tokens": self.max_new_tokens,
"top_k": self.top_k,
"top_p": self.top_p,
"typical_p": self.typical_p,
"temperature": self.temperature,
"repetition_penalty": self.repetition_penalty,
"return_full_text": self.return_full_text,
"truncate": self.truncate,
"stop_sequences": self.stop_sequences,
"seed": self.seed,
"do_sample": self.do_sample,
"watermark": self.watermark,
**self.model_kwargs,
}
def _invocation_params(
self, runtime_stop: Optional[List[str]], **kwargs: Any
) -> Dict[str, Any]:
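        # Merge default parameters with call-time overrides, then append any runtime stop sequences.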
params = {**self._default_params, **kwargs}
params["stop_sequences"] = params["stop_sequences"] + (runtime_stop or [])
return params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
if self.streaming:
completion = ""
for chunk in self._stream(prompt, stop, run_manager, **kwargs):
completion += chunk.text
return completion
invocation_params = self._invocation_params(stop, **kwargs)
res = self.client.generate(prompt, **invocation_params)
# remove stop sequences from the end of the generated text
for stop_seq in invocation_params["stop_sequences"]:
if stop_seq in res.generated_text:
res.generated_text = res.generated_text[
: res.generated_text.index(stop_seq)
]
return res.generated_text
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
if self.streaming:
completion = ""
async for chunk in self._astream(prompt, stop, run_manager, **kwargs):
completion += chunk.text
return completion
invocation_params = self._invocation_params(stop, **kwargs)
res = await self.async_client.generate(prompt, **invocation_params)
# remove stop sequences from the end of the generated text
for stop_seq in invocation_params["stop_sequences"]:
if stop_seq in res.generated_text:
res.generated_text = res.generated_text[
: res.generated_text.index(stop_seq)
]
return res.generated_text
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
invocation_params = self._invocation_params(stop, **kwargs)
for res in self.client.generate_stream(prompt, **invocation_params):
# identify stop sequence in generated text, if any
stop_seq_found: Optional[str] = None
for stop_seq in invocation_params["stop_sequences"]:
if stop_seq in res.token.text:
stop_seq_found = stop_seq
# identify text to yield
text: Optional[str] = None
if res.token.special:
text = None
elif stop_seq_found:
text = res.token.text[: res.token.text.index(stop_seq_found)]
else:
text = res.token.text
# yield text, if any
if text:
chunk = GenerationChunk(text=text)
if run_manager:
run_manager.on_llm_new_token(chunk.text)
yield chunk
# break if stop sequence found
if stop_seq_found:
break
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
invocation_params = self._invocation_params(stop, **kwargs)
async for res in self.async_client.generate_stream(prompt, **invocation_params):
# identify stop sequence in generated text, if any
stop_seq_found: Optional[str] = None
for stop_seq in invocation_params["stop_sequences"]:
if stop_seq in res.token.text:
stop_seq_found = stop_seq
# identify text to yield
text: Optional[str] = None
if res.token.special:
text = None
elif stop_seq_found:
text = res.token.text[: res.token.text.index(stop_seq_found)]
else:
text = res.token.text
# yield text, if any
if text:
chunk = GenerationChunk(text=text)
if run_manager:
await run_manager.on_llm_new_token(chunk.text)
yield chunk
# break if stop sequence found
if stop_seq_found:
break
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/edenai.py | """Wrapper around EdenAI's Generation API."""
import logging
from typing import Any, Dict, List, Literal, Optional
from aiohttp import ClientSession
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from langchain_core.utils import get_from_dict_or_env, pre_init
from langchain_core.utils.pydantic import get_fields
from pydantic import ConfigDict, Field, model_validator
from langchain_community.llms.utils import enforce_stop_tokens
from langchain_community.utilities.requests import Requests
logger = logging.getLogger(__name__)
class EdenAI(LLM):
"""EdenAI models.
To use, you should have
the environment variable ``EDENAI_API_KEY`` set with your API token.
You can find your token here: https://app.edenai.run/admin/account/settings
    `feature` and `subfeature` are required, but any other model parameters can also
    be passed in with the format params={model_param: value, ...}.
    For the API reference, check the EdenAI documentation: http://docs.edenai.co.
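    Example (a minimal sketch; assumes ``EDENAI_API_KEY`` is set and that the
    provider and model shown below are enabled on your EdenAI account):
        .. code-block:: python
            from langchain_community.llms import EdenAI
            llm = EdenAI(
                provider="openai",
                model="gpt-3.5-turbo-instruct",
                temperature=0.2,
                max_tokens=250,
            )
            print(llm.invoke("Explain what EdenAI does in one sentence."))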
"""
base_url: str = "https://api.edenai.run/v2"
edenai_api_key: Optional[str] = None
feature: Literal["text", "image"] = "text"
"""Which generative feature to use, use text by default"""
subfeature: Literal["generation"] = "generation"
"""Subfeature of above feature, use generation by default"""
provider: str
"""Generative provider to use (eg: openai,stabilityai,cohere,google etc.)"""
model: Optional[str] = None
"""
    Model name for the above provider (e.g. 'gpt-3.5-turbo-instruct' for openai).
    Available models are shown on https://docs.edenai.co/ under 'available providers'.
"""
# Optional parameters to add depending of chosen feature
# see api reference for more infos
temperature: Optional[float] = Field(default=None, ge=0, le=1) # for text
max_tokens: Optional[int] = Field(default=None, ge=0) # for text
resolution: Optional[Literal["256x256", "512x512", "1024x1024"]] = None # for image
params: Dict[str, Any] = Field(default_factory=dict)
"""
DEPRECATED: use temperature, max_tokens, resolution directly
optional parameters to pass to api
"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""extra parameters"""
stop_sequences: Optional[List[str]] = None
"""Stop sequences to use."""
model_config = ConfigDict(
extra="forbid",
)
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
values["edenai_api_key"] = get_from_dict_or_env(
values, "edenai_api_key", "EDENAI_API_KEY"
)
return values
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in get_fields(cls).values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "edenai"
def _format_output(self, output: dict) -> str:
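        # Text and image generations come back under different keys in the EdenAI response.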
if self.feature == "text":
return output[self.provider]["generated_text"]
else:
return output[self.provider]["items"][0]["image"]
@staticmethod
def get_user_agent() -> str:
from langchain_community import __version__
return f"langchain/{__version__}"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to EdenAI's text generation endpoint.
Args:
prompt: The prompt to pass into the model.
Returns:
json formatted str response.
"""
stops = None
if self.stop_sequences is not None and stop is not None:
raise ValueError(
"stop sequences found in both the input and default params."
)
elif self.stop_sequences is not None:
stops = self.stop_sequences
else:
stops = stop
url = f"{self.base_url}/{self.feature}/{self.subfeature}"
headers = {
"Authorization": f"Bearer {self.edenai_api_key}",
"User-Agent": self.get_user_agent(),
}
payload: Dict[str, Any] = {
"providers": self.provider,
"text": prompt,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"resolution": self.resolution,
**self.params,
**kwargs,
"num_images": 1, # always limit to 1 (ignored for text)
}
# filter None values to not pass them to the http payload
payload = {k: v for k, v in payload.items() if v is not None}
if self.model is not None:
payload["settings"] = {self.provider: self.model}
request = Requests(headers=headers)
response = request.post(url=url, data=payload)
if response.status_code >= 500:
raise Exception(f"EdenAI Server: Error {response.status_code}")
elif response.status_code >= 400:
raise ValueError(f"EdenAI received an invalid payload: {response.text}")
elif response.status_code != 200:
raise Exception(
f"EdenAI returned an unexpected response with status "
f"{response.status_code}: {response.text}"
)
data = response.json()
provider_response = data[self.provider]
if provider_response.get("status") == "fail":
err_msg = provider_response.get("error", {}).get("message")
raise Exception(err_msg)
output = self._format_output(data)
if stops is not None:
output = enforce_stop_tokens(output, stops)
return output
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call EdenAi model to get predictions based on the prompt.
Args:
prompt: The prompt to pass into the model.
stop: A list of stop words (optional).
run_manager: A callback manager for async interaction with LLMs.
Returns:
The string generated by the model.
"""
stops = None
if self.stop_sequences is not None and stop is not None:
raise ValueError(
"stop sequences found in both the input and default params."
)
elif self.stop_sequences is not None:
stops = self.stop_sequences
else:
stops = stop
url = f"{self.base_url}/{self.feature}/{self.subfeature}"
headers = {
"Authorization": f"Bearer {self.edenai_api_key}",
"User-Agent": self.get_user_agent(),
}
payload: Dict[str, Any] = {
"providers": self.provider,
"text": prompt,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"resolution": self.resolution,
**self.params,
**kwargs,
"num_images": 1, # always limit to 1 (ignored for text)
}
# filter `None` values to not pass them to the http payload as null
payload = {k: v for k, v in payload.items() if v is not None}
if self.model is not None:
payload["settings"] = {self.provider: self.model}
async with ClientSession() as session:
async with session.post(url, json=payload, headers=headers) as response:
if response.status >= 500:
raise Exception(f"EdenAI Server: Error {response.status}")
elif response.status >= 400:
raise ValueError(
f"EdenAI received an invalid payload: {response.text}"
)
elif response.status != 200:
raise Exception(
f"EdenAI returned an unexpected response with status "
f"{response.status}: {response.text}"
)
response_json = await response.json()
provider_response = response_json[self.provider]
if provider_response.get("status") == "fail":
err_msg = provider_response.get("error", {}).get("message")
raise Exception(err_msg)
output = self._format_output(response_json)
if stops is not None:
output = enforce_stop_tokens(output, stops)
return output
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/ctransformers.py | from functools import partial
from typing import Any, Dict, List, Optional, Sequence
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from langchain_core.utils import pre_init
class CTransformers(LLM):
"""C Transformers LLM models.
To use, you should have the ``ctransformers`` python package installed.
See https://github.com/marella/ctransformers
Example:
.. code-block:: python
from langchain_community.llms import CTransformers
llm = CTransformers(model="/path/to/ggml-gpt-2.bin", model_type="gpt2")
"""
client: Any #: :meta private:
model: str
"""The path to a model file or directory or the name of a Hugging Face Hub
model repo."""
model_type: Optional[str] = None
"""The model type."""
model_file: Optional[str] = None
"""The name of the model file in repo or directory."""
config: Optional[Dict[str, Any]] = None
"""The config parameters.
See https://github.com/marella/ctransformers#config"""
lib: Optional[str] = None
"""The path to a shared library or one of `avx2`, `avx`, `basic`."""
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
"model_type": self.model_type,
"model_file": self.model_file,
"config": self.config,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "ctransformers"
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that ``ctransformers`` package is installed."""
try:
from ctransformers import AutoModelForCausalLM
except ImportError:
raise ImportError(
"Could not import `ctransformers` package. "
"Please install it with `pip install ctransformers`"
)
config = values["config"] or {}
values["client"] = AutoModelForCausalLM.from_pretrained(
values["model"],
model_type=values["model_type"],
model_file=values["model_file"],
lib=values["lib"],
**config,
)
return values
def _call(
self,
prompt: str,
stop: Optional[Sequence[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Generate text from a prompt.
Args:
prompt: The prompt to generate text from.
stop: A list of sequences to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
response = llm.invoke("Tell me a joke.")
"""
text = []
_run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager()
for chunk in self.client(prompt, stop=stop, stream=True):
text.append(chunk)
_run_manager.on_llm_new_token(chunk, verbose=self.verbose)
return "".join(text)
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
    ) -> str:
        """Asynchronous call out to the CTransformers generate method.
Very helpful when streaming (like with websockets!)
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = llm.invoke("Once upon a time, ")
"""
text_callback = None
if run_manager:
text_callback = partial(run_manager.on_llm_new_token, verbose=self.verbose)
text = ""
for token in self.client(prompt, stop=stop, stream=True):
if text_callback:
await text_callback(token)
text += token
return text
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/huggingface_hub.py | import json
from typing import Any, Dict, List, Mapping, Optional
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import get_from_dict_or_env, pre_init
from pydantic import ConfigDict
from langchain_community.llms.utils import enforce_stop_tokens
# key: task
# value: key in the output dictionary
VALID_TASKS_DICT = {
"translation": "translation_text",
"summarization": "summary_text",
"conversational": "generated_text",
"text-generation": "generated_text",
"text2text-generation": "generated_text",
}
@deprecated(
"0.0.21",
removal="1.0",
alternative_import="langchain_huggingface.HuggingFaceEndpoint",
)
class HuggingFaceHub(LLM):
"""HuggingFaceHub models.
! This class is deprecated, you should use HuggingFaceEndpoint instead.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Supports `text-generation`, `text2text-generation`, `conversational`, `translation`,
and `summarization`.
Example:
.. code-block:: python
from langchain_community.llms import HuggingFaceHub
hf = HuggingFaceHub(repo_id="gpt2", huggingfacehub_api_token="my-api-key")
"""
client: Any = None #: :meta private:
repo_id: Optional[str] = None
"""Model name to use.
If not provided, the default model for the chosen task will be used."""
task: Optional[str] = None
"""Task to call the model with.
Should be a task that returns `generated_text`, `summary_text`,
or `translation_text`."""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
model_config = ConfigDict(
extra="forbid",
)
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub import HfApi, InferenceClient
repo_id = values["repo_id"]
client = InferenceClient(
model=repo_id,
token=huggingfacehub_api_token,
)
if not values["task"]:
if not repo_id:
raise ValueError(
"Must specify either `repo_id` or `task`, or both."
)
# Use the recommended task for the chosen model
model_info = HfApi(token=huggingfacehub_api_token).model_info(
repo_id=repo_id
)
values["task"] = model_info.pipeline_tag
if values["task"] not in VALID_TASKS_DICT:
raise ValueError(
f"Got invalid task {values['task']}, "
f"currently only {VALID_TASKS_DICT.keys()} are supported"
)
values["client"] = client
except ImportError:
raise ImportError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"repo_id": self.repo_id, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_hub"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
parameters = {**_model_kwargs, **kwargs}
response = self.client.post(
json={"inputs": prompt, "parameters": parameters}, task=self.task
)
response = json.loads(response.decode())
if "error" in response:
raise ValueError(f"Error raised by inference API: {response['error']}")
response_key = VALID_TASKS_DICT[self.task] # type: ignore
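        # The Inference API may return either a list of generations or a single dict; handle both shapes.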
if isinstance(response, list):
text = response[0][response_key]
else:
text = response[response_key]
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/xinference.py | from typing import TYPE_CHECKING, Any, Dict, Generator, List, Mapping, Optional, Union
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
if TYPE_CHECKING:
from xinference.client import RESTfulChatModelHandle, RESTfulGenerateModelHandle
from xinference.model.llm.core import LlamaCppGenerateConfig
class Xinference(LLM):
"""`Xinference` large-scale model inference service.
To use, you should have the xinference library installed:
.. code-block:: bash
pip install "xinference[all]"
If you're simply using the services provided by Xinference, you can utilize the xinference_client package:
.. code-block:: bash
pip install xinference_client
Check out: https://github.com/xorbitsai/inference
    To run, you need to start a Xinference supervisor on one server and Xinference workers on the other servers.
Example:
To start a local instance of Xinference, run
.. code-block:: bash
$ xinference
You can also deploy Xinference in a distributed cluster. Here are the steps:
Starting the supervisor:
.. code-block:: bash
$ xinference-supervisor
Starting the worker:
.. code-block:: bash
$ xinference-worker
Then, launch a model using command line interface (CLI).
Example:
.. code-block:: bash
$ xinference launch -n orca -s 3 -q q4_0
It will return a model UID. Then, you can use Xinference with LangChain.
Example:
.. code-block:: python
from langchain_community.llms import Xinference
llm = Xinference(
server_url="http://0.0.0.0:9997",
            model_uid = {model_uid} # replace model_uid with the model UID returned from launching the model
)
llm.invoke(
prompt="Q: where can we visit in the capital of France? A:",
generate_config={"max_tokens": 1024, "stream": True},
)
To view all the supported builtin models, run:
.. code-block:: bash
$ xinference list --all
""" # noqa: E501
client: Any
server_url: Optional[str]
"""URL of the xinference server"""
model_uid: Optional[str]
"""UID of the launched model"""
model_kwargs: Dict[str, Any]
"""Keyword arguments to be passed to xinference.LLM"""
def __init__(
self,
server_url: Optional[str] = None,
model_uid: Optional[str] = None,
**model_kwargs: Any,
):
try:
from xinference.client import RESTfulClient
except ImportError:
try:
from xinference_client import RESTfulClient
except ImportError as e:
raise ImportError(
"Could not import RESTfulClient from xinference. Please install it"
" with `pip install xinference` or `pip install xinference_client`."
) from e
model_kwargs = model_kwargs or {}
super().__init__(
**{ # type: ignore[arg-type]
"server_url": server_url,
"model_uid": model_uid,
"model_kwargs": model_kwargs,
}
)
if self.server_url is None:
raise ValueError("Please provide server URL")
if self.model_uid is None:
raise ValueError("Please provide the model UID")
self.client = RESTfulClient(server_url)
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "xinference"
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"server_url": self.server_url},
**{"model_uid": self.model_uid},
**{"model_kwargs": self.model_kwargs},
}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the xinference model and return the output.
Args:
prompt: The prompt to use for generation.
stop: Optional list of stop words to use when generating.
generate_config: Optional dictionary for the configuration used for
generation.
Returns:
The generated string by the model.
"""
model = self.client.get_model(self.model_uid)
generate_config: "LlamaCppGenerateConfig" = kwargs.get("generate_config", {})
generate_config = {**self.model_kwargs, **generate_config}
if stop:
generate_config["stop"] = stop
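        # If the caller requested streaming via generate_config, accumulate the streamed
        # tokens; otherwise issue a single blocking generate call.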
if generate_config and generate_config.get("stream"):
combined_text_output = ""
for token in self._stream_generate(
model=model,
prompt=prompt,
run_manager=run_manager,
generate_config=generate_config,
):
combined_text_output += token
return combined_text_output
else:
completion = model.generate(prompt=prompt, generate_config=generate_config)
return completion["choices"][0]["text"]
def _stream_generate(
self,
model: Union["RESTfulGenerateModelHandle", "RESTfulChatModelHandle"],
prompt: str,
run_manager: Optional[CallbackManagerForLLMRun] = None,
generate_config: Optional["LlamaCppGenerateConfig"] = None,
) -> Generator[str, None, None]:
"""
Args:
prompt: The prompt to use for generation.
model: The model used for generation.
stop: Optional list of stop words to use when generating.
generate_config: Optional dictionary for the configuration used for
generation.
Yields:
A string token.
"""
streaming_response = model.generate(
prompt=prompt, generate_config=generate_config
)
for chunk in streaming_response:
if isinstance(chunk, dict):
choices = chunk.get("choices", [])
if choices:
choice = choices[0]
if isinstance(choice, dict):
token = choice.get("text", "")
log_probs = choice.get("logprobs")
if run_manager:
run_manager.on_llm_new_token(
token=token, verbose=self.verbose, log_probs=log_probs
)
yield token
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/anyscale.py | """Wrapper around Anyscale Endpoint"""
from typing import (
Any,
Dict,
List,
Mapping,
Optional,
Set,
)
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import Field, SecretStr
from langchain_community.llms.openai import (
BaseOpenAI,
acompletion_with_retry,
completion_with_retry,
)
from langchain_community.utils.openai import is_openai_v1
DEFAULT_BASE_URL = "https://api.endpoints.anyscale.com/v1"
DEFAULT_MODEL = "mistralai/Mixtral-8x7B-Instruct-v0.1"
def update_token_usage(
keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any]
) -> None:
"""Update token usage."""
_keys_to_use = keys.intersection(response["usage"])
for _key in _keys_to_use:
if _key not in token_usage:
token_usage[_key] = response["usage"][_key]
else:
token_usage[_key] += response["usage"][_key]
def create_llm_result(
choices: Any, prompts: List[str], token_usage: Dict[str, int], model_name: str
) -> LLMResult:
"""Create the LLMResult from the choices and prompts."""
generations = []
for i, _ in enumerate(prompts):
choice = choices[i]
generations.append(
[
Generation(
text=choice["message"]["content"],
generation_info=dict(
finish_reason=choice.get("finish_reason"),
logprobs=choice.get("logprobs"),
),
)
]
)
llm_output = {"token_usage": token_usage, "model_name": model_name}
return LLMResult(generations=generations, llm_output=llm_output)
class Anyscale(BaseOpenAI): # type: ignore[override]
"""Anyscale large language models.
    To use, you should have the environment variable ``ANYSCALE_API_KEY`` set with your
    Anyscale Endpoint API key, or pass it as a named parameter to the constructor.
To use with Anyscale Private Endpoint, please also set ``ANYSCALE_BASE_URL``.
Example:
.. code-block:: python
from langchain.llms import Anyscale
anyscalellm = Anyscale(anyscale_api_key="ANYSCALE_API_KEY")
# To leverage Ray for parallel processing
@ray.remote(num_cpus=1)
def send_query(llm, text):
resp = llm.invoke(text)
return resp
futures = [send_query.remote(anyscalellm, text) for text in texts]
            results = ray.get(futures)
    """
    """Keyword arguments to pass to the model."""
anyscale_api_base: str = Field(default=DEFAULT_BASE_URL)
anyscale_api_key: SecretStr = Field(default=SecretStr(""))
model_name: str = Field(default=DEFAULT_MODEL)
prefix_messages: List = Field(default_factory=list)
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["anyscale_api_base"] = get_from_dict_or_env(
values,
"anyscale_api_base",
"ANYSCALE_API_BASE",
default=DEFAULT_BASE_URL,
)
values["anyscale_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "anyscale_api_key", "ANYSCALE_API_KEY")
)
values["model_name"] = get_from_dict_or_env(
values,
"model_name",
"MODEL_NAME",
default=DEFAULT_MODEL,
)
try:
import openai
if is_openai_v1():
client_params = {
"api_key": values["anyscale_api_key"].get_secret_value(),
"base_url": values["anyscale_api_base"],
# To do: future support
# "organization": values["openai_organization"],
# "timeout": values["request_timeout"],
# "max_retries": values["max_retries"],
# "default_headers": values["default_headers"],
# "default_query": values["default_query"],
# "http_client": values["http_client"],
}
if not values.get("client"):
values["client"] = openai.OpenAI(**client_params).completions
if not values.get("async_client"):
values["async_client"] = openai.AsyncOpenAI(
**client_params
).completions
else:
values["openai_api_base"] = values["anyscale_api_base"]
values["openai_api_key"] = values["anyscale_api_key"].get_secret_value()
values["client"] = openai.Completion # type: ignore[attr-defined]
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if values["streaming"] and values["n"] > 1:
raise ValueError("Cannot stream results when n > 1.")
if values["streaming"] and values["best_of"] > 1:
raise ValueError("Cannot stream results when best_of > 1.")
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_name": self.model_name},
**super()._identifying_params,
}
@property
def _invocation_params(self) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
openai_creds: Dict[str, Any] = {
"model": self.model_name,
}
if not is_openai_v1():
openai_creds.update(
{
"api_key": self.anyscale_api_key.get_secret_value(),
"api_base": self.anyscale_api_base,
}
)
return {**openai_creds, **super()._invocation_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "Anyscale LLM"
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to OpenAI's endpoint with k unique prompts.
Args:
prompts: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The full LLM output.
Example:
.. code-block:: python
response = openai.generate(["Tell me a joke."])
"""
# TODO: write a unit test for this
params = self._invocation_params
params = {**params, **kwargs}
sub_prompts = self.get_sub_prompts(params, prompts, stop)
choices = []
token_usage: Dict[str, int] = {}
# Get the token usage from the response.
# Includes prompt, completion, and total tokens used.
_keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
system_fingerprint: Optional[str] = None
for _prompts in sub_prompts:
if self.streaming:
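                # Streaming supports only one prompt at a time; accumulate the streamed
                # chunks into a single choice.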
if len(_prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.")
generation: Optional[GenerationChunk] = None
for chunk in self._stream(_prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
choices.append(
{
"text": generation.text,
"finish_reason": generation.generation_info.get("finish_reason")
if generation.generation_info
else None,
"logprobs": generation.generation_info.get("logprobs")
if generation.generation_info
else None,
}
)
else:
response = completion_with_retry(
                    ## This is the ONLY change from BaseOpenAI()._generate()
self,
prompt=_prompts[0],
run_manager=run_manager,
**params,
)
if not isinstance(response, dict):
                    # V1 client returns the response in a Pydantic object instead of
# dict. For the transition period, we deep convert it to dict.
response = response.dict()
choices.extend(response["choices"])
update_token_usage(_keys, response, token_usage)
if not system_fingerprint:
system_fingerprint = response.get("system_fingerprint")
return self.create_llm_result(
choices,
prompts,
params,
token_usage,
system_fingerprint=system_fingerprint,
)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to OpenAI's endpoint async with k unique prompts."""
params = self._invocation_params
params = {**params, **kwargs}
sub_prompts = self.get_sub_prompts(params, prompts, stop)
choices = []
token_usage: Dict[str, int] = {}
# Get the token usage from the response.
# Includes prompt, completion, and total tokens used.
_keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
system_fingerprint: Optional[str] = None
for _prompts in sub_prompts:
if self.streaming:
if len(_prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.")
generation: Optional[GenerationChunk] = None
async for chunk in self._astream(
_prompts[0], stop, run_manager, **kwargs
):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
choices.append(
{
"text": generation.text,
"finish_reason": generation.generation_info.get("finish_reason")
if generation.generation_info
else None,
"logprobs": generation.generation_info.get("logprobs")
if generation.generation_info
else None,
}
)
else:
response = await acompletion_with_retry(
                    ## This is the ONLY change from BaseOpenAI()._agenerate()
self,
prompt=_prompts[0],
run_manager=run_manager,
**params,
)
if not isinstance(response, dict):
response = response.dict()
choices.extend(response["choices"])
update_token_usage(_keys, response, token_usage)
return self.create_llm_result(
choices,
prompts,
params,
token_usage,
system_fingerprint=system_fingerprint,
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/stochasticai.py | import logging
import time
from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import ConfigDict, Field, SecretStr, model_validator
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class StochasticAI(LLM):
"""StochasticAI large language models.
To use, you should have the environment variable ``STOCHASTICAI_API_KEY``
set with your API key.
Example:
.. code-block:: python
from langchain_community.llms import StochasticAI
stochasticai = StochasticAI(api_url="")
"""
api_url: str = ""
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
stochasticai_api_key: Optional[SecretStr] = None
model_config = ConfigDict(
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = set(list(cls.model_fields.keys()))
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
stochasticai_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "stochasticai_api_key", "STOCHASTICAI_API_KEY")
)
values["stochasticai_api_key"] = stochasticai_api_key
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"endpoint_url": self.api_url},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "stochasticai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to StochasticAI's complete endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = StochasticAI("Tell me a joke.")
"""
params = self.model_kwargs or {}
params = {**params, **kwargs}
response_post = requests.post(
url=self.api_url,
json={"prompt": prompt, "params": params},
headers={
"apiKey": f"{self.stochasticai_api_key.get_secret_value()}", # type: ignore[union-attr]
"Accept": "application/json",
"Content-Type": "application/json",
},
)
response_post.raise_for_status()
response_post_json = response_post.json()
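        # Generation runs asynchronously: poll the returned response URL until the completion is ready.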
completed = False
while not completed:
response_get = requests.get(
url=response_post_json["data"]["responseUrl"],
headers={
"apiKey": f"{self.stochasticai_api_key.get_secret_value()}", # type: ignore[union-attr]
"Accept": "application/json",
"Content-Type": "application/json",
},
)
response_get.raise_for_status()
response_get_json = response_get.json()["data"]
text = response_get_json.get("completion")
completed = text is not None
time.sleep(0.5)
text = text[0]
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/openlm.py | from typing import Any, Dict
from langchain_core.utils import pre_init
from langchain_community.llms.openai import BaseOpenAI
class OpenLM(BaseOpenAI):
"""OpenLM models."""
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@property
def _invocation_params(self) -> Dict[str, Any]:
return {**{"model": self.model_name}, **super()._invocation_params}
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
try:
import openlm
values["client"] = openlm.Completion
except ImportError:
raise ImportError(
"Could not import openlm python package. "
"Please install it with `pip install openlm`."
)
if values["streaming"]:
raise ValueError("Streaming not supported with openlm")
return values
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/minimax.py | """Wrapper around Minimax APIs."""
from __future__ import annotations
import logging
from typing import (
Any,
Dict,
List,
Optional,
)
import requests
from langchain_core.callbacks import (
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class _MinimaxEndpointClient(BaseModel):
"""API client for the Minimax LLM endpoint."""
host: str
group_id: str
api_key: SecretStr
api_url: str
@model_validator(mode="before")
@classmethod
def set_api_url(cls, values: Dict[str, Any]) -> Any:
if "api_url" not in values:
host = values["host"]
group_id = values["group_id"]
api_url = f"{host}/v1/text/chatcompletion?GroupId={group_id}"
values["api_url"] = api_url
return values
def post(self, request: Any) -> Any:
headers = {"Authorization": f"Bearer {self.api_key.get_secret_value()}"}
response = requests.post(self.api_url, headers=headers, json=request)
# TODO: error handling and automatic retries
if not response.ok:
raise ValueError(f"HTTP {response.status_code} error: {response.text}")
if response.json()["base_resp"]["status_code"] > 0:
raise ValueError(
f"API {response.json()['base_resp']['status_code']}"
f" error: {response.json()['base_resp']['status_msg']}"
)
return response.json()["reply"]
class MinimaxCommon(BaseModel):
"""Common parameters for Minimax large language models."""
model_config = ConfigDict(protected_namespaces=())
_client: _MinimaxEndpointClient
model: str = "abab5.5-chat"
"""Model name to use."""
max_tokens: int = 256
"""Denotes the number of tokens to predict per generation."""
temperature: float = 0.7
"""A non-negative float that tunes the degree of randomness in generation."""
top_p: float = 0.95
"""Total probability mass of tokens to consider at each step."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
minimax_api_host: Optional[str] = None
minimax_group_id: Optional[str] = None
minimax_api_key: Optional[SecretStr] = None
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["minimax_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "minimax_api_key", "MINIMAX_API_KEY")
)
values["minimax_group_id"] = get_from_dict_or_env(
values, "minimax_group_id", "MINIMAX_GROUP_ID"
)
# Get custom api url from environment.
values["minimax_api_host"] = get_from_dict_or_env(
values,
"minimax_api_host",
"MINIMAX_API_HOST",
default="https://api.minimax.chat",
)
values["_client"] = _MinimaxEndpointClient( # type: ignore[call-arg]
host=values["minimax_api_host"],
api_key=values["minimax_api_key"],
group_id=values["minimax_group_id"],
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
"model": self.model,
"tokens_to_generate": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
**self.model_kwargs,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "minimax"
class Minimax(MinimaxCommon, LLM):
"""Minimax large language models.
To use, you should have the environment variable
``MINIMAX_API_KEY`` and ``MINIMAX_GROUP_ID`` set with your API key,
or pass them as a named parameter to the constructor.
Example:
        .. code-block:: python
from langchain_community.llms.minimax import Minimax
minimax = Minimax(model="<model_name>", minimax_api_key="my-api-key",
minimax_group_id="my-group-id")
"""
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
r"""Call out to Minimax's completion endpoint to chat
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = minimax("Tell me a joke.")
"""
request = self._default_params
request["messages"] = [{"sender_type": "USER", "text": prompt}]
request.update(kwargs)
text = self._client.post(request)
if stop is not None:
# This is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/konko.py | """Wrapper around Konko AI's Completion API."""
import logging
import warnings
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from pydantic import ConfigDict, SecretStr, model_validator
from langchain_community.utils.openai import is_openai_v1
logger = logging.getLogger(__name__)
class Konko(LLM):
"""Konko AI models.
To use, you'll need an API key. This can be passed in as init param
``konko_api_key`` or set as environment variable ``KONKO_API_KEY``.
Konko AI API reference: https://docs.konko.ai/reference/
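    Example (a minimal sketch; the model name below is an illustrative assumption,
    see https://docs.konko.ai/reference/get_models for models available to you):
        .. code-block:: python
            from langchain_community.llms import Konko
            llm = Konko(
                model="mistralai/mistral-7b-v0.1",
                konko_api_key="KONKO_API_KEY",
                max_tokens=128,
            )
            print(llm.invoke("Tell me a one-line fun fact about space."))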
"""
base_url: str = "https://api.konko.ai/v1/completions"
"""Base inference API URL."""
konko_api_key: SecretStr
"""Konko AI API key."""
model: str
"""Model name. Available models listed here:
https://docs.konko.ai/reference/get_models
"""
temperature: Optional[float] = None
"""Model temperature."""
top_p: Optional[float] = None
"""Used to dynamically adjust the number of choices for each predicted token based
on the cumulative probabilities. A value of 1 will always yield the same
    output. A value less than 1 favors more correctness and is appropriate
for question answering or summarization. A value greater than 1 introduces more
randomness in the output.
"""
top_k: Optional[int] = None
"""Used to limit the number of choices for the next predicted word or token. It
specifies the maximum number of tokens to consider at each step, based on their
probability of occurrence. This technique helps to speed up the generation
process and can improve the quality of the generated text by focusing on the
most likely options.
"""
max_tokens: Optional[int] = None
"""The maximum number of tokens to generate."""
repetition_penalty: Optional[float] = None
"""A number that controls the diversity of generated text by reducing the
likelihood of repeated sequences. Higher values decrease repetition.
"""
logprobs: Optional[int] = None
"""An integer that specifies how many top token log probabilities are included in
the response for each token generation step.
"""
model_config = ConfigDict(
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict[str, Any]) -> Any:
"""Validate that python package exists in environment."""
try:
import konko
except ImportError:
raise ImportError(
"Could not import konko python package. "
"Please install it with `pip install konko`."
)
if not hasattr(konko, "_is_legacy_openai"):
warnings.warn(
"You are using an older version of the 'konko' package. "
"Please consider upgrading to access new features"
"including the completion endpoint."
)
return values
def construct_payload(
self,
prompt: str,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> Dict[str, Any]:
stop_to_use = stop[0] if stop and len(stop) == 1 else stop
payload: Dict[str, Any] = {
**self.default_params,
"prompt": prompt,
"stop": stop_to_use,
**kwargs,
}
return {k: v for k, v in payload.items() if v is not None}
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "konko"
@staticmethod
def get_user_agent() -> str:
from langchain_community import __version__
return f"langchain/{__version__}"
@property
def default_params(self) -> Dict[str, Any]:
return {
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"max_tokens": self.max_tokens,
"repetition_penalty": self.repetition_penalty,
}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Konko's text generation endpoint.
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
"""
import konko
payload = self.construct_payload(prompt, stop, **kwargs)
try:
if is_openai_v1():
response = konko.completions.create(**payload)
else:
response = konko.Completion.create(**payload)
except AttributeError:
raise ValueError(
"`konko` has no `Completion` attribute, this is likely "
"due to an old version of the konko package. Try upgrading it "
"with `pip install --upgrade konko`."
)
if is_openai_v1():
output = response.choices[0].text
else:
output = response["choices"][0]["text"]
return output
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Asynchronously call out to Konko's text generation endpoint.
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
"""
import konko
payload = self.construct_payload(prompt, stop, **kwargs)
try:
if is_openai_v1():
client = konko.AsyncKonko()
response = await client.completions.create(**payload)
else:
response = await konko.Completion.acreate(**payload)
except AttributeError:
raise ValueError(
"`konko` has no `Completion` attribute, this is likely "
"due to an old version of the konko package. Try upgrading it "
"with `pip install --upgrade konko`."
)
if is_openai_v1():
output = response.choices[0].text
else:
output = response["choices"][0]["text"]
return output
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/cloudflare_workersai.py | import json
import logging
from typing import Any, Dict, Iterator, List, Optional
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
logger = logging.getLogger(__name__)
class CloudflareWorkersAI(LLM):
"""Cloudflare Workers AI service.
To use, you must provide an API token and
account ID to access Cloudflare Workers AI, and
pass them as named parameters to the constructor.
Example:
.. code-block:: python
from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI
my_account_id = "my_account_id"
my_api_token = "my_secret_api_token"
llm_model = "@cf/meta/llama-2-7b-chat-int8"
cf_ai = CloudflareWorkersAI(
account_id=my_account_id,
api_token=my_api_token,
model=llm_model
)
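# A minimal invocation sketch (the prompt is illustrative):
response = cf_ai.invoke("Tell me a joke")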
""" # noqa: E501
account_id: str
api_token: str
model: str = "@cf/meta/llama-2-7b-chat-int8"
base_url: str = "https://api.cloudflare.com/client/v4/accounts"
streaming: bool = False
endpoint_url: str = ""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the Cloudflare Workers AI class."""
super().__init__(**kwargs)
self.endpoint_url = f"{self.base_url}/{self.account_id}/ai/run/{self.model}"
@property
def _llm_type(self) -> str:
"""Return type of LLM."""
return "cloudflare"
@property
def _default_params(self) -> Dict[str, Any]:
"""Default parameters"""
return {}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Identifying parameters"""
return {
"account_id": self.account_id,
"api_token": self.api_token,
"model": self.model,
"base_url": self.base_url,
}
def _call_api(self, prompt: str, params: Dict[str, Any]) -> requests.Response:
"""Call Cloudflare Workers API"""
headers = {"Authorization": f"Bearer {self.api_token}"}
data = {"prompt": prompt, "stream": self.streaming, **params}
response = requests.post(
self.endpoint_url, headers=headers, json=data, stream=self.streaming
)
return response
def _process_response(self, response: requests.Response) -> str:
"""Process API response"""
if response.ok:
data = response.json()
return data["result"]["response"]
else:
raise ValueError(f"Request failed with status {response.status_code}")
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Streaming prediction"""
original_streaming: bool = self.streaming
self.streaming = True
_response_prefix_count = len("data: ")
_response_stream_end = b"data: [DONE]"
for chunk in self._call_api(prompt, kwargs).iter_lines():
if chunk == _response_stream_end:
break
if len(chunk) > _response_prefix_count:
try:
data = json.loads(chunk[_response_prefix_count:])
except Exception as e:
logger.debug(chunk)
raise e
if data is not None and "response" in data:
if run_manager:
run_manager.on_llm_new_token(data["response"])
yield GenerationChunk(text=data["response"])
logger.debug("stream end")
self.streaming = original_streaming
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Regular prediction"""
if self.streaming:
return "".join(
[c.text for c in self._stream(prompt, stop, run_manager, **kwargs)]
)
else:
response = self._call_api(prompt, kwargs)
return self._process_response(response)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/pai_eas_endpoint.py | import json
import logging
from typing import Any, Dict, Iterator, List, Mapping, Optional
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import get_from_dict_or_env, pre_init
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class PaiEasEndpoint(LLM):
"""Langchain LLM class to help to access eass llm service.
To use this endpoint, must have a deployed eas chat llm service on PAI AliCloud.
One can set the environment variable ``eas_service_url`` and ``eas_service_token``.
The environment variables can set with your eas service url and service token.
Example:
.. code-block:: python
from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint
eas_endpoint = PaiEasEndpoint(
eas_service_url="your_service_url",
eas_service_token="your_service_token"
)
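# A minimal invocation sketch (the prompt is illustrative):
response = eas_endpoint.invoke("Tell me a joke.")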
"""
"""PAI-EAS Service URL"""
eas_service_url: str
"""PAI-EAS Service TOKEN"""
eas_service_token: str
"""PAI-EAS Service Infer Params"""
max_new_tokens: Optional[int] = 512
temperature: Optional[float] = 0.95
top_p: Optional[float] = 0.1
top_k: Optional[int] = 0
stop_sequences: Optional[List[str]] = None
"""Enable stream chat mode."""
streaming: bool = False
"""Key/value arguments to pass to the model. Reserved for future use"""
model_kwargs: Optional[dict] = None
version: Optional[str] = "2.0"
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["eas_service_url"] = get_from_dict_or_env(
values, "eas_service_url", "EAS_SERVICE_URL"
)
values["eas_service_token"] = get_from_dict_or_env(
values, "eas_service_token", "EAS_SERVICE_TOKEN"
)
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "pai_eas_endpoint"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Cohere API."""
return {
"max_new_tokens": self.max_new_tokens,
"temperature": self.temperature,
"top_k": self.top_k,
"top_p": self.top_p,
"stop_sequences": [],
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
"eas_service_url": self.eas_service_url,
"eas_service_token": self.eas_service_token,
**_model_kwargs,
}
def _invocation_params(
self, stop_sequences: Optional[List[str]], **kwargs: Any
) -> dict:
params = self._default_params
if self.stop_sequences is not None and stop_sequences is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop_sequences is not None:
params["stop"] = self.stop_sequences
else:
params["stop"] = stop_sequences
if self.model_kwargs:
params.update(self.model_kwargs)
return {**params, **kwargs}
@staticmethod
def _process_response(
response: Any, stop: Optional[List[str]], version: Optional[str]
) -> str:
if version == "1.0":
text = response
else:
text = response["response"]
if stop:
text = enforce_stop_tokens(text, stop)
return "".join(text)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
params = self._invocation_params(stop, **kwargs)
prompt = prompt.strip()
response = None
try:
if self.streaming:
completion = ""
for chunk in self._stream(prompt, stop, run_manager, **params):
completion += chunk.text
return completion
else:
response = self._call_eas(prompt, params)
_stop = params.get("stop")
return self._process_response(response, _stop, self.version)
except Exception as error:
raise ValueError(f"Error raised by the service: {error}")
def _call_eas(self, prompt: str = "", params: Dict = {}) -> Any:
"""Generate text from the eas service."""
headers = {
"Content-Type": "application/json",
"Authorization": f"{self.eas_service_token}",
}
if self.version == "1.0":
body = {
"input_ids": f"{prompt}",
}
else:
body = {
"prompt": f"{prompt}",
}
# add params to body
for key, value in params.items():
body[key] = value
# make request
response = requests.post(self.eas_service_url, headers=headers, json=body)
if response.status_code != 200:
raise Exception(
f"Request failed with status code {response.status_code}"
f" and message {response.text}"
)
try:
return json.loads(response.text)
except Exception as e:
if isinstance(e, json.decoder.JSONDecodeError):
return response.text
raise e
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
invocation_params = self._invocation_params(stop, **kwargs)
headers = {
"User-Agent": "Test Client",
"Authorization": f"{self.eas_service_token}",
}
if self.version == "1.0":
pload = {"input_ids": prompt, **invocation_params}
response = requests.post(
self.eas_service_url, headers=headers, json=pload, stream=True
)
res = GenerationChunk(text=response.text)
if run_manager:
run_manager.on_llm_new_token(res.text)
# yield text, if any
yield res
else:
pload = {"prompt": prompt, "use_stream_chat": "True", **invocation_params}
response = requests.post(
self.eas_service_url, headers=headers, json=pload, stream=True
)
for chunk in response.iter_lines(
chunk_size=8192, decode_unicode=False, delimiter=b"\0"
):
if chunk:
data = json.loads(chunk.decode("utf-8"))
output = data["response"]
# identify stop sequence in generated text, if any
stop_seq_found: Optional[str] = None
for stop_seq in invocation_params["stop"]:
if stop_seq in output:
stop_seq_found = stop_seq
# identify text to yield
text: Optional[str] = None
if stop_seq_found:
text = output[: output.index(stop_seq_found)]
else:
text = output
# yield text, if any
if text:
res = GenerationChunk(text=text)
if run_manager:
run_manager.on_llm_new_token(res.text)
yield res
# break if stop sequence found
if stop_seq_found:
break
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/symblai_nebula.py | import json
import logging
from typing import Any, Callable, Dict, List, Mapping, Optional
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import ConfigDict, SecretStr
from requests import ConnectTimeout, ReadTimeout, RequestException
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain_community.llms.utils import enforce_stop_tokens
DEFAULT_NEBULA_SERVICE_URL = "https://api-nebula.symbl.ai"
DEFAULT_NEBULA_SERVICE_PATH = "/v1/model/generate"
logger = logging.getLogger(__name__)
class Nebula(LLM):
"""Nebula Service models.
To use, you should have the environment variables ``NEBULA_SERVICE_URL``,
``NEBULA_SERVICE_PATH`` and ``NEBULA_API_KEY`` set with your Nebula
Service, or pass them as named parameters to the constructor.
Example:
.. code-block:: python
from langchain_community.llms import Nebula
nebula = Nebula(
nebula_service_url="NEBULA_SERVICE_URL",
nebula_service_path="NEBULA_SERVICE_PATH",
nebula_api_key="NEBULA_API_KEY",
)
"""
"""Key/value arguments to pass to the model. Reserved for future use"""
model_kwargs: Optional[dict] = None
"""Optional"""
nebula_service_url: Optional[str] = None
nebula_service_path: Optional[str] = None
nebula_api_key: Optional[SecretStr] = None
model: Optional[str] = None
max_new_tokens: Optional[int] = 128
temperature: Optional[float] = 0.6
top_p: Optional[float] = 0.95
repetition_penalty: Optional[float] = 1.0
top_k: Optional[int] = 1
stop_sequences: Optional[List[str]] = None
max_retries: Optional[int] = 10
model_config = ConfigDict(
extra="forbid",
)
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
nebula_service_url = get_from_dict_or_env(
values,
"nebula_service_url",
"NEBULA_SERVICE_URL",
DEFAULT_NEBULA_SERVICE_URL,
)
nebula_service_path = get_from_dict_or_env(
values,
"nebula_service_path",
"NEBULA_SERVICE_PATH",
DEFAULT_NEBULA_SERVICE_PATH,
)
nebula_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "nebula_api_key", "NEBULA_API_KEY", None)
)
if nebula_service_url.endswith("/"):
nebula_service_url = nebula_service_url[:-1]
if not nebula_service_path.startswith("/"):
nebula_service_path = "/" + nebula_service_path
values["nebula_service_url"] = nebula_service_url
values["nebula_service_path"] = nebula_service_path
values["nebula_api_key"] = nebula_api_key
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Cohere API."""
return {
"max_new_tokens": self.max_new_tokens,
"temperature": self.temperature,
"top_k": self.top_k,
"top_p": self.top_p,
"repetition_penalty": self.repetition_penalty,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
"nebula_service_url": self.nebula_service_url,
"nebula_service_path": self.nebula_service_path,
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "nebula"
def _invocation_params(
self, stop_sequences: Optional[List[str]], **kwargs: Any
) -> dict:
params = self._default_params
if self.stop_sequences is not None and stop_sequences is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop_sequences is not None:
params["stop_sequences"] = self.stop_sequences
else:
params["stop_sequences"] = stop_sequences
return {**params, **kwargs}
@staticmethod
def _process_response(response: Any, stop: Optional[List[str]]) -> str:
text = response["output"]["text"]
if stop:
text = enforce_stop_tokens(text, stop)
return text
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Nebula Service endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = nebula("Tell me a joke.")
"""
params = self._invocation_params(stop, **kwargs)
prompt = prompt.strip()
response = completion_with_retry(
self,
prompt=prompt,
params=params,
url=f"{self.nebula_service_url}{self.nebula_service_path}",
)
_stop = params.get("stop_sequences")
return self._process_response(response, _stop)
def make_request(
self: Nebula,
prompt: str,
url: str = f"{DEFAULT_NEBULA_SERVICE_URL}{DEFAULT_NEBULA_SERVICE_PATH}",
params: Optional[Dict] = None,
) -> Any:
"""Generate text from the model."""
params = params or {}
api_key = None
if self.nebula_api_key is not None:
api_key = self.nebula_api_key.get_secret_value()
headers = {
"Content-Type": "application/json",
"ApiKey": f"{api_key}",
}
body = {"prompt": prompt}
# add params to body
for key, value in params.items():
body[key] = value
# make request
response = requests.post(url, headers=headers, json=body)
if response.status_code != 200:
raise Exception(
f"Request failed with status code {response.status_code}"
f" and message {response.text}"
)
return json.loads(response.text)
def _create_retry_decorator(llm: Nebula) -> Callable[[Any], Any]:
min_seconds = 4
max_seconds = 10
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterward
max_retries = llm.max_retries if llm.max_retries is not None else 3
return retry(
reraise=True,
stop=stop_after_attempt(max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type((RequestException, ConnectTimeout, ReadTimeout))
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def completion_with_retry(llm: Nebula, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _completion_with_retry(**_kwargs: Any) -> Any:
return make_request(llm, **_kwargs)
return _completion_with_retry(**kwargs)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/sambanova.py | import json
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
import requests
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import Field, SecretStr
from requests import Response
class SambaStudio(LLM):
"""
SambaStudio large language models.
Setup:
To use, you should have the environment variables
``SAMBASTUDIO_URL`` set with your SambaStudio environment URL.
``SAMBASTUDIO_API_KEY`` set with your SambaStudio endpoint API key.
https://sambanova.ai/products/enterprise-ai-platform-sambanova-suite
read extra documentation in https://docs.sambanova.ai/sambastudio/latest/index.html
Example:
.. code-block:: python
from langchain_community.llms.sambanova import SambaStudio
SambaStudio(
sambastudio_url="your-SambaStudio-environment-URL",
sambastudio_api_key="your-SambaStudio-API-key,
model_kwargs={
"model" : model or expert name (set for CoE endpoints),
"max_tokens" : max number of tokens to generate,
"temperature" : model temperature,
"top_p" : model top p,
"top_k" : model top k,
"do_sample" : wether to do sample
"process_prompt": wether to process prompt
(set for CoE generic v1 and v2 endpoints)
},
)
Key init args — completion params:
model: str
The name of the model to use, e.g., Meta-Llama-3-70B-Instruct-4096
(set for CoE endpoints).
streaming: bool
Whether to use streaming handler when using non streaming methods
model_kwargs: dict
Extra Key word arguments to pass to the model:
max_tokens: int
max tokens to generate
temperature: float
model temperature
top_p: float
model top p
top_k: int
model top k
do_sample: bool
whether to do sampling
process_prompt:
whether to process prompt (set for CoE generic v1 and v2 endpoints)
Key init args — client params:
sambastudio_url: str
SambaStudio endpoint Url
sambastudio_api_key: str
SambaStudio endpoint api key
Instantiate:
.. code-block:: python
from langchain_community.llms import SambaStudio
llm = SambaStudio(
sambastudio_url = set with your SambaStudio deployed endpoint URL,
sambastudio_api_key = set with your SambaStudio deployed endpoint Key,
model_kwargs = {
"model" : model or expert name (set for CoE endpoints),
"max_tokens" : max number of tokens to generate,
"temperature" : model temperature,
"top_p" : model top p,
"top_k" : model top k,
"do_sample" : wether to do sample
"process_prompt" : wether to process prompt
(set for CoE generic v1 and v2 endpoints)
}
)
Invoke:
.. code-block:: python
prompt = "tell me a joke"
response = llm.invoke(prompt)
Stream:
.. code-block:: python
for chunk in llm.stream(prompt):
print(chunk, end="", flush=True)
Async:
.. code-block:: python
response = llm.ainvoke(prompt)
await response
"""
sambastudio_url: str = Field(default="")
"""SambaStudio Url"""
sambastudio_api_key: SecretStr = Field(default=SecretStr(""))
"""SambaStudio api key"""
base_url: str = Field(default="", exclude=True)
"""SambaStudio non streaming URL"""
streaming_url: str = Field(default="", exclude=True)
"""SambaStudio streaming URL"""
streaming: bool = Field(default=False)
"""Whether to use streaming handler when using non streaming methods"""
model_kwargs: Optional[Dict[str, Any]] = None
"""Key word arguments to pass to the model."""
class Config:
populate_by_name = True
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return True
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"sambastudio_url": "sambastudio_url",
"sambastudio_api_key": "sambastudio_api_key",
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Return a dictionary of identifying parameters.
This information is used by the LangChain callback system, which
is used for tracing purposes to make it possible to monitor LLMs.
"""
return {"streaming": self.streaming, **{"model_kwargs": self.model_kwargs}}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "sambastudio-llm"
def __init__(self, **kwargs: Any) -> None:
"""init and validate environment variables"""
kwargs["sambastudio_url"] = get_from_dict_or_env(
kwargs, "sambastudio_url", "SAMBASTUDIO_URL"
)
kwargs["sambastudio_api_key"] = convert_to_secret_str(
get_from_dict_or_env(kwargs, "sambastudio_api_key", "SAMBASTUDIO_API_KEY")
)
kwargs["base_url"], kwargs["streaming_url"] = self._get_sambastudio_urls(
kwargs["sambastudio_url"]
)
super().__init__(**kwargs)
def _get_sambastudio_urls(self, url: str) -> Tuple[str, str]:
"""
Get streaming and non streaming URLs from the given URL
Args:
url: string with sambastudio base or streaming endpoint url
Returns:
base_url: string with url to do non streaming calls
streaming_url: string with url to do streaming calls
"""
if "openai" in url:
base_url = url
stream_url = url
else:
if "stream" in url:
base_url = url.replace("stream/", "")
stream_url = url
else:
base_url = url
if "generic" in url:
stream_url = "generic/stream".join(url.split("generic"))
else:
raise ValueError("Unsupported URL")
return base_url, stream_url
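# A worked example (illustrative URLs) of how the streaming URL is derived for
# a generic endpoint that does not already contain "stream":
#   "https://host/api/v2/predict/generic/<id>" ->
#     base_url:      "https://host/api/v2/predict/generic/<id>"
#     streaming_url: "https://host/api/v2/predict/generic/stream/<id>"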
def _get_tuning_params(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:
"""
Get the tuning parameters to use when calling the LLM.
Args:
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of the stop substrings.
Returns:
The tuning parameters in the format required by api to use
"""
if stop is None:
stop = []
# get the parameters to use when calling the LLM.
_model_kwargs = self.model_kwargs or {}
# handle the case where stop sequences are sent in the invocation
# and stop sequences has been also set in the model parameters
_stop_sequences = _model_kwargs.get("stop_sequences", []) + stop
if len(_stop_sequences) > 0:
_model_kwargs["stop_sequences"] = _stop_sequences
# set the parameters structure depending on the API
if "openai" in self.sambastudio_url:
if "select_expert" in _model_kwargs.keys():
_model_kwargs["model"] = _model_kwargs.pop("select_expert")
if "max_tokens_to_generate" in _model_kwargs.keys():
_model_kwargs["max_tokens"] = _model_kwargs.pop(
"max_tokens_to_generate"
)
if "process_prompt" in _model_kwargs.keys():
_model_kwargs.pop("process_prompt")
tuning_params = _model_kwargs
elif "api/v2/predict/generic" in self.sambastudio_url:
if "model" in _model_kwargs.keys():
_model_kwargs["select_expert"] = _model_kwargs.pop("model")
if "max_tokens" in _model_kwargs.keys():
_model_kwargs["max_tokens_to_generate"] = _model_kwargs.pop(
"max_tokens"
)
tuning_params = _model_kwargs
elif "api/predict/generic" in self.sambastudio_url:
if "model" in _model_kwargs.keys():
_model_kwargs["select_expert"] = _model_kwargs.pop("model")
if "max_tokens" in _model_kwargs.keys():
_model_kwargs["max_tokens_to_generate"] = _model_kwargs.pop(
"max_tokens"
)
tuning_params = {
k: {"type": type(v).__name__, "value": str(v)}
for k, v in (_model_kwargs.items())
}
else:
raise ValueError(
f"Unsupported URL{self.sambastudio_url}"
"only openai, generic v1 and generic v2 APIs are supported"
)
return tuning_params
def _handle_request(
self,
prompt: Union[List[str], str],
stop: Optional[List[str]] = None,
streaming: Optional[bool] = False,
) -> Response:
"""
Performs a post request to the LLM API.
Args:
prompt: The prompt to pass into the model
stop: list of stop tokens
streaming: whether to do a streaming call
Returns:
A request Response object
"""
if isinstance(prompt, str):
prompt = [prompt]
params = self._get_tuning_params(stop)
# create request payload for openAI v1 API
if "openai" in self.sambastudio_url:
messages_dict = [{"role": "user", "content": prompt[0]}]
data = {"messages": messages_dict, "stream": streaming, **params}
data = {key: value for key, value in data.items() if value is not None}
headers = {
"Authorization": f"Bearer "
f"{self.sambastudio_api_key.get_secret_value()}",
"Content-Type": "application/json",
}
# create request payload for generic v2 API
elif "api/v2/predict/generic" in self.sambastudio_url:
if params.get("process_prompt", False):
prompt = json.dumps(
{
"conversation_id": "sambaverse-conversation-id",
"messages": [
{"message_id": None, "role": "user", "content": prompt[0]}
],
}
)
else:
prompt = prompt[0]
items = [{"id": "item0", "value": prompt}]
params = {key: value for key, value in params.items() if value is not None}
data = {"items": items, "params": params}
headers = {"key": self.sambastudio_api_key.get_secret_value()}
# create request payload for generic v1 API
elif "api/predict/generic" in self.sambastudio_url:
if params.get("process_prompt", False):
if params["process_prompt"].get("value") == "True":
prompt = json.dumps(
{
"conversation_id": "sambaverse-conversation-id",
"messages": [
{
"message_id": None,
"role": "user",
"content": prompt[0],
}
],
}
)
else:
prompt = prompt[0]
else:
prompt = prompt[0]
if streaming:
data = {"instance": prompt, "params": params}
else:
data = {"instances": [prompt], "params": params}
headers = {"key": self.sambastudio_api_key.get_secret_value()}
else:
raise ValueError(
f"Unsupported URL{self.sambastudio_url}"
"only openai, generic v1 and generic v2 APIs are supported"
)
# make the request to SambaStudio API
http_session = requests.Session()
if streaming:
response = http_session.post(
self.streaming_url, headers=headers, json=data, stream=True
)
else:
response = http_session.post(
self.base_url, headers=headers, json=data, stream=False
)
if response.status_code != 200:
raise RuntimeError(
f"Sambanova / complete call failed with status code "
f"{response.status_code}."
f"{response.text}."
)
return response
def _process_response(self, response: Response) -> str:
"""
Process a non streaming response from the api
Args:
response: A request Response object
Returns
completion: a string with model generation
"""
# Extract JSON payload from response
try:
response_dict = response.json()
except Exception as e:
raise RuntimeError(
f"Sambanova /complete call failed couldn't get JSON response {e}"
f"response: {response.text}"
)
# process response payload for openai compatible API
if "openai" in self.sambastudio_url:
completion = response_dict["choices"][0]["message"]["content"]
# process response payload for generic v2 API
elif "api/v2/predict/generic" in self.sambastudio_url:
completion = response_dict["items"][0]["value"]["completion"]
# process response payload for generic v1 API
elif "api/predict/generic" in self.sambastudio_url:
completion = response_dict["predictions"][0]["completion"]
else:
raise ValueError(
f"Unsupported URL{self.sambastudio_url}"
"only openai, generic v1 and generic v2 APIs are supported"
)
return completion
def _process_stream_response(self, response: Response) -> Iterator[GenerationChunk]:
"""
Process a streaming response from the api
Args:
response: An iterable request Response object
Yields:
GenerationChunk: a GenerationChunk with model partial generation
"""
try:
import sseclient
except ImportError:
raise ImportError(
"could not import sseclient library"
"Please install it with `pip install sseclient-py`."
)
# process response payload for openai compatible API
if "openai" in self.sambastudio_url:
client = sseclient.SSEClient(response)
for event in client.events():
if event.event == "error_event":
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}."
f"{event.data}."
)
try:
# check if the response is not a final event ("[DONE]")
if event.data != "[DONE]":
if isinstance(event.data, str):
data = json.loads(event.data)
else:
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}."
f"{event.data}."
)
if data.get("error"):
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}."
f"{event.data}."
)
if len(data["choices"]) > 0:
content = data["choices"][0]["delta"]["content"]
else:
content = ""
generated_chunk = GenerationChunk(text=content)
yield generated_chunk
except Exception as e:
raise RuntimeError(
f"Error getting content chunk raw streamed response: {e}"
f"data: {event.data}"
)
# process response payload for generic v2 API
elif "api/v2/predict/generic" in self.sambastudio_url:
for line in response.iter_lines():
try:
data = json.loads(line)
content = data["result"]["items"][0]["value"]["stream_token"]
generated_chunk = GenerationChunk(text=content)
yield generated_chunk
except Exception as e:
raise RuntimeError(
f"Error getting content chunk raw streamed response: {e}"
f"line: {line}"
)
# process response payload for generic v1 API
elif "api/predict/generic" in self.sambastudio_url:
for line in response.iter_lines():
try:
data = json.loads(line)
content = data["result"]["responses"][0]["stream_token"]
generated_chunk = GenerationChunk(text=content)
yield generated_chunk
except Exception as e:
raise RuntimeError(
f"Error getting content chunk raw streamed response: {e}"
f"line: {line}"
)
else:
raise ValueError(
f"Unsupported URL{self.sambastudio_url}"
"only openai, generic v1 and generic v2 APIs are supported"
)
def _stream(
self,
prompt: Union[List[str], str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Call out to Sambanova's complete endpoint.
Args:
prompt: The prompt to pass into the model.
stop: a list of strings on which the model should stop generating.
run_manager: A run manager with callbacks for the LLM.
Yields:
chunk: GenerationChunk with model partial generation
"""
response = self._handle_request(prompt, stop, streaming=True)
for chunk in self._process_stream_response(response):
if run_manager:
run_manager.on_llm_new_token(chunk.text)
yield chunk
def _call(
self,
prompt: Union[List[str], str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Sambanova's complete endpoint.
Args:
prompt: The prompt to pass into the model.
stop: a list of strings on which the model should stop generating.
Returns:
result: string with model generation
"""
if self.streaming:
completion = ""
for chunk in self._stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
completion += chunk.text
return completion
response = self._handle_request(prompt, stop, streaming=False)
completion = self._process_response(response)
return completion
class SambaNovaCloud(LLM):
"""
SambaNova Cloud large language models.
Setup:
To use, you should have the environment variables:
``SAMBANOVA_URL`` set with SambaNova Cloud URL.
Defaults to https://api.sambanova.ai/v1/chat/completions.
``SAMBANOVA_API_KEY`` set with your SambaNova Cloud API Key.
Example:
.. code-block:: python
from langchain_community.llms.sambanova import SambaNovaCloud
SambaNovaCloud(
sambanova_api_key="your-SambaNovaCloud-API-key,
model = model name,
max_tokens = max number of tokens to generate,
temperature = model temperature,
top_p = model top p,
top_k = model top k
)
Key init args — completion params:
model: str
The name of the model to use, e.g., Meta-Llama-3-70B-Instruct-4096
(set for CoE endpoints).
streaming: bool
Whether to use streaming handler when using non streaming methods
max_tokens: int
max tokens to generate
temperature: float
model temperature
top_p: float
model top p
top_k: int
model top k
Key init args — client params:
sambanova_url: str
SambaNovaCloud URL, defaults to https://api.sambanova.ai/v1/chat/completions
sambanova_api_key: str
SambaNovaCloud api key
Instantiate:
.. code-block:: python
from langchain_community.llms.sambanova import SambaNovaCloud
llm = SambaNovaCloud(
sambanova_api_key="your-SambaNovaCloud-API-key,
model = model name,
max_tokens = max number of tokens to generate,
temperature = model temperature,
top_p = model top p,
top_k = model top k
)
Invoke:
.. code-block:: python
prompt = "tell me a joke"
response = llm.invoke(prompt)
Stream:
.. code-block:: python
for chunk in llm.stream(prompt):
print(chunk, end="", flush=True)
Async:
.. code-block:: python
response = llm.ainvoke(prompt)
await response
"""
sambanova_url: str = Field(default="")
"""SambaNova Cloud Url"""
sambanova_api_key: SecretStr = Field(default=SecretStr(""))
"""SambaNova Cloud api key"""
model: str = Field(default="Meta-Llama-3.1-8B-Instruct")
"""The name of the model"""
streaming: bool = Field(default=False)
"""Whether to use streaming handler when using non streaming methods"""
max_tokens: int = Field(default=1024)
"""max tokens to generate"""
temperature: float = Field(default=0.7)
"""model temperature"""
top_p: Optional[float] = Field(default=None)
"""model top p"""
top_k: Optional[int] = Field(default=None)
"""model top k"""
stream_options: dict = Field(default={"include_usage": True})
"""stream options, include usage to get generation metrics"""
class Config:
populate_by_name = True
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return False
@property
def lc_secrets(self) -> Dict[str, str]:
return {"sambanova_api_key": "sambanova_api_key"}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Return a dictionary of identifying parameters.
This information is used by the LangChain callback system, which
is used for tracing purposes to make it possible to monitor LLMs.
"""
return {
"model": self.model,
"streaming": self.streaming,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"stream_options": self.stream_options,
}
@property
def _llm_type(self) -> str:
"""Get the type of language model used by this chat model."""
return "sambanovacloud-llm"
def __init__(self, **kwargs: Any) -> None:
"""init and validate environment variables"""
kwargs["sambanova_url"] = get_from_dict_or_env(
kwargs,
"sambanova_url",
"SAMBANOVA_URL",
default="https://api.sambanova.ai/v1/chat/completions",
)
kwargs["sambanova_api_key"] = convert_to_secret_str(
get_from_dict_or_env(kwargs, "sambanova_api_key", "SAMBANOVA_API_KEY")
)
super().__init__(**kwargs)
def _handle_request(
self,
prompt: Union[List[str], str],
stop: Optional[List[str]] = None,
streaming: Optional[bool] = False,
) -> Response:
"""
Performs a post request to the LLM API.
Args:
prompt: The prompt to pass into the model.
stop: list of stop tokens
Returns:
A request Response object
"""
if isinstance(prompt, str):
prompt = [prompt]
messages_dict = [{"role": "user", "content": prompt[0]}]
data = {
"messages": messages_dict,
"stream": streaming,
"max_tokens": self.max_tokens,
"stop": stop,
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
}
data = {key: value for key, value in data.items() if value is not None}
headers = {
"Authorization": f"Bearer " f"{self.sambanova_api_key.get_secret_value()}",
"Content-Type": "application/json",
}
http_session = requests.Session()
if streaming:
response = http_session.post(
self.sambanova_url, headers=headers, json=data, stream=True
)
else:
response = http_session.post(
self.sambanova_url, headers=headers, json=data, stream=False
)
if response.status_code != 200:
raise RuntimeError(
f"Sambanova / complete call failed with status code "
f"{response.status_code}."
f"{response.text}."
)
return response
def _process_response(self, response: Response) -> str:
"""
Process a non streaming response from the api
Args:
response: A request Response object
Returns
completion: a string with model generation
"""
# Extract JSON payload from response
try:
response_dict = response.json()
except Exception as e:
raise RuntimeError(
f"Sambanova /complete call failed couldn't get JSON response {e}"
f"response: {response.text}"
)
completion = response_dict["choices"][0]["message"]["content"]
return completion
def _process_stream_response(self, response: Response) -> Iterator[GenerationChunk]:
"""
Process a streaming response from the api
Args:
response: An iterable request Response object
Yields:
GenerationChunk: a GenerationChunk with model partial generation
"""
try:
import sseclient
except ImportError:
raise ImportError(
"could not import sseclient library"
"Please install it with `pip install sseclient-py`."
)
client = sseclient.SSEClient(response)
for event in client.events():
if event.event == "error_event":
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}."
f"{event.data}."
)
try:
# check if the response is not a final event ("[DONE]")
if event.data != "[DONE]":
if isinstance(event.data, str):
data = json.loads(event.data)
else:
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}."
f"{event.data}."
)
if data.get("error"):
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}."
f"{event.data}."
)
if len(data["choices"]) > 0:
content = data["choices"][0]["delta"]["content"]
else:
content = ""
generated_chunk = GenerationChunk(text=content)
yield generated_chunk
except Exception as e:
raise RuntimeError(
f"Error getting content chunk raw streamed response: {e}"
f"data: {event.data}"
)
def _call(
self,
prompt: Union[List[str], str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to SambaNovaCloud complete endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
"""
if self.streaming:
completion = ""
for chunk in self._stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
completion += chunk.text
return completion
response = self._handle_request(prompt, stop, streaming=False)
completion = self._process_response(response)
return completion
def _stream(
self,
prompt: Union[List[str], str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Call out to SambaNovaCloud complete endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Yields:
chunk: GenerationChunk with model partial generation
"""
response = self._handle_request(prompt, stop, streaming=True)
for chunk in self._process_stream_response(response):
if run_manager:
run_manager.on_llm_new_token(chunk.text)
yield chunk
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/utils.py | """Common utility functions for LLM APIs."""
import re
from typing import List
def enforce_stop_tokens(text: str, stop: List[str]) -> str:
"""Cut off the text as soon as any stop words occur."""
return re.split("|".join(stop), text, maxsplit=1)[0]
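# A minimal usage sketch (illustrative values): the text is cut at the first
# occurrence of any stop sequence. Note that the stop words are joined into a
# regex alternation, so regex metacharacters in stop words are interpreted as
# regex syntax.
#
#     enforce_stop_tokens("Hello world. Goodbye.", ["Goodbye"])  # -> "Hello world. "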
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/weight_only_quantization.py | import importlib
from typing import Any, List, Mapping, Optional
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from pydantic import ConfigDict
from langchain_community.llms.utils import enforce_stop_tokens
DEFAULT_MODEL_ID = "google/flan-t5-large"
DEFAULT_TASK = "text2text-generation"
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
class WeightOnlyQuantPipeline(LLM):
"""Weight only quantized model.
To use, you should have the `intel-extension-for-transformers` package and
`transformers` package installed.
intel-extension-for-transformers:
https://github.com/intel/intel-extension-for-transformers
Example using from_model_id:
.. code-block:: python
from langchain_community.llms import WeightOnlyQuantPipeline
from intel_extension_for_transformers.transformers import (
WeightOnlyQuantConfig
)
config = WeightOnlyQuantConfig()
hf = WeightOnlyQuantPipeline.from_model_id(
model_id="google/flan-t5-large",
task="text2text-generation"
pipeline_kwargs={"max_new_tokens": 10},
quantization_config=config,
)
Example passing pipeline in directly:
.. code-block:: python
from langchain_community.llms import WeightOnlyQuantPipeline
from intel_extension_for_transformers.transformers import (
AutoModelForSeq2SeqLM
)
from intel_extension_for_transformers.transformers import (
WeightOnlyQuantConfig
)
from transformers import AutoTokenizer, pipeline
model_id = "google/flan-t5-large"
tokenizer = AutoTokenizer.from_pretrained(model_id)
config = WeightOnlyQuantConfig()
model = AutoModelForSeq2SeqLM.from_pretrained(
model_id,
quantization_config=config,
)
pipe = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
max_new_tokens=10,
)
hf = WeightOnlyQuantPipeline(pipeline=pipe)
"""
pipeline: Any = None #: :meta private:
model_id: str = DEFAULT_MODEL_ID
"""Model name or local path to use."""
model_kwargs: Optional[dict] = None
"""Key word arguments passed to the model."""
pipeline_kwargs: Optional[dict] = None
"""Key word arguments passed to the pipeline."""
model_config = ConfigDict(
extra="allow",
)
@classmethod
def from_model_id(
cls,
model_id: str,
task: str,
device: Optional[int] = -1,
device_map: Optional[str] = None,
model_kwargs: Optional[dict] = None,
pipeline_kwargs: Optional[dict] = None,
load_in_4bit: Optional[bool] = False,
load_in_8bit: Optional[bool] = False,
quantization_config: Optional[Any] = None,
**kwargs: Any,
) -> LLM:
"""Construct the pipeline object from model_id and task."""
if device_map is not None and (isinstance(device, int) and device > -1):
raise ValueError("`Device` and `device_map` cannot be set simultaneously!")
if importlib.util.find_spec("torch") is None:
raise ValueError(
"Weight only quantization pipeline only support PyTorch now!"
)
try:
from intel_extension_for_transformers.transformers import (
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
)
from intel_extension_for_transformers.utils.utils import is_ipex_available
from transformers import AutoTokenizer
from transformers import pipeline as hf_pipeline
except ImportError:
raise ImportError(
"Could not import transformers python package. "
"Please install it with `pip install transformers` "
"and `pip install intel-extension-for-transformers`."
)
if isinstance(device, int) and device >= 0:
if not is_ipex_available():
raise ValueError("Don't find out Intel GPU on this machine!")
device_map = "xpu:" + str(device)
elif isinstance(device, int) and device < 0:
device = None
if device is None:
if device_map is None:
device_map = "cpu"
_model_kwargs = model_kwargs or {}
tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs)
try:
if task == "text-generation":
model = AutoModelForCausalLM.from_pretrained(
model_id,
load_in_4bit=load_in_4bit,
load_in_8bit=load_in_8bit,
quantization_config=quantization_config,
use_llm_runtime=False,
device_map=device_map,
**_model_kwargs,
)
elif task in ("text2text-generation", "summarization"):
model = AutoModelForSeq2SeqLM.from_pretrained(
model_id,
load_in_4bit=load_in_4bit,
load_in_8bit=load_in_8bit,
quantization_config=quantization_config,
use_llm_runtime=False,
device_map=device_map,
**_model_kwargs,
)
else:
raise ValueError(
f"Got invalid task {task}, "
f"currently only {VALID_TASKS} are supported"
)
except ImportError as e:
raise ImportError(
f"Could not load the {task} model due to missing dependencies."
) from e
if "trust_remote_code" in _model_kwargs:
_model_kwargs = {
k: v for k, v in _model_kwargs.items() if k != "trust_remote_code"
}
_pipeline_kwargs = pipeline_kwargs or {}
pipeline = hf_pipeline(
task=task,
model=model,
tokenizer=tokenizer,
device=device,
model_kwargs=_model_kwargs,
**_pipeline_kwargs,
)
if pipeline.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
return cls(
pipeline=pipeline,
model_id=model_id,
model_kwargs=_model_kwargs,
pipeline_kwargs=_pipeline_kwargs,
**kwargs,
)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model_id": self.model_id,
"model_kwargs": self.model_kwargs,
"pipeline_kwargs": self.pipeline_kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "weight_only_quantization"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the HuggingFace model and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain_community.llms import WeightOnlyQuantPipeline
llm = WeightOnlyQuantPipeline.from_model_id(
model_id="google/flan-t5-large",
task="text2text-generation",
)
llm.invoke("This is a prompt.")
"""
response = self.pipeline(prompt)
if self.pipeline.task == "text-generation":
# Text generation return includes the starter text.
text = response[0]["generated_text"][len(prompt) :]
elif self.pipeline.task == "text2text-generation":
text = response[0]["generated_text"]
elif self.pipeline.task == "summarization":
text = response[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/chatglm3.py | import json
import logging
from typing import Any, List, Optional, Union
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.messages import (
AIMessage,
BaseMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from pydantic import Field
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
HEADERS = {"Content-Type": "application/json"}
DEFAULT_TIMEOUT = 30
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
message_dict = {"role": "function", "content": message.content}
else:
raise ValueError(f"Got unknown type {message}")
return message_dict
class ChatGLM3(LLM):
"""ChatGLM3 LLM service."""
model_name: str = Field(default="chatglm3-6b", alias="model")
endpoint_url: str = "http://127.0.0.1:8000/v1/chat/completions"
"""Endpoint URL to use."""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
max_tokens: int = 20000
"""Max token allowed to pass to the model."""
temperature: float = 0.1
"""LLM model temperature from 0 to 10."""
top_p: float = 0.7
"""Top P for nucleus sampling from 0 to 1"""
prefix_messages: List[BaseMessage] = Field(default_factory=list)
"""Series of messages for Chat input."""
streaming: bool = False
"""Whether to stream the results or not."""
http_client: Union[Any, None] = None
timeout: int = DEFAULT_TIMEOUT
@property
def _llm_type(self) -> str:
return "chat_glm_3"
@property
def _invocation_params(self) -> dict:
"""Get the parameters used to invoke the model."""
params = {
"model": self.model_name,
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"stream": self.streaming,
}
return {**params, **(self.model_kwargs or {})}
@property
def client(self) -> Any:
import httpx
return self.http_client or httpx.Client(timeout=self.timeout)
def _get_payload(self, prompt: str) -> dict:
params = self._invocation_params
messages = self.prefix_messages + [HumanMessage(content=prompt)]
params.update(
{
"messages": [_convert_message_to_dict(m) for m in messages],
}
)
return params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to a ChatGLM3 LLM inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = chatglm_llm.invoke("Who are you?")
"""
import httpx
payload = self._get_payload(prompt)
logger.debug(f"ChatGLM3 payload: {payload}")
try:
response = self.client.post(
self.endpoint_url, headers=HEADERS, json=payload
)
except httpx.NetworkError as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
logger.debug(f"ChatGLM3 response: {response}")
if response.status_code != 200:
raise ValueError(f"Failed with response: {response}")
try:
parsed_response = response.json()
if isinstance(parsed_response, dict):
content_keys = "choices"
if content_keys in parsed_response:
choices = parsed_response[content_keys]
if len(choices):
text = choices[0]["message"]["content"]
else:
raise ValueError(f"No content in response : {parsed_response}")
else:
raise ValueError(f"Unexpected response type: {parsed_response}")
except json.JSONDecodeError as e:
raise ValueError(
f"Error raised during decoding response from inference endpoint: {e}."
f"\nResponse: {response.text}"
)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/__init__.py | """
**LLM** classes provide
access to the large language model (**LLM**) APIs and services.
**Class hierarchy:**
.. code-block::
BaseLanguageModel --> BaseLLM --> LLM --> <name> # Examples: AI21, HuggingFaceHub, OpenAI
**Main helpers:**
.. code-block::
LLMResult, PromptValue,
CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun,
CallbackManager, AsyncCallbackManager,
AIMessage, BaseMessage
""" # noqa: E501
from typing import Any, Callable, Dict, Type
from langchain_core._api.deprecation import warn_deprecated
from langchain_core.language_models.llms import BaseLLM
def _import_ai21() -> Type[BaseLLM]:
from langchain_community.llms.ai21 import AI21
return AI21
def _import_aleph_alpha() -> Type[BaseLLM]:
from langchain_community.llms.aleph_alpha import AlephAlpha
return AlephAlpha
def _import_amazon_api_gateway() -> Type[BaseLLM]:
from langchain_community.llms.amazon_api_gateway import AmazonAPIGateway
return AmazonAPIGateway
def _import_anthropic() -> Type[BaseLLM]:
from langchain_community.llms.anthropic import Anthropic
return Anthropic
def _import_anyscale() -> Type[BaseLLM]:
from langchain_community.llms.anyscale import Anyscale
return Anyscale
def _import_aphrodite() -> Type[BaseLLM]:
from langchain_community.llms.aphrodite import Aphrodite
return Aphrodite
def _import_arcee() -> Type[BaseLLM]:
from langchain_community.llms.arcee import Arcee
return Arcee
def _import_aviary() -> Type[BaseLLM]:
from langchain_community.llms.aviary import Aviary
return Aviary
def _import_azureml_endpoint() -> Type[BaseLLM]:
from langchain_community.llms.azureml_endpoint import AzureMLOnlineEndpoint
return AzureMLOnlineEndpoint
def _import_baichuan() -> Type[BaseLLM]:
from langchain_community.llms.baichuan import BaichuanLLM
return BaichuanLLM
def _import_baidu_qianfan_endpoint() -> Type[BaseLLM]:
from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint
return QianfanLLMEndpoint
def _import_bananadev() -> Type[BaseLLM]:
from langchain_community.llms.bananadev import Banana
return Banana
def _import_baseten() -> Type[BaseLLM]:
from langchain_community.llms.baseten import Baseten
return Baseten
def _import_beam() -> Type[BaseLLM]:
from langchain_community.llms.beam import Beam
return Beam
def _import_bedrock() -> Type[BaseLLM]:
from langchain_community.llms.bedrock import Bedrock
return Bedrock
def _import_bigdlllm() -> Type[BaseLLM]:
from langchain_community.llms.bigdl_llm import BigdlLLM
return BigdlLLM
def _import_bittensor() -> Type[BaseLLM]:
from langchain_community.llms.bittensor import NIBittensorLLM
return NIBittensorLLM
def _import_cerebriumai() -> Type[BaseLLM]:
from langchain_community.llms.cerebriumai import CerebriumAI
return CerebriumAI
def _import_chatglm() -> Type[BaseLLM]:
from langchain_community.llms.chatglm import ChatGLM
return ChatGLM
def _import_clarifai() -> Type[BaseLLM]:
from langchain_community.llms.clarifai import Clarifai
return Clarifai
def _import_cohere() -> Type[BaseLLM]:
from langchain_community.llms.cohere import Cohere
return Cohere
def _import_ctransformers() -> Type[BaseLLM]:
from langchain_community.llms.ctransformers import CTransformers
return CTransformers
def _import_ctranslate2() -> Type[BaseLLM]:
from langchain_community.llms.ctranslate2 import CTranslate2
return CTranslate2
def _import_databricks() -> Type[BaseLLM]:
from langchain_community.llms.databricks import Databricks
return Databricks
# deprecated / only for back compat - do not add to __all__
def _import_databricks_chat() -> Any:
warn_deprecated(
since="0.0.22",
removal="1.0",
alternative_import="langchain_community.chat_models.ChatDatabricks",
)
from langchain_community.chat_models.databricks import ChatDatabricks
return ChatDatabricks
def _import_deepinfra() -> Type[BaseLLM]:
from langchain_community.llms.deepinfra import DeepInfra
return DeepInfra
def _import_deepsparse() -> Type[BaseLLM]:
from langchain_community.llms.deepsparse import DeepSparse
return DeepSparse
def _import_edenai() -> Type[BaseLLM]:
from langchain_community.llms.edenai import EdenAI
return EdenAI
def _import_fake() -> Type[BaseLLM]:
from langchain_community.llms.fake import FakeListLLM
return FakeListLLM
def _import_fireworks() -> Type[BaseLLM]:
from langchain_community.llms.fireworks import Fireworks
return Fireworks
def _import_forefrontai() -> Type[BaseLLM]:
from langchain_community.llms.forefrontai import ForefrontAI
return ForefrontAI
def _import_friendli() -> Type[BaseLLM]:
from langchain_community.llms.friendli import Friendli
return Friendli
def _import_gigachat() -> Type[BaseLLM]:
from langchain_community.llms.gigachat import GigaChat
return GigaChat
def _import_google_palm() -> Type[BaseLLM]:
from langchain_community.llms.google_palm import GooglePalm
return GooglePalm
def _import_gooseai() -> Type[BaseLLM]:
from langchain_community.llms.gooseai import GooseAI
return GooseAI
def _import_gpt4all() -> Type[BaseLLM]:
from langchain_community.llms.gpt4all import GPT4All
return GPT4All
def _import_gradient_ai() -> Type[BaseLLM]:
from langchain_community.llms.gradient_ai import GradientLLM
return GradientLLM
def _import_huggingface_endpoint() -> Type[BaseLLM]:
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
return HuggingFaceEndpoint
def _import_huggingface_hub() -> Type[BaseLLM]:
from langchain_community.llms.huggingface_hub import HuggingFaceHub
return HuggingFaceHub
def _import_huggingface_pipeline() -> Type[BaseLLM]:
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
return HuggingFacePipeline
def _import_huggingface_text_gen_inference() -> Type[BaseLLM]:
from langchain_community.llms.huggingface_text_gen_inference import (
HuggingFaceTextGenInference,
)
return HuggingFaceTextGenInference
def _import_human() -> Type[BaseLLM]:
from langchain_community.llms.human import HumanInputLLM
return HumanInputLLM
def _import_ipex_llm() -> Type[BaseLLM]:
from langchain_community.llms.ipex_llm import IpexLLM
return IpexLLM
def _import_javelin_ai_gateway() -> Type[BaseLLM]:
from langchain_community.llms.javelin_ai_gateway import JavelinAIGateway
return JavelinAIGateway
def _import_koboldai() -> Type[BaseLLM]:
from langchain_community.llms.koboldai import KoboldApiLLM
return KoboldApiLLM
def _import_konko() -> Type[BaseLLM]:
from langchain_community.llms.konko import Konko
return Konko
def _import_llamacpp() -> Type[BaseLLM]:
from langchain_community.llms.llamacpp import LlamaCpp
return LlamaCpp
def _import_llamafile() -> Type[BaseLLM]:
from langchain_community.llms.llamafile import Llamafile
return Llamafile
def _import_manifest() -> Type[BaseLLM]:
from langchain_community.llms.manifest import ManifestWrapper
return ManifestWrapper
def _import_minimax() -> Type[BaseLLM]:
from langchain_community.llms.minimax import Minimax
return Minimax
def _import_mlflow() -> Type[BaseLLM]:
from langchain_community.llms.mlflow import Mlflow
return Mlflow
# deprecated / only for back compat - do not add to __all__
def _import_mlflow_chat() -> Any:
warn_deprecated(
since="0.0.22",
removal="1.0",
alternative_import="langchain_community.chat_models.ChatMlflow",
)
from langchain_community.chat_models.mlflow import ChatMlflow
return ChatMlflow
def _import_mlflow_ai_gateway() -> Type[BaseLLM]:
from langchain_community.llms.mlflow_ai_gateway import MlflowAIGateway
return MlflowAIGateway
def _import_mlx_pipeline() -> Type[BaseLLM]:
from langchain_community.llms.mlx_pipeline import MLXPipeline
return MLXPipeline
def _import_modal() -> Type[BaseLLM]:
from langchain_community.llms.modal import Modal
return Modal
def _import_mosaicml() -> Type[BaseLLM]:
from langchain_community.llms.mosaicml import MosaicML
return MosaicML
def _import_nlpcloud() -> Type[BaseLLM]:
from langchain_community.llms.nlpcloud import NLPCloud
return NLPCloud
def _import_oci_md_tgi() -> Type[BaseLLM]:
from langchain_community.llms.oci_data_science_model_deployment_endpoint import (
OCIModelDeploymentTGI,
)
return OCIModelDeploymentTGI
def _import_oci_md_vllm() -> Type[BaseLLM]:
from langchain_community.llms.oci_data_science_model_deployment_endpoint import (
OCIModelDeploymentVLLM,
)
return OCIModelDeploymentVLLM
def _import_oci_md() -> Type[BaseLLM]:
from langchain_community.llms.oci_data_science_model_deployment_endpoint import (
OCIModelDeploymentLLM,
)
return OCIModelDeploymentLLM
def _import_oci_gen_ai() -> Type[BaseLLM]:
from langchain_community.llms.oci_generative_ai import OCIGenAI
return OCIGenAI
def _import_octoai_endpoint() -> Type[BaseLLM]:
from langchain_community.llms.octoai_endpoint import OctoAIEndpoint
return OctoAIEndpoint
def _import_ollama() -> Type[BaseLLM]:
from langchain_community.llms.ollama import Ollama
return Ollama
def _import_opaqueprompts() -> Type[BaseLLM]:
from langchain_community.llms.opaqueprompts import OpaquePrompts
return OpaquePrompts
def _import_azure_openai() -> Type[BaseLLM]:
from langchain_community.llms.openai import AzureOpenAI
return AzureOpenAI
def _import_openai() -> Type[BaseLLM]:
from langchain_community.llms.openai import OpenAI
return OpenAI
def _import_openai_chat() -> Type[BaseLLM]:
from langchain_community.llms.openai import OpenAIChat
return OpenAIChat
def _import_openllm() -> Type[BaseLLM]:
from langchain_community.llms.openllm import OpenLLM
return OpenLLM
def _import_openlm() -> Type[BaseLLM]:
from langchain_community.llms.openlm import OpenLM
return OpenLM
def _import_outlines() -> Type[BaseLLM]:
from langchain_community.llms.outlines import Outlines
return Outlines
def _import_pai_eas_endpoint() -> Type[BaseLLM]:
from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint
return PaiEasEndpoint
def _import_petals() -> Type[BaseLLM]:
from langchain_community.llms.petals import Petals
return Petals
def _import_pipelineai() -> Type[BaseLLM]:
from langchain_community.llms.pipelineai import PipelineAI
return PipelineAI
def _import_predibase() -> Type[BaseLLM]:
from langchain_community.llms.predibase import Predibase
return Predibase
def _import_predictionguard() -> Type[BaseLLM]:
from langchain_community.llms.predictionguard import PredictionGuard
return PredictionGuard
def _import_promptlayer() -> Type[BaseLLM]:
from langchain_community.llms.promptlayer_openai import PromptLayerOpenAI
return PromptLayerOpenAI
def _import_promptlayer_chat() -> Type[BaseLLM]:
from langchain_community.llms.promptlayer_openai import PromptLayerOpenAIChat
return PromptLayerOpenAIChat
def _import_replicate() -> Type[BaseLLM]:
from langchain_community.llms.replicate import Replicate
return Replicate
def _import_rwkv() -> Type[BaseLLM]:
from langchain_community.llms.rwkv import RWKV
return RWKV
def _import_sagemaker_endpoint() -> Type[BaseLLM]:
from langchain_community.llms.sagemaker_endpoint import SagemakerEndpoint
return SagemakerEndpoint
def _import_sambanovacloud() -> Type[BaseLLM]:
from langchain_community.llms.sambanova import SambaNovaCloud
return SambaNovaCloud
def _import_sambastudio() -> Type[BaseLLM]:
from langchain_community.llms.sambanova import SambaStudio
return SambaStudio
def _import_self_hosted() -> Type[BaseLLM]:
from langchain_community.llms.self_hosted import SelfHostedPipeline
return SelfHostedPipeline
def _import_self_hosted_hugging_face() -> Type[BaseLLM]:
from langchain_community.llms.self_hosted_hugging_face import (
SelfHostedHuggingFaceLLM,
)
return SelfHostedHuggingFaceLLM
def _import_stochasticai() -> Type[BaseLLM]:
from langchain_community.llms.stochasticai import StochasticAI
return StochasticAI
def _import_symblai_nebula() -> Type[BaseLLM]:
from langchain_community.llms.symblai_nebula import Nebula
return Nebula
def _import_textgen() -> Type[BaseLLM]:
from langchain_community.llms.textgen import TextGen
return TextGen
def _import_titan_takeoff() -> Type[BaseLLM]:
from langchain_community.llms.titan_takeoff import TitanTakeoff
return TitanTakeoff
def _import_titan_takeoff_pro() -> Type[BaseLLM]:
from langchain_community.llms.titan_takeoff import TitanTakeoff
return TitanTakeoff
def _import_together() -> Type[BaseLLM]:
from langchain_community.llms.together import Together
return Together
def _import_tongyi() -> Type[BaseLLM]:
from langchain_community.llms.tongyi import Tongyi
return Tongyi
def _import_vertex() -> Type[BaseLLM]:
from langchain_community.llms.vertexai import VertexAI
return VertexAI
def _import_vertex_model_garden() -> Type[BaseLLM]:
from langchain_community.llms.vertexai import VertexAIModelGarden
return VertexAIModelGarden
def _import_vllm() -> Type[BaseLLM]:
from langchain_community.llms.vllm import VLLM
return VLLM
def _import_vllm_openai() -> Type[BaseLLM]:
from langchain_community.llms.vllm import VLLMOpenAI
return VLLMOpenAI
def _import_watsonxllm() -> Type[BaseLLM]:
from langchain_community.llms.watsonxllm import WatsonxLLM
return WatsonxLLM
def _import_weight_only_quantization() -> Any:
from langchain_community.llms.weight_only_quantization import (
WeightOnlyQuantPipeline,
)
return WeightOnlyQuantPipeline
def _import_writer() -> Type[BaseLLM]:
from langchain_community.llms.writer import Writer
return Writer
def _import_xinference() -> Type[BaseLLM]:
from langchain_community.llms.xinference import Xinference
return Xinference
def _import_yandex_gpt() -> Type[BaseLLM]:
from langchain_community.llms.yandex import YandexGPT
return YandexGPT
def _import_yuan2() -> Type[BaseLLM]:
from langchain_community.llms.yuan2 import Yuan2
return Yuan2
def _import_volcengine_maas() -> Type[BaseLLM]:
from langchain_community.llms.volcengine_maas import VolcEngineMaasLLM
return VolcEngineMaasLLM
def _import_sparkllm() -> Type[BaseLLM]:
from langchain_community.llms.sparkllm import SparkLLM
return SparkLLM
def _import_you() -> Type[BaseLLM]:
from langchain_community.llms.you import You
return You
def _import_yi() -> Type[BaseLLM]:
from langchain_community.llms.yi import YiLLM
return YiLLM
def __getattr__(name: str) -> Any:
if name == "AI21":
return _import_ai21()
elif name == "AlephAlpha":
return _import_aleph_alpha()
elif name == "AmazonAPIGateway":
return _import_amazon_api_gateway()
elif name == "Anthropic":
return _import_anthropic()
elif name == "Anyscale":
return _import_anyscale()
elif name == "Aphrodite":
return _import_aphrodite()
elif name == "Arcee":
return _import_arcee()
elif name == "Aviary":
return _import_aviary()
elif name == "AzureMLOnlineEndpoint":
return _import_azureml_endpoint()
elif name == "BaichuanLLM" or name == "Baichuan":
return _import_baichuan()
elif name == "QianfanLLMEndpoint":
return _import_baidu_qianfan_endpoint()
elif name == "Banana":
return _import_bananadev()
elif name == "Baseten":
return _import_baseten()
elif name == "Beam":
return _import_beam()
elif name == "Bedrock":
return _import_bedrock()
elif name == "BigdlLLM":
return _import_bigdlllm()
elif name == "NIBittensorLLM":
return _import_bittensor()
elif name == "CerebriumAI":
return _import_cerebriumai()
elif name == "ChatGLM":
return _import_chatglm()
elif name == "Clarifai":
return _import_clarifai()
elif name == "Cohere":
return _import_cohere()
elif name == "CTransformers":
return _import_ctransformers()
elif name == "CTranslate2":
return _import_ctranslate2()
elif name == "Databricks":
return _import_databricks()
elif name == "DeepInfra":
return _import_deepinfra()
elif name == "DeepSparse":
return _import_deepsparse()
elif name == "EdenAI":
return _import_edenai()
elif name == "FakeListLLM":
return _import_fake()
elif name == "Fireworks":
return _import_fireworks()
elif name == "ForefrontAI":
return _import_forefrontai()
elif name == "Friendli":
return _import_friendli()
elif name == "GigaChat":
return _import_gigachat()
elif name == "GooglePalm":
return _import_google_palm()
elif name == "GooseAI":
return _import_gooseai()
elif name == "GPT4All":
return _import_gpt4all()
elif name == "GradientLLM":
return _import_gradient_ai()
elif name == "HuggingFaceEndpoint":
return _import_huggingface_endpoint()
elif name == "HuggingFaceHub":
return _import_huggingface_hub()
elif name == "HuggingFacePipeline":
return _import_huggingface_pipeline()
elif name == "HuggingFaceTextGenInference":
return _import_huggingface_text_gen_inference()
elif name == "HumanInputLLM":
return _import_human()
elif name == "IpexLLM":
return _import_ipex_llm()
elif name == "JavelinAIGateway":
return _import_javelin_ai_gateway()
elif name == "KoboldApiLLM":
return _import_koboldai()
elif name == "Konko":
return _import_konko()
elif name == "LlamaCpp":
return _import_llamacpp()
elif name == "Llamafile":
return _import_llamafile()
elif name == "ManifestWrapper":
return _import_manifest()
elif name == "Minimax":
return _import_minimax()
elif name == "Mlflow":
return _import_mlflow()
elif name == "MlflowAIGateway":
return _import_mlflow_ai_gateway()
elif name == "MLXPipeline":
return _import_mlx_pipeline()
elif name == "Modal":
return _import_modal()
elif name == "MosaicML":
return _import_mosaicml()
elif name == "NLPCloud":
return _import_nlpcloud()
elif name == "OCIModelDeploymentTGI":
return _import_oci_md_tgi()
elif name == "OCIModelDeploymentVLLM":
return _import_oci_md_vllm()
elif name == "OCIModelDeploymentLLM":
return _import_oci_md()
elif name == "OCIGenAI":
return _import_oci_gen_ai()
elif name == "OctoAIEndpoint":
return _import_octoai_endpoint()
elif name == "Ollama":
return _import_ollama()
elif name == "OpaquePrompts":
return _import_opaqueprompts()
elif name == "AzureOpenAI":
return _import_azure_openai()
elif name == "OpenAI":
return _import_openai()
elif name == "OpenAIChat":
return _import_openai_chat()
elif name == "OpenLLM":
return _import_openllm()
elif name == "OpenLM":
return _import_openlm()
elif name == "Outlines":
return _import_outlines()
elif name == "PaiEasEndpoint":
return _import_pai_eas_endpoint()
elif name == "Petals":
return _import_petals()
elif name == "PipelineAI":
return _import_pipelineai()
elif name == "Predibase":
return _import_predibase()
elif name == "PredictionGuard":
return _import_predictionguard()
elif name == "PromptLayerOpenAI":
return _import_promptlayer()
elif name == "PromptLayerOpenAIChat":
return _import_promptlayer_chat()
elif name == "Replicate":
return _import_replicate()
elif name == "RWKV":
return _import_rwkv()
elif name == "SagemakerEndpoint":
return _import_sagemaker_endpoint()
elif name == "SambaNovaCloud":
return _import_sambanovacloud()
elif name == "SambaStudio":
return _import_sambastudio()
elif name == "SelfHostedPipeline":
return _import_self_hosted()
elif name == "SelfHostedHuggingFaceLLM":
return _import_self_hosted_hugging_face()
elif name == "StochasticAI":
return _import_stochasticai()
elif name == "Nebula":
return _import_symblai_nebula()
elif name == "TextGen":
return _import_textgen()
elif name == "TitanTakeoff":
return _import_titan_takeoff()
elif name == "TitanTakeoffPro":
return _import_titan_takeoff_pro()
elif name == "Together":
return _import_together()
elif name == "Tongyi":
return _import_tongyi()
elif name == "VertexAI":
return _import_vertex()
elif name == "VertexAIModelGarden":
return _import_vertex_model_garden()
elif name == "VLLM":
return _import_vllm()
elif name == "VLLMOpenAI":
return _import_vllm_openai()
elif name == "WatsonxLLM":
return _import_watsonxllm()
elif name == "WeightOnlyQuantPipeline":
return _import_weight_only_quantization()
elif name == "Writer":
return _import_writer()
elif name == "Xinference":
return _import_xinference()
elif name == "YandexGPT":
return _import_yandex_gpt()
elif name == "Yuan2":
return _import_yuan2()
elif name == "VolcEngineMaasLLM":
return _import_volcengine_maas()
elif name == "SparkLLM":
return _import_sparkllm()
elif name == "YiLLM":
return _import_yi()
elif name == "You":
return _import_you()
elif name == "type_to_cls_dict":
# for backwards compatibility
type_to_cls_dict: Dict[str, Type[BaseLLM]] = {
k: v() for k, v in get_type_to_cls_dict().items()
}
return type_to_cls_dict
else:
raise AttributeError(f"Could not find: {name}")
__all__ = [
"AI21",
"AlephAlpha",
"AmazonAPIGateway",
"Anthropic",
"Anyscale",
"Aphrodite",
"Arcee",
"Aviary",
"AzureMLOnlineEndpoint",
"AzureOpenAI",
"BaichuanLLM",
"Banana",
"Baseten",
"Beam",
"Bedrock",
"CTransformers",
"CTranslate2",
"CerebriumAI",
"ChatGLM",
"Clarifai",
"Cohere",
"Databricks",
"DeepInfra",
"DeepSparse",
"EdenAI",
"FakeListLLM",
"Fireworks",
"ForefrontAI",
"Friendli",
"GPT4All",
"GigaChat",
"GooglePalm",
"GooseAI",
"GradientLLM",
"HuggingFaceEndpoint",
"HuggingFaceHub",
"HuggingFacePipeline",
"HuggingFaceTextGenInference",
"HumanInputLLM",
"IpexLLM",
"JavelinAIGateway",
"KoboldApiLLM",
"Konko",
"LlamaCpp",
"Llamafile",
"ManifestWrapper",
"Minimax",
"Mlflow",
"MlflowAIGateway",
"MLXPipeline",
"Modal",
"MosaicML",
"NIBittensorLLM",
"NLPCloud",
"Nebula",
"OCIGenAI",
"OCIModelDeploymentTGI",
"OCIModelDeploymentVLLM",
"OCIModelDeploymentLLM",
"OctoAIEndpoint",
"Ollama",
"OpaquePrompts",
"OpenAI",
"OpenAIChat",
"OpenLLM",
"OpenLM",
"Outlines",
"PaiEasEndpoint",
"Petals",
"PipelineAI",
"Predibase",
"PredictionGuard",
"PromptLayerOpenAI",
"PromptLayerOpenAIChat",
"QianfanLLMEndpoint",
"RWKV",
"Replicate",
"SagemakerEndpoint",
"SambaNovaCloud",
"SambaStudio",
"SelfHostedHuggingFaceLLM",
"SelfHostedPipeline",
"SparkLLM",
"StochasticAI",
"TextGen",
"TitanTakeoff",
"TitanTakeoffPro",
"Together",
"Tongyi",
"VLLM",
"VLLMOpenAI",
"VertexAI",
"VertexAIModelGarden",
"VolcEngineMaasLLM",
"WatsonxLLM",
"WeightOnlyQuantPipeline",
"Writer",
"Xinference",
"YandexGPT",
"Yuan2",
"YiLLM",
"You",
]
def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]:
return {
"ai21": _import_ai21,
"aleph_alpha": _import_aleph_alpha,
"amazon_api_gateway": _import_amazon_api_gateway,
"amazon_bedrock": _import_bedrock,
"anthropic": _import_anthropic,
"anyscale": _import_anyscale,
"arcee": _import_arcee,
"aviary": _import_aviary,
"azure": _import_azure_openai,
"azureml_endpoint": _import_azureml_endpoint,
"baichuan": _import_baichuan,
"bananadev": _import_bananadev,
"baseten": _import_baseten,
"beam": _import_beam,
"cerebriumai": _import_cerebriumai,
"chat_glm": _import_chatglm,
"clarifai": _import_clarifai,
"cohere": _import_cohere,
"ctransformers": _import_ctransformers,
"ctranslate2": _import_ctranslate2,
"databricks": _import_databricks,
"databricks-chat": _import_databricks_chat, # deprecated / only for back compat
"deepinfra": _import_deepinfra,
"deepsparse": _import_deepsparse,
"edenai": _import_edenai,
"fake-list": _import_fake,
"forefrontai": _import_forefrontai,
"friendli": _import_friendli,
"giga-chat-model": _import_gigachat,
"google_palm": _import_google_palm,
"gooseai": _import_gooseai,
"gradient": _import_gradient_ai,
"gpt4all": _import_gpt4all,
"huggingface_endpoint": _import_huggingface_endpoint,
"huggingface_hub": _import_huggingface_hub,
"huggingface_pipeline": _import_huggingface_pipeline,
"huggingface_textgen_inference": _import_huggingface_text_gen_inference,
"human-input": _import_human,
"koboldai": _import_koboldai,
"konko": _import_konko,
"llamacpp": _import_llamacpp,
"llamafile": _import_llamafile,
"textgen": _import_textgen,
"minimax": _import_minimax,
"mlflow": _import_mlflow,
"mlflow-chat": _import_mlflow_chat, # deprecated / only for back compat
"mlflow-ai-gateway": _import_mlflow_ai_gateway,
"mlx_pipeline": _import_mlx_pipeline,
"modal": _import_modal,
"mosaic": _import_mosaicml,
"nebula": _import_symblai_nebula,
"nibittensor": _import_bittensor,
"nlpcloud": _import_nlpcloud,
"oci_model_deployment_tgi_endpoint": _import_oci_md_tgi,
"oci_model_deployment_vllm_endpoint": _import_oci_md_vllm,
"oci_model_deployment_endpoint": _import_oci_md,
"oci_generative_ai": _import_oci_gen_ai,
"octoai_endpoint": _import_octoai_endpoint,
"ollama": _import_ollama,
"openai": _import_openai,
"openlm": _import_openlm,
"pai_eas_endpoint": _import_pai_eas_endpoint,
"petals": _import_petals,
"pipelineai": _import_pipelineai,
"predibase": _import_predibase,
"opaqueprompts": _import_opaqueprompts,
"replicate": _import_replicate,
"rwkv": _import_rwkv,
"sagemaker_endpoint": _import_sagemaker_endpoint,
"sambanovacloud": _import_sambanovacloud,
"sambastudio": _import_sambastudio,
"self_hosted": _import_self_hosted,
"self_hosted_hugging_face": _import_self_hosted_hugging_face,
"stochasticai": _import_stochasticai,
"together": _import_together,
"tongyi": _import_tongyi,
"titan_takeoff": _import_titan_takeoff,
"titan_takeoff_pro": _import_titan_takeoff_pro,
"vertexai": _import_vertex,
"vertexai_model_garden": _import_vertex_model_garden,
"openllm": _import_openllm,
"openllm_client": _import_openllm,
"outlines": _import_outlines,
"vllm": _import_vllm,
"vllm_openai": _import_vllm_openai,
"watsonxllm": _import_watsonxllm,
"weight_only_quantization": _import_weight_only_quantization,
"writer": _import_writer,
"xinference": _import_xinference,
"javelin-ai-gateway": _import_javelin_ai_gateway,
"qianfan_endpoint": _import_baidu_qianfan_endpoint,
"yandex_gpt": _import_yandex_gpt,
"yuan2": _import_yuan2,
"VolcEngineMaasLLM": _import_volcengine_maas,
"SparkLLM": _import_sparkllm,
"yi": _import_yi,
"you": _import_you,
}
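# A minimal usage sketch (illustrative only): the ``__getattr__`` hook above
# lazily imports provider classes on first attribute access, and
# ``get_type_to_cls_dict`` exposes the same registry keyed by the ``_type``
# strings used in saved LLM configs. The example uses the dependency-free
# ``FakeListLLM`` so no provider SDK is required.
if __name__ == "__main__":
    from langchain_community import llms

    # Attribute access triggers the lazy import machinery in ``__getattr__``.
    fake = llms.FakeListLLM(responses=["hello"])

    # The same class can be resolved through its ``_type`` registry key.
    importer = llms.get_type_to_cls_dict()["fake-list"]
    assert importer() is type(fake)

    print(fake.invoke("ping"))  # -> "hello"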
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/mlflow.py | from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from urllib.parse import urlparse
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import LLM
from pydantic import Field, PrivateAttr
class Mlflow(LLM):
"""MLflow LLM service.
To use, you should have the `mlflow[genai]` python package installed.
For more information, see https://mlflow.org/docs/latest/llms/deployments.
Example:
.. code-block:: python
from langchain_community.llms import Mlflow
completions = Mlflow(
target_uri="http://localhost:5000",
endpoint="test",
temperature=0.1,
)
"""
endpoint: str
"""The endpoint to use."""
target_uri: str
"""The target URI to use."""
temperature: float = 0.0
"""The sampling temperature."""
n: int = 1
"""The number of completion choices to generate."""
stop: Optional[List[str]] = None
"""The stop sequence."""
max_tokens: Optional[int] = None
"""The maximum number of tokens to generate."""
extra_params: Dict[str, Any] = Field(default_factory=dict)
"""Any extra parameters to pass to the endpoint."""
"""Extra parameters such as `temperature`."""
_client: Any = PrivateAttr()
def __init__(self, **kwargs: Any):
super().__init__(**kwargs)
self._validate_uri()
try:
from mlflow.deployments import get_deploy_client
self._client = get_deploy_client(self.target_uri)
except ImportError as e:
raise ImportError(
"Failed to create the client. "
"Please run `pip install mlflow[genai]` to install "
"required dependencies."
) from e
def _validate_uri(self) -> None:
if self.target_uri == "databricks":
return
allowed = ["http", "https", "databricks"]
if urlparse(self.target_uri).scheme not in allowed:
raise ValueError(
f"Invalid target URI: {self.target_uri}. "
f"The scheme must be one of {allowed}."
)
@property
def _default_params(self) -> Dict[str, Any]:
return {
"target_uri": self.target_uri,
"endpoint": self.endpoint,
"temperature": self.temperature,
"n": self.n,
"stop": self.stop,
"max_tokens": self.max_tokens,
"extra_params": self.extra_params,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
return self._default_params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
data: Dict[str, Any] = {
"prompt": prompt,
"temperature": self.temperature,
"n": self.n,
**self.extra_params,
**kwargs,
}
if stop := self.stop or stop:
data["stop"] = stop
if self.max_tokens is not None:
data["max_tokens"] = self.max_tokens
resp = self._client.predict(endpoint=self.endpoint, inputs=data)
return resp["choices"][0]["text"]
@property
def _llm_type(self) -> str:
return "mlflow"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/fireworks.py | import asyncio
from concurrent.futures import ThreadPoolExecutor
from typing import Any, AsyncIterator, Callable, Dict, Iterator, List, Optional, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import BaseLLM, create_base_retry_decorator
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from langchain_core.utils import convert_to_secret_str, pre_init
from langchain_core.utils.env import get_from_dict_or_env
from pydantic import Field, SecretStr
def _stream_response_to_generation_chunk(
stream_response: Any,
) -> GenerationChunk:
"""Convert a stream response to a generation chunk."""
return GenerationChunk(
text=stream_response.choices[0].text,
generation_info=dict(
finish_reason=stream_response.choices[0].finish_reason,
logprobs=stream_response.choices[0].logprobs,
),
)
@deprecated(
since="0.0.26",
removal="1.0",
alternative_import="langchain_fireworks.Fireworks",
)
class Fireworks(BaseLLM):
"""Fireworks models."""
model: str = "accounts/fireworks/models/llama-v2-7b-chat"
model_kwargs: dict = Field(
default_factory=lambda: {
"temperature": 0.7,
"max_tokens": 512,
"top_p": 1,
}.copy()
)
fireworks_api_key: Optional[SecretStr] = None
max_retries: int = 20
batch_size: int = 20
use_retry: bool = True
@property
def lc_secrets(self) -> Dict[str, str]:
return {"fireworks_api_key": "FIREWORKS_API_KEY"}
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "llms", "fireworks"]
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key in environment."""
try:
import fireworks.client
except ImportError as e:
raise ImportError(
"Could not import fireworks-ai python package. "
"Please install it with `pip install fireworks-ai`."
) from e
fireworks_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "fireworks_api_key", "FIREWORKS_API_KEY")
)
fireworks.client.api_key = fireworks_api_key.get_secret_value()
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fireworks"
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to Fireworks endpoint with k unique prompts.
Args:
prompts: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The full LLM output.
"""
params = {
"model": self.model,
**self.model_kwargs,
}
sub_prompts = self.get_batch_prompts(prompts)
choices = []
for _prompts in sub_prompts:
response = completion_with_retry_batching(
self,
self.use_retry,
prompt=_prompts,
run_manager=run_manager,
stop=stop,
**params,
)
choices.extend(response)
return self.create_llm_result(choices, prompts)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to Fireworks endpoint async with k unique prompts."""
params = {
"model": self.model,
**self.model_kwargs,
}
sub_prompts = self.get_batch_prompts(prompts)
choices = []
for _prompts in sub_prompts:
response = await acompletion_with_retry_batching(
self,
self.use_retry,
prompt=_prompts,
run_manager=run_manager,
stop=stop,
**params,
)
choices.extend(response)
return self.create_llm_result(choices, prompts)
def get_batch_prompts(
self,
prompts: List[str],
) -> List[List[str]]:
"""Get the sub prompts for llm call."""
sub_prompts = [
prompts[i : i + self.batch_size]
for i in range(0, len(prompts), self.batch_size)
]
return sub_prompts
def create_llm_result(self, choices: Any, prompts: List[str]) -> LLMResult:
"""Create the LLMResult from the choices and prompts."""
generations = []
for i, _ in enumerate(prompts):
sub_choices = choices[i : (i + 1)]
generations.append(
[
Generation(
text=choice.__dict__["choices"][0].text,
)
for choice in sub_choices
]
)
llm_output = {"model": self.model}
return LLMResult(generations=generations, llm_output=llm_output)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
params = {
"model": self.model,
"prompt": prompt,
"stream": True,
**self.model_kwargs,
}
for stream_resp in completion_with_retry(
self, self.use_retry, run_manager=run_manager, stop=stop, **params
):
chunk = _stream_response_to_generation_chunk(stream_resp)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
params = {
"model": self.model,
"prompt": prompt,
"stream": True,
**self.model_kwargs,
}
async for stream_resp in await acompletion_with_retry_streaming(
self, self.use_retry, run_manager=run_manager, stop=stop, **params
):
chunk = _stream_response_to_generation_chunk(stream_resp)
if run_manager:
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
def conditional_decorator(
condition: bool, decorator: Callable[[Any], Any]
) -> Callable[[Any], Any]:
"""Conditionally apply a decorator.
Args:
condition: A boolean indicating whether to apply the decorator.
decorator: A decorator function.
Returns:
A decorator function.
"""
def actual_decorator(func: Callable[[Any], Any]) -> Callable[[Any], Any]:
if condition:
return decorator(func)
return func
return actual_decorator
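# Illustrative usage only: ``conditional_decorator`` lets the retry wrappers
# below be switched off when a ``Fireworks`` instance is created with
# ``use_retry=False``. Conceptually:
#
#     retry = _create_retry_decorator(llm)
#
#     @conditional_decorator(llm.use_retry, retry)
#     def _call_api(**kwargs: Any) -> Any:
#         ...
#
# With the condition False the function is returned unchanged; with True it is
# wrapped by the tenacity retry logic from ``create_base_retry_decorator``.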
def completion_with_retry(
llm: Fireworks,
use_retry: bool,
*,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
import fireworks.client
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@conditional_decorator(use_retry, retry_decorator)
def _completion_with_retry(**kwargs: Any) -> Any:
return fireworks.client.Completion.create(
**kwargs,
)
return _completion_with_retry(**kwargs)
async def acompletion_with_retry(
llm: Fireworks,
use_retry: bool,
*,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
import fireworks.client
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@conditional_decorator(use_retry, retry_decorator)
async def _completion_with_retry(**kwargs: Any) -> Any:
return await fireworks.client.Completion.acreate(
**kwargs,
)
return await _completion_with_retry(**kwargs)
def completion_with_retry_batching(
llm: Fireworks,
use_retry: bool,
*,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
import fireworks.client
prompt = kwargs["prompt"]
del kwargs["prompt"]
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@conditional_decorator(use_retry, retry_decorator)
def _completion_with_retry(prompt: str) -> Any:
return fireworks.client.Completion.create(**kwargs, prompt=prompt)
def batch_sync_run() -> List:
with ThreadPoolExecutor() as executor:
results = list(executor.map(_completion_with_retry, prompt))
return results
return batch_sync_run()
async def acompletion_with_retry_batching(
llm: Fireworks,
use_retry: bool,
*,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
import fireworks.client
prompt = kwargs["prompt"]
del kwargs["prompt"]
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@conditional_decorator(use_retry, retry_decorator)
async def _completion_with_retry(prompt: str) -> Any:
return await fireworks.client.Completion.acreate(**kwargs, prompt=prompt)
def run_coroutine_in_new_loop(
coroutine_func: Any, *args: Dict, **kwargs: Dict
) -> Any:
new_loop = asyncio.new_event_loop()
try:
asyncio.set_event_loop(new_loop)
return new_loop.run_until_complete(coroutine_func(*args, **kwargs))
finally:
new_loop.close()
async def batch_sync_run() -> List:
with ThreadPoolExecutor() as executor:
results = list(
executor.map(
run_coroutine_in_new_loop,
[_completion_with_retry] * len(prompt),
prompt,
)
)
return results
return await batch_sync_run()
async def acompletion_with_retry_streaming(
llm: Fireworks,
use_retry: bool,
*,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call for streaming."""
import fireworks.client
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@conditional_decorator(use_retry, retry_decorator)
async def _completion_with_retry(**kwargs: Any) -> Any:
return fireworks.client.Completion.acreate(
**kwargs,
)
return await _completion_with_retry(**kwargs)
def _create_retry_decorator(
llm: Fireworks,
*,
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
"""Define retry mechanism."""
import fireworks.client
errors = [
fireworks.client.error.RateLimitError,
fireworks.client.error.InternalServerError,
fireworks.client.error.BadGatewayError,
fireworks.client.error.ServiceUnavailableError,
]
return create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/loading.py | """Base interface for loading large language model APIs."""
import json
from pathlib import Path
from typing import Any, Union
import yaml
from langchain_core.language_models.llms import BaseLLM
from langchain_core.utils.pydantic import get_fields
from langchain_community.llms import get_type_to_cls_dict
_ALLOW_DANGEROUS_DESERIALIZATION_ARG = "allow_dangerous_deserialization"
def load_llm_from_config(config: dict, **kwargs: Any) -> BaseLLM:
"""Load LLM from Config Dict."""
if "_type" not in config:
raise ValueError("Must specify an LLM Type in config")
config_type = config.pop("_type")
type_to_cls_dict = get_type_to_cls_dict()
if config_type not in type_to_cls_dict:
raise ValueError(f"Loading {config_type} LLM not supported")
llm_cls = type_to_cls_dict[config_type]()
load_kwargs = {}
if _ALLOW_DANGEROUS_DESERIALIZATION_ARG in get_fields(llm_cls):
load_kwargs[_ALLOW_DANGEROUS_DESERIALIZATION_ARG] = kwargs.get(
_ALLOW_DANGEROUS_DESERIALIZATION_ARG, False
)
return llm_cls(**config, **load_kwargs)
def load_llm(file: Union[str, Path], **kwargs: Any) -> BaseLLM:
"""Load LLM from a file."""
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix == ".json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix.endswith((".yaml", ".yml")):
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError("File type must be json or yaml")
# Load the LLM from the config now.
return load_llm_from_config(config, **kwargs)
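# A minimal round-trip sketch (illustrative only): ``load_llm_from_config``
# dispatches on the ``_type`` key via ``get_type_to_cls_dict``, so a config
# only needs that key plus the constructor fields of the target class. The
# example uses the dependency-free "fake-list" type and a temporary file.
if __name__ == "__main__":
    import tempfile

    config = {"_type": "fake-list", "responses": ["hello"]}
    llm = load_llm_from_config(dict(config))
    print(llm.invoke("ping"))  # -> "hello"

    # Loading from disk works the same way for .json and .yaml/.yml files.
    path = Path(tempfile.mkdtemp()) / "fake_llm.json"
    path.write_text(json.dumps(config))
    print(load_llm(path).invoke("ping"))  # -> "hello"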
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/writer.py | from typing import Any, AsyncIterator, Dict, Iterator, List, Mapping, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import get_from_dict_or_env
from pydantic import ConfigDict, Field, SecretStr, model_validator
class Writer(LLM):
"""Writer large language models.
To use, you should have the ``writer-sdk`` Python package installed, and the
environment variable ``WRITER_API_KEY`` set with your API key.
Example:
.. code-block:: python
from langchain_community.llms import Writer as WriterLLM
from writerai import Writer, AsyncWriter
client = Writer()
async_client = AsyncWriter()
chat = WriterLLM(
client=client,
async_client=async_client
)
"""
client: Any = Field(default=None, exclude=True) #: :meta private:
async_client: Any = Field(default=None, exclude=True) #: :meta private:
api_key: Optional[SecretStr] = Field(default=None)
"""Writer API key."""
model_name: str = Field(default="palmyra-x-003-instruct", alias="model")
"""Model name to use."""
max_tokens: Optional[int] = None
"""The maximum number of tokens that the model can generate in the response."""
temperature: Optional[float] = 0.7
"""Controls the randomness of the model's outputs. Higher values lead to more
random outputs, while lower values make the model more deterministic."""
top_p: Optional[float] = None
"""Used to control the nucleus sampling, where only the most probable tokens
with a cumulative probability of top_p are considered for sampling, providing
a way to fine-tune the randomness of predictions."""
stop: Optional[List[str]] = None
"""Specifies stopping conditions for the model's output generation. This can
be an array of strings or a single string that the model will look for as a
signal to stop generating further tokens."""
best_of: Optional[int] = None
"""Specifies the number of completions to generate and return the best one.
Useful for generating multiple outputs and choosing the best based on some
criteria."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
model_config = ConfigDict(populate_by_name=True)
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling Writer API."""
return {
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
"stop": self.stop,
"best_of": self.best_of,
**self.model_kwargs,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model_name,
**self._default_params,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "writer"
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validates that api key is passed and creates Writer clients."""
try:
from writerai import AsyncClient, Client
except ImportError as e:
raise ImportError(
"Could not import writerai python package. "
"Please install it with `pip install writerai`."
) from e
if not values.get("client"):
values.update(
{
"client": Client(
api_key=get_from_dict_or_env(
values, "api_key", "WRITER_API_KEY"
)
)
}
)
if not values.get("async_client"):
values.update(
{
"async_client": AsyncClient(
api_key=get_from_dict_or_env(
values, "api_key", "WRITER_API_KEY"
)
)
}
)
if not (
type(values.get("client")) is Client
and type(values.get("async_client")) is AsyncClient
):
raise ValueError(
"'client' attribute must be with type 'Client' and "
"'async_client' must be with type 'AsyncClient' from 'writerai' package"
)
return values
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
params = {**self._identifying_params, **kwargs}
if stop is not None:
params.update({"stop": stop})
text = self.client.completions.create(prompt=prompt, **params).choices[0].text
return text
async def _acall(
self,
prompt: str,
stop: Optional[list[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
params = {**self._identifying_params, **kwargs}
if stop is not None:
params.update({"stop": stop})
response = await self.async_client.completions.create(prompt=prompt, **params)
text = response.choices[0].text
return text
def _stream(
self,
prompt: str,
stop: Optional[list[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
params = {**self._identifying_params, **kwargs, "stream": True}
if stop is not None:
params.update({"stop": stop})
response = self.client.completions.create(prompt=prompt, **params)
for chunk in response:
if run_manager:
run_manager.on_llm_new_token(chunk.value)
yield GenerationChunk(text=chunk.value)
async def _astream(
self,
prompt: str,
stop: Optional[list[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
params = {**self._identifying_params, **kwargs, "stream": True}
if stop is not None:
params.update({"stop": stop})
response = await self.async_client.completions.create(prompt=prompt, **params)
async for chunk in response:
if run_manager:
await run_manager.on_llm_new_token(chunk.value)
yield GenerationChunk(text=chunk.value)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/solar.py | from typing import Any, Dict, List, Optional
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import LLM
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import (
BaseModel,
ConfigDict,
Field,
SecretStr,
model_validator,
)
from langchain_community.llms.utils import enforce_stop_tokens
SOLAR_SERVICE_URL_BASE = "https://api.upstage.ai/v1/solar"
SOLAR_SERVICE = "https://api.upstage.ai"
class _SolarClient(BaseModel):
"""An API client that talks to the Solar server."""
api_key: SecretStr
"""The API key to use for authentication."""
base_url: str = SOLAR_SERVICE_URL_BASE
def completion(self, request: Any) -> Any:
headers = {"Authorization": f"Bearer {self.api_key.get_secret_value()}"}
response = requests.post(
f"{self.base_url}/chat/completions",
headers=headers,
json=request,
)
if not response.ok:
raise ValueError(f"HTTP {response.status_code} error: {response.text}")
return response.json()["choices"][0]["message"]["content"]
class SolarCommon(BaseModel):
"""Common configuration for Solar LLMs."""
_client: _SolarClient
base_url: str = SOLAR_SERVICE_URL_BASE
solar_api_key: Optional[SecretStr] = Field(default=None, alias="api_key")
"""Solar API key. Get it here: https://console.upstage.ai/services/solar"""
model_name: str = Field(default="solar-1-mini-chat", alias="model")
"""Model name. Available models listed here: https://console.upstage.ai/services/solar"""
max_tokens: int = Field(default=1024)
temperature: float = 0.3
model_config = ConfigDict(
populate_by_name=True,
arbitrary_types_allowed=True,
extra="ignore",
protected_namespaces=(),
)
@property
def lc_secrets(self) -> dict:
return {"solar_api_key": "SOLAR_API_KEY"}
@property
def _default_params(self) -> Dict[str, Any]:
return {
"model": self.model_name,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
}
@property
def _invocation_params(self) -> Dict[str, Any]:
return {**{"model": self.model_name}, **self._default_params}
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
return values
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
api_key = get_from_dict_or_env(values, "solar_api_key", "SOLAR_API_KEY")
if api_key is None or len(api_key) == 0:
raise ValueError("SOLAR_API_KEY must be configured")
values["solar_api_key"] = convert_to_secret_str(api_key)
if "base_url" not in values:
values["base_url"] = SOLAR_SERVICE_URL_BASE
if "base_url" in values and not values["base_url"].startswith(SOLAR_SERVICE):
raise ValueError("base_url must match with: " + SOLAR_SERVICE)
values["_client"] = _SolarClient(
api_key=values["solar_api_key"], base_url=values["base_url"]
)
return values
@property
def _llm_type(self) -> str:
return "solar"
class Solar(SolarCommon, LLM):
"""Solar large language models.
To use, you should have the environment variable
``SOLAR_API_KEY`` set with your API key.
Referenced from https://console.upstage.ai/services/solar
"""
model_config = ConfigDict(
populate_by_name=True,
)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
request = self._invocation_params
request["messages"] = [{"role": "user", "content": prompt}]
request.update(kwargs)
text = self._client.completion(request)
if stop is not None:
# This is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
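# A minimal usage sketch (illustrative only), assuming ``SOLAR_API_KEY`` is set
# in the environment; the model name shown is simply the class default.
if __name__ == "__main__":
    llm = Solar(model="solar-1-mini-chat", max_tokens=256, temperature=0.3)
    print(llm.invoke("What is Solar?"))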
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/bittensor.py | import http.client
import json
import ssl
from typing import Any, List, Mapping, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
class NIBittensorLLM(LLM):
"""NIBittensor LLMs
NIBittensorLLM is created by Neural Internet (https://neuralinternet.ai/),
powered by Bittensor, a decentralized network full of different AI models.
To analyze API keys and logs of your usage, visit
https://api.neuralinternet.ai/api-keys
https://api.neuralinternet.ai/logs
Example:
.. code-block:: python
from langchain_community.llms import NIBittensorLLM
llm = NIBittensorLLM()
"""
system_prompt: Optional[str]
"""Provide system prompt that you want to supply it to model before every prompt"""
top_responses: Optional[int] = 0
"""Provide top_responses to get Top N miner responses on one request.May get delayed
Don't use in Production"""
@property
def _llm_type(self) -> str:
return "NIBittensorLLM"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""
Wrapper around the Bittensor top miner models, built by Neural Internet.
Calls Neural Internet's BTVEP server and returns the output.
Parameters (optional):
system_prompt(str): A system prompt defining how your model should respond.
top_responses(int): Total top miner responses to retrieve from Bittensor
protocol.
Return:
The generated response(s).
Example:
.. code-block:: python
from langchain_community.llms import NIBittensorLLM
llm = NIBittensorLLM(system_prompt="Act like you are programmer with \
5+ years of experience.")
"""
# Creating HTTPS connection with SSL
context = ssl.create_default_context()
context.check_hostname = True
conn = http.client.HTTPSConnection("test.neuralinternet.ai", context=context)
# Sanitizing User Input before passing to API.
if isinstance(self.top_responses, int):
top_n = min(100, self.top_responses)
else:
top_n = 0
default_prompt = "You are an assistant which is created by Neural Internet(NI) \
in decentralized network named as a Bittensor."
if self.system_prompt is None:
system_prompt = (
default_prompt
+ " Your task is to provide accurate response based on user prompt"
)
else:
system_prompt = default_prompt + str(self.system_prompt)
# Retrieving API KEY to pass into header of each request
conn.request("GET", "/admin/api-keys/")
api_key_response = conn.getresponse()
api_keys_data = (
api_key_response.read().decode("utf-8").replace("\n", "").replace("\t", "")
)
api_keys_json = json.loads(api_keys_data)
api_key = api_keys_json[0]["api_key"]
# Creating Header and getting top benchmark miner uids
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}",
"Endpoint-Version": "2023-05-19",
}
conn.request("GET", "/top_miner_uids", headers=headers)
miner_response = conn.getresponse()
miner_data = (
miner_response.read().decode("utf-8").replace("\n", "").replace("\t", "")
)
uids = json.loads(miner_data)
# Condition for benchmark miner response
if isinstance(uids, list) and uids and not top_n:
for uid in uids:
try:
payload = json.dumps(
{
"uids": [uid],
"messages": [
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt},
],
}
)
conn.request("POST", "/chat", payload, headers)
init_response = conn.getresponse()
init_data = (
init_response.read()
.decode("utf-8")
.replace("\n", "")
.replace("\t", "")
)
init_json = json.loads(init_data)
if "choices" not in init_json:
continue
reply = init_json["choices"][0]["message"]["content"]
conn.close()
return reply
except Exception:
continue
# For top miner based on bittensor response
try:
payload = json.dumps(
{
"top_n": top_n,
"messages": [
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt},
],
}
)
conn.request("POST", "/chat", payload, headers)
response = conn.getresponse()
utf_string = (
response.read().decode("utf-8").replace("\n", "").replace("\t", "")
)
if top_n:
conn.close()
return utf_string
json_resp = json.loads(utf_string)
reply = json_resp["choices"][0]["message"]["content"]
conn.close()
return reply
except Exception as e:
conn.request("GET", f"/error_msg?e={e}&p={prompt}", headers=headers)
return "Sorry I am unable to provide response now, Please try again later."
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"system_prompt": self.system_prompt,
"top_responses": self.top_responses,
}
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/layerup_security.py | import logging
from typing import Any, Callable, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from pydantic import model_validator
logger = logging.getLogger(__name__)
def default_guardrail_violation_handler(violation: dict) -> str:
"""Default guardrail violation handler.
Args:
violation (dict): The violation dictionary.
Returns:
str: The canned response.
"""
if violation.get("canned_response"):
return violation["canned_response"]
guardrail_name = (
f"Guardrail {violation.get('offending_guardrail')}"
if violation.get("offending_guardrail")
else "A guardrail"
)
raise ValueError(
f"{guardrail_name} was violated without a proper guardrail violation handler."
)
class LayerupSecurity(LLM):
"""Layerup Security LLM service."""
llm: LLM
layerup_api_key: str
layerup_api_base_url: str = "https://api.uselayerup.com/v1"
prompt_guardrails: Optional[List[str]] = []
response_guardrails: Optional[List[str]] = []
mask: bool = False
metadata: Optional[Dict[str, Any]] = {}
handle_prompt_guardrail_violation: Callable[[dict], str] = (
default_guardrail_violation_handler
)
handle_response_guardrail_violation: Callable[[dict], str] = (
default_guardrail_violation_handler
)
client: Any #: :meta private:
@model_validator(mode="before")
@classmethod
def validate_layerup_sdk(cls, values: Dict[str, Any]) -> Any:
try:
from layerup_security import LayerupSecurity as LayerupSecuritySDK
values["client"] = LayerupSecuritySDK(
api_key=values["layerup_api_key"],
base_url=values["layerup_api_base_url"],
)
except ImportError:
raise ImportError(
"Could not import LayerupSecurity SDK. "
"Please install it with `pip install LayerupSecurity`."
)
return values
@property
def _llm_type(self) -> str:
return "layerup_security"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
messages = [{"role": "user", "content": prompt}]
unmask_response = None
if self.mask:
messages, unmask_response = self.client.mask_prompt(messages, self.metadata)
if self.prompt_guardrails:
security_response = self.client.execute_guardrails(
self.prompt_guardrails, messages, prompt, self.metadata
)
if not security_response["all_safe"]:
return self.handle_prompt_guardrail_violation(security_response)
result = self.llm._call(
messages[0]["content"], run_manager=run_manager, **kwargs
)
if self.mask and unmask_response:
result = unmask_response(result)
messages.append({"role": "assistant", "content": result})
if self.response_guardrails:
security_response = self.client.execute_guardrails(
self.response_guardrails, messages, result, self.metadata
)
if not security_response["all_safe"]:
return self.handle_response_guardrail_violation(security_response)
return result
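# A minimal usage sketch (illustrative only), assuming the ``layerup_security``
# SDK is installed and a valid Layerup API key is available. The guardrail
# names and the wrapped ``FakeListLLM`` below are placeholders, not required
# choices; any ``LLM`` subclass can be wrapped.
if __name__ == "__main__":
    from langchain_community.llms import FakeListLLM

    guarded_llm = LayerupSecurity(
        llm=FakeListLLM(responses=["ok"]),  # stand-in for a real LLM
        layerup_api_key="<your-layerup-api-key>",
        prompt_guardrails=["layerup.prompt_injection"],  # hypothetical name
        response_guardrails=[],
        handle_prompt_guardrail_violation=lambda violation: (
            "Request blocked by a prompt guardrail."
        ),
    )
    print(guarded_llm.invoke("Summarize our refund policy."))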
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/you.py | import os
from typing import Any, Dict, Generator, Iterator, List, Literal, Optional
import requests
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from pydantic import Field
SMART_ENDPOINT = "https://chat-api.you.com/smart"
RESEARCH_ENDPOINT = "https://chat-api.you.com/research"
def _request(base_url: str, api_key: str, **kwargs: Any) -> Dict[str, Any]:
"""
NOTE: This function can be replaced by a OpenAPI-generated Python SDK in the future,
for better input/output typing support.
"""
headers = {"x-api-key": api_key}
response = requests.post(base_url, headers=headers, json=kwargs)
response.raise_for_status()
return response.json()
def _request_stream(
base_url: str, api_key: str, **kwargs: Any
) -> Generator[str, None, None]:
headers = {"x-api-key": api_key}
params = dict(**kwargs, stream=True)
response = requests.post(base_url, headers=headers, stream=True, json=params)
response.raise_for_status()
# Explicitly coercing the response to a generator to satisfy mypy
event_source = (bytestring for bytestring in response)
try:
import sseclient
client = sseclient.SSEClient(event_source)
except ImportError:
raise ImportError(
(
"Could not import `sseclient`. "
"Please install it with `pip install sseclient-py`."
)
)
for event in client.events():
if event.event in ("search_results", "done"):
pass
elif event.event == "token":
yield event.data
elif event.event == "error":
raise ValueError(f"Error in response: {event.data}")
else:
raise NotImplementedError(f"Unknown event type {event.event}")
class You(LLM):
"""Wrapper around You.com's conversational Smart and Research APIs.
Each API endpoint is designed to generate conversational
responses to a variety of query types, including inline citations
and web results when relevant.
Smart Endpoint:
- Quick, reliable answers for a variety of questions
- Cites the entire web page URL
Research Endpoint:
- In-depth answers with extensive citations for a variety of questions
- Cites the specific web page snippet relevant to the claim
To connect to the You.com API you need an API key, which
you can get at https://api.you.com.
For more information, check out the documentation at
https://documentation.you.com/api-reference/.
Args:
endpoint: You.com conversational endpoints. Choose from "smart" or "research"
ydc_api_key: You.com API key, if `YDC_API_KEY` is not set in the environment
"""
endpoint: Literal["smart", "research"] = Field(
"smart",
description=(
'You.com conversational endpoints. Choose from "smart" or "research"'
),
)
ydc_api_key: Optional[str] = Field(
None,
description="You.com API key, if `YDC_API_KEY` is not set in the envrioment",
)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
if stop:
raise NotImplementedError(
"Stop words are not implemented for You.com endpoints."
)
params = {"query": prompt}
response = _request(self._request_endpoint, api_key=self._api_key, **params)
return response["answer"]
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
if stop:
raise NotImplementedError(
"Stop words are not implemented for You.com endpoints."
)
params = {"query": prompt}
for token in _request_stream(
self._request_endpoint, api_key=self._api_key, **params
):
yield GenerationChunk(text=token)
@property
def _request_endpoint(self) -> str:
if self.endpoint == "smart":
return SMART_ENDPOINT
return RESEARCH_ENDPOINT
@property
def _api_key(self) -> str:
return self.ydc_api_key or os.environ["YDC_API_KEY"]
@property
def _llm_type(self) -> str:
return "you.com"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/anthropic.py | import re
import warnings
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
)
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.prompt_values import PromptValue
from langchain_core.utils import (
check_package_version,
get_from_dict_or_env,
get_pydantic_field_names,
pre_init,
)
from langchain_core.utils.utils import _build_model_kwargs, convert_to_secret_str
from pydantic import ConfigDict, Field, SecretStr, model_validator
class _AnthropicCommon(BaseLanguageModel):
client: Any = None #: :meta private:
async_client: Any = None #: :meta private:
model: str = Field(default="claude-2", alias="model_name")
"""Model name to use."""
max_tokens_to_sample: int = Field(default=256, alias="max_tokens")
"""Denotes the number of tokens to predict per generation."""
temperature: Optional[float] = None
"""A non-negative float that tunes the degree of randomness in generation."""
top_k: Optional[int] = None
"""Number of most likely tokens to consider at each step."""
top_p: Optional[float] = None
"""Total probability mass of tokens to consider at each step."""
streaming: bool = False
"""Whether to stream the results."""
default_request_timeout: Optional[float] = None
"""Timeout for requests to Anthropic Completion API. Default is 600 seconds."""
max_retries: int = 2
"""Number of retries allowed for requests sent to the Anthropic Completion API."""
anthropic_api_url: Optional[str] = None
anthropic_api_key: Optional[SecretStr] = None
HUMAN_PROMPT: Optional[str] = None
AI_PROMPT: Optional[str] = None
count_tokens: Optional[Callable[[str], int]] = None
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict) -> Any:
all_required_field_names = get_pydantic_field_names(cls)
values = _build_model_kwargs(values, all_required_field_names)
return values
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["anthropic_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "anthropic_api_key", "ANTHROPIC_API_KEY")
)
# Get custom api url from environment.
values["anthropic_api_url"] = get_from_dict_or_env(
values,
"anthropic_api_url",
"ANTHROPIC_API_URL",
default="https://api.anthropic.com",
)
try:
import anthropic
check_package_version("anthropic", gte_version="0.3")
values["client"] = anthropic.Anthropic(
base_url=values["anthropic_api_url"],
api_key=values["anthropic_api_key"].get_secret_value(),
timeout=values["default_request_timeout"],
max_retries=values["max_retries"],
)
values["async_client"] = anthropic.AsyncAnthropic(
base_url=values["anthropic_api_url"],
api_key=values["anthropic_api_key"].get_secret_value(),
timeout=values["default_request_timeout"],
max_retries=values["max_retries"],
)
values["HUMAN_PROMPT"] = anthropic.HUMAN_PROMPT
values["AI_PROMPT"] = anthropic.AI_PROMPT
values["count_tokens"] = values["client"].count_tokens
except ImportError:
raise ImportError(
"Could not import anthropic python package. "
"Please it install it with `pip install anthropic`."
)
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling Anthropic API."""
d = {
"max_tokens_to_sample": self.max_tokens_to_sample,
"model": self.model,
}
if self.temperature is not None:
d["temperature"] = self.temperature
if self.top_k is not None:
d["top_k"] = self.top_k
if self.top_p is not None:
d["top_p"] = self.top_p
return {**d, **self.model_kwargs}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{}, **self._default_params}
def _get_anthropic_stop(self, stop: Optional[List[str]] = None) -> List[str]:
if not self.HUMAN_PROMPT or not self.AI_PROMPT:
raise NameError("Please ensure the anthropic package is loaded")
if stop is None:
stop = []
# Never want model to invent new turns of Human / Assistant dialog.
stop.extend([self.HUMAN_PROMPT])
return stop
@deprecated(
since="0.0.28",
removal="1.0",
alternative_import="langchain_anthropic.AnthropicLLM",
)
class Anthropic(LLM, _AnthropicCommon):
"""Anthropic large language models.
To use, you should have the ``anthropic`` python package installed, and the
environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
import anthropic
from langchain_community.llms import Anthropic
model = Anthropic(model="<model_name>", anthropic_api_key="my-api-key")
# Simplest invocation, automatically wrapped with HUMAN_PROMPT
# and AI_PROMPT.
response = model.invoke("What are the biggest risks facing humanity?")
# Or if you want to use the chat mode, build a few-shot-prompt, or
# put words in the Assistant's mouth, use HUMAN_PROMPT and AI_PROMPT:
raw_prompt = "What are the biggest risks facing humanity?"
prompt = f"{anthropic.HUMAN_PROMPT} {prompt}{anthropic.AI_PROMPT}"
response = model.invoke(prompt)
"""
model_config = ConfigDict(
populate_by_name=True,
arbitrary_types_allowed=True,
)
@pre_init
def raise_warning(cls, values: Dict) -> Dict:
"""Raise warning that this class is deprecated."""
warnings.warn(
"This Anthropic LLM is deprecated. "
"Please use `from langchain_community.chat_models import ChatAnthropic` "
"instead"
)
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "anthropic-llm"
def _wrap_prompt(self, prompt: str) -> str:
if not self.HUMAN_PROMPT or not self.AI_PROMPT:
raise NameError("Please ensure the anthropic package is loaded")
if prompt.startswith(self.HUMAN_PROMPT):
return prompt # Already wrapped.
# Guard against common errors in specifying wrong number of newlines.
corrected_prompt, n_subs = re.subn(r"^\n*Human:", self.HUMAN_PROMPT, prompt)
if n_subs == 1:
return corrected_prompt
# As a last resort, wrap the prompt ourselves to emulate instruct-style.
return f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT} Sure, here you go:\n"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
r"""Call out to Anthropic's completion endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "What are the biggest risks facing humanity?"
prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
response = model.invoke(prompt)
"""
if self.streaming:
completion = ""
for chunk in self._stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
completion += chunk.text
return completion
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
response = self.client.completions.create(
prompt=self._wrap_prompt(prompt),
stop_sequences=stop,
**params,
)
return response.completion
def convert_prompt(self, prompt: PromptValue) -> str:
return self._wrap_prompt(prompt.to_string())
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Anthropic's completion endpoint asynchronously."""
if self.streaming:
completion = ""
async for chunk in self._astream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
completion += chunk.text
return completion
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
response = await self.async_client.completions.create(
prompt=self._wrap_prompt(prompt),
stop_sequences=stop,
**params,
)
return response.completion
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
r"""Call Anthropic completion_stream and return the resulting generator.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens from Anthropic.
Example:
.. code-block:: python
prompt = "Write a poem about a stream."
prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
generator = anthropic.stream(prompt)
for token in generator:
yield token
"""
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
for token in self.client.completions.create(
prompt=self._wrap_prompt(prompt), stop_sequences=stop, stream=True, **params
):
chunk = GenerationChunk(text=token.completion)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
r"""Call Anthropic completion_stream and return the resulting generator.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens from Anthropic.
Example:
.. code-block:: python
prompt = "Write a poem about a stream."
prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
generator = anthropic.stream(prompt)
for token in generator:
yield token
"""
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
async for token in await self.async_client.completions.create(
prompt=self._wrap_prompt(prompt),
stop_sequences=stop,
stream=True,
**params,
):
chunk = GenerationChunk(text=token.completion)
if run_manager:
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
def get_num_tokens(self, text: str) -> int:
"""Calculate number of tokens."""
if not self.count_tokens:
raise NameError("Please ensure the anthropic package is loaded")
return self.count_tokens(text)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/llms | lc_public_repos/langchain/libs/community/langchain_community/llms/grammars/json.gbnf | # Grammar for subset of JSON - doesn't support full string or number syntax
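# Example of a document this grammar accepts (integers only, simple escapes):
# {"name": "Ada", "ids": [1, 2, 3], "active": true, "note": null}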
root ::= object
value ::= object | array | string | number | boolean | "null"
object ::=
"{" ws (
string ":" ws value
("," ws string ":" ws value)*
)? "}"
array ::=
"[" ws (
value
("," ws value)*
)? "]"
string ::=
"\"" (
[^"\\] |
"\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
)* "\"" ws
# Only plain integers currently
number ::= "-"? [0-9]+ ws
boolean ::= ("true" | "false") ws
# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)? |
0 | lc_public_repos/langchain/libs/community/langchain_community/llms | lc_public_repos/langchain/libs/community/langchain_community/llms/grammars/list.gbnf | root ::= "[" items "]" EOF
items ::= item ("," ws* item)*
item ::= string
string ::=
"\"" word (ws+ word)* "\"" ws*
word ::= [a-zA-Z]+
ws ::= " "
EOF ::= "\n" |
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/tools/render.py | from langchain_core.utils.function_calling import (
format_tool_to_openai_function,
format_tool_to_openai_tool,
)
__all__ = ["format_tool_to_openai_function", "format_tool_to_openai_tool"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/tools/plugin.py | from __future__ import annotations
import json
from typing import Optional, Type
import requests
import yaml
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel
class ApiConfig(BaseModel):
"""API Configuration."""
type: str
url: str
has_user_authentication: Optional[bool] = False
class AIPlugin(BaseModel):
"""AI Plugin Definition."""
schema_version: str
name_for_model: str
name_for_human: str
description_for_model: str
description_for_human: str
auth: Optional[dict] = None
api: ApiConfig
logo_url: Optional[str]
contact_email: Optional[str]
legal_info_url: Optional[str]
@classmethod
def from_url(cls, url: str) -> AIPlugin:
"""Instantiate AIPlugin from a URL."""
response = requests.get(url).json()
return cls(**response)
def marshal_spec(txt: str) -> dict:
"""Convert the yaml or json serialized spec to a dict.
Args:
txt: The yaml or json serialized spec.
Returns:
dict: The spec as a dict.
"""
try:
return json.loads(txt)
except json.JSONDecodeError:
return yaml.safe_load(txt)
class AIPluginToolSchema(BaseModel):
"""Schema for AIPluginTool."""
tool_input: Optional[str] = ""
class AIPluginTool(BaseTool): # type: ignore[override, override]
"""Tool for getting the OpenAPI spec for an AI Plugin."""
plugin: AIPlugin
api_spec: str
args_schema: Type[AIPluginToolSchema] = AIPluginToolSchema
@classmethod
def from_plugin_url(cls, url: str) -> AIPluginTool:
plugin = AIPlugin.from_url(url)
description = (
f"Call this tool to get the OpenAPI spec (and usage guide) "
f"for interacting with the {plugin.name_for_human} API. "
f"You should only call this ONCE! What is the "
f"{plugin.name_for_human} API useful for? "
) + plugin.description_for_human
open_api_spec_str = requests.get(plugin.api.url).text
open_api_spec = marshal_spec(open_api_spec_str)
api_spec = (
f"Usage Guide: {plugin.description_for_model}\n\n"
f"OpenAPI Spec: {open_api_spec}"
)
return cls(
name=plugin.name_for_model,
description=description,
plugin=plugin,
api_spec=api_spec,
)
def _run(
self,
tool_input: Optional[str] = "",
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_spec
async def _arun(
self,
tool_input: Optional[str] = None,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return self.api_spec
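# Example usage (a minimal sketch; the manifest URL below is a placeholder and
# from_plugin_url fetches it over the network):
#
#     tool = AIPluginTool.from_plugin_url(
#         "https://example.com/.well-known/ai-plugin.json"
#     )
#     print(tool.name)
#     print(tool.run(""))  # returns the cached usage guide and OpenAPI spec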
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/tools/ifttt.py | """From https://github.com/SidU/teams-langchain-js/wiki/Connecting-IFTTT-Services.
# Creating a webhook
- Go to https://ifttt.com/create
# Configuring the "If This"
- Click on the "If This" button in the IFTTT interface.
- Search for "Webhooks" in the search bar.
- Choose the first option for "Receive a web request with a JSON payload."
- Choose an Event Name that is specific to the service you plan to connect to.
This will make it easier for you to manage the webhook URL.
For example, if you're connecting to Spotify, you could use "Spotify" as your
Event Name.
- Click the "Create Trigger" button to save your settings and create your webhook.
# Configuring the "Then That"
- Tap on the "Then That" button in the IFTTT interface.
- Search for the service you want to connect, such as Spotify.
- Choose an action from the service, such as "Add track to a playlist".
- Configure the action by specifying the necessary details, such as the playlist name,
e.g., "Songs from AI".
- Reference the JSON Payload received by the Webhook in your action. For the Spotify
scenario, choose "{{JsonPayload}}" as your search query.
- Tap the "Create Action" button to save your action settings.
- Once you have finished configuring your action, click the "Finish" button to
complete the setup.
- Congratulations! You have successfully connected the Webhook to the desired
service, and you're ready to start receiving data and triggering actions 🎉
# Finishing up
- To get your webhook URL go to https://ifttt.com/maker_webhooks/settings
- Copy the IFTTT key value from there. The URL is of the form
https://maker.ifttt.com/use/YOUR_IFTTT_KEY. Grab the YOUR_IFTTT_KEY value.
"""
from typing import Optional
import requests
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
class IFTTTWebhook(BaseTool): # type: ignore[override]
"""IFTTT Webhook.
Args:
name: name of the tool
description: description of the tool
url: url to hit with the json event.
"""
url: str
def _run(
self,
tool_input: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
body = {"this": tool_input}
response = requests.post(self.url, data=body)
return response.text
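# Example usage (a minimal sketch; the webhook URL is a placeholder - build your
# own from the IFTTT key as described in the module docstring above):
#
#     tool = IFTTTWebhook(
#         name="Spotify",
#         description="Add a track to a Spotify playlist.",
#         url="<your IFTTT webhook URL>",
#     )
#     tool.run("taylor swift")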
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/tools/google_books.py | """Tool for the Google Books API."""
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.google_books import GoogleBooksAPIWrapper
class GoogleBooksQueryInput(BaseModel):
"""Input for the GoogleBooksQuery tool."""
query: str = Field(description="query to look up on google books")
class GoogleBooksQueryRun(BaseTool): # type: ignore[override]
"""Tool that searches the Google Books API."""
name: str = "GoogleBooks"
description: str = (
"A wrapper around Google Books. "
"Useful for when you need to answer general inquiries about "
"books of certain topics and generate recommendation based "
"off of key words"
"Input should be a query string"
)
api_wrapper: GoogleBooksAPIWrapper
args_schema: Type[BaseModel] = GoogleBooksQueryInput
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Google Books tool."""
return self.api_wrapper.run(query)
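# Example usage (a minimal sketch; GoogleBooksAPIWrapper needs a Google Books
# API key - how the key is supplied, e.g. via an environment variable, depends
# on the wrapper's configuration):
#
#     from langchain_community.utilities.google_books import GoogleBooksAPIWrapper
#
#     tool = GoogleBooksQueryRun(api_wrapper=GoogleBooksAPIWrapper())
#     print(tool.run("children's books about dinosaurs"))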
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/tools/__init__.py | """**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. Agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
ToolMetaclass --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
<name> # Examples: BraveSearch, HumanInputRun
**Main helpers:**
.. code-block::
CallbackManagerForToolRun, AsyncCallbackManagerForToolRun
"""
import importlib
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from langchain_core.tools import (
BaseTool as BaseTool,
)
from langchain_core.tools import (
StructuredTool as StructuredTool,
)
from langchain_core.tools import (
Tool as Tool,
)
from langchain_core.tools.convert import tool as tool
from langchain_community.tools.ainetwork.app import (
AINAppOps,
)
from langchain_community.tools.ainetwork.owner import (
AINOwnerOps,
)
from langchain_community.tools.ainetwork.rule import (
AINRuleOps,
)
from langchain_community.tools.ainetwork.transfer import (
AINTransfer,
)
from langchain_community.tools.ainetwork.value import (
AINValueOps,
)
from langchain_community.tools.arxiv.tool import (
ArxivQueryRun,
)
from langchain_community.tools.asknews.tool import (
AskNewsSearch,
)
from langchain_community.tools.azure_ai_services import (
AzureAiServicesDocumentIntelligenceTool,
AzureAiServicesImageAnalysisTool,
AzureAiServicesSpeechToTextTool,
AzureAiServicesTextAnalyticsForHealthTool,
AzureAiServicesTextToSpeechTool,
)
from langchain_community.tools.azure_cognitive_services import (
AzureCogsFormRecognizerTool,
AzureCogsImageAnalysisTool,
AzureCogsSpeech2TextTool,
AzureCogsText2SpeechTool,
AzureCogsTextAnalyticsHealthTool,
)
from langchain_community.tools.bearly.tool import (
BearlyInterpreterTool,
)
from langchain_community.tools.bing_search.tool import (
BingSearchResults,
BingSearchRun,
)
from langchain_community.tools.brave_search.tool import (
BraveSearch,
)
from langchain_community.tools.cassandra_database.tool import (
GetSchemaCassandraDatabaseTool, # noqa: F401
GetTableDataCassandraDatabaseTool, # noqa: F401
QueryCassandraDatabaseTool, # noqa: F401
)
from langchain_community.tools.cogniswitch.tool import (
CogniswitchKnowledgeRequest,
CogniswitchKnowledgeSourceFile,
CogniswitchKnowledgeSourceURL,
CogniswitchKnowledgeStatus,
)
from langchain_community.tools.connery import (
ConneryAction,
)
from langchain_community.tools.convert_to_openai import (
format_tool_to_openai_function,
)
from langchain_community.tools.dataherald import DataheraldTextToSQL
from langchain_community.tools.ddg_search.tool import (
DuckDuckGoSearchResults,
DuckDuckGoSearchRun,
)
from langchain_community.tools.e2b_data_analysis.tool import (
E2BDataAnalysisTool,
)
from langchain_community.tools.edenai import (
EdenAiExplicitImageTool,
EdenAiObjectDetectionTool,
EdenAiParsingIDTool,
EdenAiParsingInvoiceTool,
EdenAiSpeechToTextTool,
EdenAiTextModerationTool,
EdenAiTextToSpeechTool,
EdenaiTool,
)
from langchain_community.tools.eleven_labs.text2speech import (
ElevenLabsText2SpeechTool,
)
from langchain_community.tools.file_management import (
CopyFileTool,
DeleteFileTool,
FileSearchTool,
ListDirectoryTool,
MoveFileTool,
ReadFileTool,
WriteFileTool,
)
from langchain_community.tools.financial_datasets.balance_sheets import (
BalanceSheets,
)
from langchain_community.tools.financial_datasets.cash_flow_statements import (
CashFlowStatements,
)
from langchain_community.tools.financial_datasets.income_statements import (
IncomeStatements,
)
from langchain_community.tools.gmail import (
GmailCreateDraft,
GmailGetMessage,
GmailGetThread,
GmailSearch,
GmailSendMessage,
)
from langchain_community.tools.google_books import (
GoogleBooksQueryRun,
)
from langchain_community.tools.google_cloud.texttospeech import (
GoogleCloudTextToSpeechTool,
)
from langchain_community.tools.google_places.tool import (
GooglePlacesTool,
)
from langchain_community.tools.google_search.tool import (
GoogleSearchResults,
GoogleSearchRun,
)
from langchain_community.tools.google_serper.tool import (
GoogleSerperResults,
GoogleSerperRun,
)
from langchain_community.tools.graphql.tool import (
BaseGraphQLTool,
)
from langchain_community.tools.human.tool import (
HumanInputRun,
)
from langchain_community.tools.ifttt import (
IFTTTWebhook,
)
from langchain_community.tools.interaction.tool import (
StdInInquireTool,
)
from langchain_community.tools.jina_search.tool import JinaSearch
from langchain_community.tools.jira.tool import (
JiraAction,
)
from langchain_community.tools.json.tool import (
JsonGetValueTool,
JsonListKeysTool,
)
from langchain_community.tools.merriam_webster.tool import (
MerriamWebsterQueryRun,
)
from langchain_community.tools.metaphor_search import (
MetaphorSearchResults,
)
from langchain_community.tools.mojeek_search.tool import (
MojeekSearch,
)
from langchain_community.tools.nasa.tool import (
NasaAction,
)
from langchain_community.tools.office365.create_draft_message import (
O365CreateDraftMessage,
)
from langchain_community.tools.office365.events_search import (
O365SearchEvents,
)
from langchain_community.tools.office365.messages_search import (
O365SearchEmails,
)
from langchain_community.tools.office365.send_event import (
O365SendEvent,
)
from langchain_community.tools.office365.send_message import (
O365SendMessage,
)
from langchain_community.tools.office365.utils import (
authenticate,
)
from langchain_community.tools.openapi.utils.api_models import (
APIOperation,
)
from langchain_community.tools.openapi.utils.openapi_utils import (
OpenAPISpec,
)
from langchain_community.tools.openweathermap.tool import (
OpenWeatherMapQueryRun,
)
from langchain_community.tools.playwright import (
ClickTool,
CurrentWebPageTool,
ExtractHyperlinksTool,
ExtractTextTool,
GetElementsTool,
NavigateBackTool,
NavigateTool,
)
from langchain_community.tools.plugin import (
AIPluginTool,
)
from langchain_community.tools.polygon.aggregates import (
PolygonAggregates,
)
from langchain_community.tools.polygon.financials import (
PolygonFinancials,
)
from langchain_community.tools.polygon.last_quote import (
PolygonLastQuote,
)
from langchain_community.tools.polygon.ticker_news import (
PolygonTickerNews,
)
from langchain_community.tools.powerbi.tool import (
InfoPowerBITool,
ListPowerBITool,
QueryPowerBITool,
)
from langchain_community.tools.pubmed.tool import (
PubmedQueryRun,
)
from langchain_community.tools.reddit_search.tool import (
RedditSearchRun,
RedditSearchSchema,
)
from langchain_community.tools.requests.tool import (
BaseRequestsTool,
RequestsDeleteTool,
RequestsGetTool,
RequestsPatchTool,
RequestsPostTool,
RequestsPutTool,
)
from langchain_community.tools.scenexplain.tool import (
SceneXplainTool,
)
from langchain_community.tools.searchapi.tool import (
SearchAPIResults,
SearchAPIRun,
)
from langchain_community.tools.searx_search.tool import (
SearxSearchResults,
SearxSearchRun,
)
from langchain_community.tools.shell.tool import (
ShellTool,
)
from langchain_community.tools.slack.get_channel import (
SlackGetChannel,
)
from langchain_community.tools.slack.get_message import (
SlackGetMessage,
)
from langchain_community.tools.slack.schedule_message import (
SlackScheduleMessage,
)
from langchain_community.tools.slack.send_message import (
SlackSendMessage,
)
from langchain_community.tools.sleep.tool import (
SleepTool,
)
from langchain_community.tools.spark_sql.tool import (
BaseSparkSQLTool,
InfoSparkSQLTool,
ListSparkSQLTool,
QueryCheckerTool,
QuerySparkSQLTool,
)
from langchain_community.tools.sql_database.tool import (
BaseSQLDatabaseTool,
InfoSQLDatabaseTool,
ListSQLDatabaseTool,
QuerySQLCheckerTool,
QuerySQLDataBaseTool,
)
from langchain_community.tools.stackexchange.tool import (
StackExchangeTool,
)
from langchain_community.tools.steam.tool import (
SteamWebAPIQueryRun,
)
from langchain_community.tools.steamship_image_generation import (
SteamshipImageGenerationTool,
)
from langchain_community.tools.tavily_search import (
TavilyAnswer,
TavilySearchResults,
)
from langchain_community.tools.vectorstore.tool import (
VectorStoreQATool,
VectorStoreQAWithSourcesTool,
)
from langchain_community.tools.wikipedia.tool import (
WikipediaQueryRun,
)
from langchain_community.tools.wolfram_alpha.tool import (
WolframAlphaQueryRun,
)
from langchain_community.tools.yahoo_finance_news import (
YahooFinanceNewsTool,
)
from langchain_community.tools.you.tool import (
YouSearchTool,
)
from langchain_community.tools.youtube.search import (
YouTubeSearchTool,
)
from langchain_community.tools.zapier.tool import (
ZapierNLAListActions,
ZapierNLARunAction,
)
from langchain_community.tools.zenguard.tool import (
Detector,
ZenGuardInput,
ZenGuardTool,
)
__all__ = [
"BaseTool",
"Tool",
"tool",
"StructuredTool",
"AINAppOps",
"AINOwnerOps",
"AINRuleOps",
"AINTransfer",
"AINValueOps",
"AIPluginTool",
"APIOperation",
"ArxivQueryRun",
"AskNewsSearch",
"AzureAiServicesDocumentIntelligenceTool",
"AzureAiServicesImageAnalysisTool",
"AzureAiServicesSpeechToTextTool",
"AzureAiServicesTextAnalyticsForHealthTool",
"AzureAiServicesTextToSpeechTool",
"AzureCogsFormRecognizerTool",
"AzureCogsImageAnalysisTool",
"AzureCogsSpeech2TextTool",
"AzureCogsText2SpeechTool",
"AzureCogsTextAnalyticsHealthTool",
"BalanceSheets",
"BaseGraphQLTool",
"BaseRequestsTool",
"BaseSQLDatabaseTool",
"BaseSparkSQLTool",
"BearlyInterpreterTool",
"BingSearchResults",
"BingSearchRun",
"BraveSearch",
"CashFlowStatements",
"ClickTool",
"CogniswitchKnowledgeRequest",
"CogniswitchKnowledgeSourceFile",
"CogniswitchKnowledgeSourceURL",
"CogniswitchKnowledgeStatus",
"ConneryAction",
"CopyFileTool",
"CurrentWebPageTool",
"DeleteFileTool",
"DataheraldTextToSQL",
"DuckDuckGoSearchResults",
"DuckDuckGoSearchRun",
"E2BDataAnalysisTool",
"EdenAiExplicitImageTool",
"EdenAiObjectDetectionTool",
"EdenAiParsingIDTool",
"EdenAiParsingInvoiceTool",
"EdenAiSpeechToTextTool",
"EdenAiTextModerationTool",
"EdenAiTextToSpeechTool",
"EdenaiTool",
"ElevenLabsText2SpeechTool",
"ExtractHyperlinksTool",
"ExtractTextTool",
"FileSearchTool",
"GetElementsTool",
"GmailCreateDraft",
"GmailGetMessage",
"GmailGetThread",
"GmailSearch",
"GmailSendMessage",
"GoogleBooksQueryRun",
"GoogleCloudTextToSpeechTool",
"GooglePlacesTool",
"GoogleSearchResults",
"GoogleSearchRun",
"GoogleSerperResults",
"GoogleSerperRun",
"HumanInputRun",
"IFTTTWebhook",
"IncomeStatements",
"InfoPowerBITool",
"InfoSQLDatabaseTool",
"InfoSparkSQLTool",
"JiraAction",
"JinaSearch",
"JsonGetValueTool",
"JsonListKeysTool",
"ListDirectoryTool",
"ListPowerBITool",
"ListSQLDatabaseTool",
"ListSparkSQLTool",
"MerriamWebsterQueryRun",
"MetaphorSearchResults",
"MojeekSearch",
"MoveFileTool",
"NasaAction",
"NavigateBackTool",
"NavigateTool",
"O365CreateDraftMessage",
"O365SearchEmails",
"O365SearchEvents",
"O365SendEvent",
"O365SendMessage",
"OpenAPISpec",
"OpenWeatherMapQueryRun",
"PolygonAggregates",
"PolygonFinancials",
"PolygonLastQuote",
"PolygonTickerNews",
"PubmedQueryRun",
"QueryCheckerTool",
"QueryPowerBITool",
"QuerySQLCheckerTool",
"QuerySQLDataBaseTool",
"QuerySparkSQLTool",
"ReadFileTool",
"RedditSearchRun",
"RedditSearchSchema",
"RequestsDeleteTool",
"RequestsGetTool",
"RequestsPatchTool",
"RequestsPostTool",
"RequestsPutTool",
"SceneXplainTool",
"SearchAPIResults",
"SearchAPIRun",
"SearxSearchResults",
"SearxSearchRun",
"ShellTool",
"SlackGetChannel",
"SlackGetMessage",
"SlackScheduleMessage",
"SlackSendMessage",
"SleepTool",
"StackExchangeTool",
"StdInInquireTool",
"SteamWebAPIQueryRun",
"SteamshipImageGenerationTool",
"TavilyAnswer",
"TavilySearchResults",
"VectorStoreQATool",
"VectorStoreQAWithSourcesTool",
"WikipediaQueryRun",
"WolframAlphaQueryRun",
"WriteFileTool",
"YahooFinanceNewsTool",
"YouSearchTool",
"YouTubeSearchTool",
"ZapierNLAListActions",
"ZapierNLARunAction",
"Detector",
"ZenGuardInput",
"ZenGuardTool",
"authenticate",
"format_tool_to_openai_function",
]
# Used for internal purposes
_DEPRECATED_TOOLS = {"PythonAstREPLTool", "PythonREPLTool"}
_module_lookup = {
"AINAppOps": "langchain_community.tools.ainetwork.app",
"AINOwnerOps": "langchain_community.tools.ainetwork.owner",
"AINRuleOps": "langchain_community.tools.ainetwork.rule",
"AINTransfer": "langchain_community.tools.ainetwork.transfer",
"AINValueOps": "langchain_community.tools.ainetwork.value",
"AIPluginTool": "langchain_community.tools.plugin",
"APIOperation": "langchain_community.tools.openapi.utils.api_models",
"ArxivQueryRun": "langchain_community.tools.arxiv.tool",
"AskNewsSearch": "langchain_community.tools.asknews.tool",
"AzureAiServicesDocumentIntelligenceTool": "langchain_community.tools.azure_ai_services", # noqa: E501
"AzureAiServicesImageAnalysisTool": "langchain_community.tools.azure_ai_services",
"AzureAiServicesSpeechToTextTool": "langchain_community.tools.azure_ai_services",
"AzureAiServicesTextToSpeechTool": "langchain_community.tools.azure_ai_services",
"AzureAiServicesTextAnalyticsForHealthTool": "langchain_community.tools.azure_ai_services", # noqa: E501
"AzureCogsFormRecognizerTool": "langchain_community.tools.azure_cognitive_services",
"AzureCogsImageAnalysisTool": "langchain_community.tools.azure_cognitive_services",
"AzureCogsSpeech2TextTool": "langchain_community.tools.azure_cognitive_services",
"AzureCogsText2SpeechTool": "langchain_community.tools.azure_cognitive_services",
"AzureCogsTextAnalyticsHealthTool": "langchain_community.tools.azure_cognitive_services", # noqa: E501
"BalanceSheets": "langchain_community.tools.financial_datasets.balance_sheets",
"BaseGraphQLTool": "langchain_community.tools.graphql.tool",
"BaseRequestsTool": "langchain_community.tools.requests.tool",
"BaseSQLDatabaseTool": "langchain_community.tools.sql_database.tool",
"BaseSparkSQLTool": "langchain_community.tools.spark_sql.tool",
"BaseTool": "langchain_core.tools",
"BearlyInterpreterTool": "langchain_community.tools.bearly.tool",
"BingSearchResults": "langchain_community.tools.bing_search.tool",
"BingSearchRun": "langchain_community.tools.bing_search.tool",
"BraveSearch": "langchain_community.tools.brave_search.tool",
"CashFlowStatements": "langchain_community.tools.financial_datasets.cash_flow_statements", # noqa: E501
"ClickTool": "langchain_community.tools.playwright",
"CogniswitchKnowledgeRequest": "langchain_community.tools.cogniswitch.tool",
"CogniswitchKnowledgeSourceFile": "langchain_community.tools.cogniswitch.tool",
"CogniswitchKnowledgeSourceURL": "langchain_community.tools.cogniswitch.tool",
"CogniswitchKnowledgeStatus": "langchain_community.tools.cogniswitch.tool",
"ConneryAction": "langchain_community.tools.connery",
"CopyFileTool": "langchain_community.tools.file_management",
"CurrentWebPageTool": "langchain_community.tools.playwright",
"DataheraldTextToSQL": "langchain_community.tools.dataherald.tool",
"DeleteFileTool": "langchain_community.tools.file_management",
"Detector": "langchain_community.tools.zenguard.tool",
"DuckDuckGoSearchResults": "langchain_community.tools.ddg_search.tool",
"DuckDuckGoSearchRun": "langchain_community.tools.ddg_search.tool",
"E2BDataAnalysisTool": "langchain_community.tools.e2b_data_analysis.tool",
"EdenAiExplicitImageTool": "langchain_community.tools.edenai",
"EdenAiObjectDetectionTool": "langchain_community.tools.edenai",
"EdenAiParsingIDTool": "langchain_community.tools.edenai",
"EdenAiParsingInvoiceTool": "langchain_community.tools.edenai",
"EdenAiSpeechToTextTool": "langchain_community.tools.edenai",
"EdenAiTextModerationTool": "langchain_community.tools.edenai",
"EdenAiTextToSpeechTool": "langchain_community.tools.edenai",
"EdenaiTool": "langchain_community.tools.edenai",
"ElevenLabsText2SpeechTool": "langchain_community.tools.eleven_labs.text2speech",
"ExtractHyperlinksTool": "langchain_community.tools.playwright",
"ExtractTextTool": "langchain_community.tools.playwright",
"FileSearchTool": "langchain_community.tools.file_management",
"GetElementsTool": "langchain_community.tools.playwright",
"GmailCreateDraft": "langchain_community.tools.gmail",
"GmailGetMessage": "langchain_community.tools.gmail",
"GmailGetThread": "langchain_community.tools.gmail",
"GmailSearch": "langchain_community.tools.gmail",
"GmailSendMessage": "langchain_community.tools.gmail",
"GoogleBooksQueryRun": "langchain_community.tools.google_books",
"GoogleCloudTextToSpeechTool": "langchain_community.tools.google_cloud.texttospeech", # noqa: E501
"GooglePlacesTool": "langchain_community.tools.google_places.tool",
"GoogleSearchResults": "langchain_community.tools.google_search.tool",
"GoogleSearchRun": "langchain_community.tools.google_search.tool",
"GoogleSerperResults": "langchain_community.tools.google_serper.tool",
"GoogleSerperRun": "langchain_community.tools.google_serper.tool",
"HumanInputRun": "langchain_community.tools.human.tool",
"IFTTTWebhook": "langchain_community.tools.ifttt",
"IncomeStatements": "langchain_community.tools.financial_datasets.income_statements", # noqa: E501
"InfoPowerBITool": "langchain_community.tools.powerbi.tool",
"InfoSQLDatabaseTool": "langchain_community.tools.sql_database.tool",
"InfoSparkSQLTool": "langchain_community.tools.spark_sql.tool",
"JiraAction": "langchain_community.tools.jira.tool",
"JinaSearch": "langchain_community.tools.jina_search.tool",
"JsonGetValueTool": "langchain_community.tools.json.tool",
"JsonListKeysTool": "langchain_community.tools.json.tool",
"ListDirectoryTool": "langchain_community.tools.file_management",
"ListPowerBITool": "langchain_community.tools.powerbi.tool",
"ListSQLDatabaseTool": "langchain_community.tools.sql_database.tool",
"ListSparkSQLTool": "langchain_community.tools.spark_sql.tool",
"MerriamWebsterQueryRun": "langchain_community.tools.merriam_webster.tool",
"MetaphorSearchResults": "langchain_community.tools.metaphor_search",
"MojeekSearch": "langchain_community.tools.mojeek_search.tool",
"MoveFileTool": "langchain_community.tools.file_management",
"NasaAction": "langchain_community.tools.nasa.tool",
"NavigateBackTool": "langchain_community.tools.playwright",
"NavigateTool": "langchain_community.tools.playwright",
"O365CreateDraftMessage": "langchain_community.tools.office365.create_draft_message", # noqa: E501
"O365SearchEmails": "langchain_community.tools.office365.messages_search",
"O365SearchEvents": "langchain_community.tools.office365.events_search",
"O365SendEvent": "langchain_community.tools.office365.send_event",
"O365SendMessage": "langchain_community.tools.office365.send_message",
"OpenAPISpec": "langchain_community.tools.openapi.utils.openapi_utils",
"OpenWeatherMapQueryRun": "langchain_community.tools.openweathermap.tool",
"PolygonAggregates": "langchain_community.tools.polygon.aggregates",
"PolygonFinancials": "langchain_community.tools.polygon.financials",
"PolygonLastQuote": "langchain_community.tools.polygon.last_quote",
"PolygonTickerNews": "langchain_community.tools.polygon.ticker_news",
"PubmedQueryRun": "langchain_community.tools.pubmed.tool",
"QueryCheckerTool": "langchain_community.tools.spark_sql.tool",
"QueryPowerBITool": "langchain_community.tools.powerbi.tool",
"QuerySQLCheckerTool": "langchain_community.tools.sql_database.tool",
"QuerySQLDataBaseTool": "langchain_community.tools.sql_database.tool",
"QuerySparkSQLTool": "langchain_community.tools.spark_sql.tool",
"ReadFileTool": "langchain_community.tools.file_management",
"RedditSearchRun": "langchain_community.tools.reddit_search.tool",
"RedditSearchSchema": "langchain_community.tools.reddit_search.tool",
"RequestsDeleteTool": "langchain_community.tools.requests.tool",
"RequestsGetTool": "langchain_community.tools.requests.tool",
"RequestsPatchTool": "langchain_community.tools.requests.tool",
"RequestsPostTool": "langchain_community.tools.requests.tool",
"RequestsPutTool": "langchain_community.tools.requests.tool",
"SceneXplainTool": "langchain_community.tools.scenexplain.tool",
"SearchAPIResults": "langchain_community.tools.searchapi.tool",
"SearchAPIRun": "langchain_community.tools.searchapi.tool",
"SearxSearchResults": "langchain_community.tools.searx_search.tool",
"SearxSearchRun": "langchain_community.tools.searx_search.tool",
"ShellTool": "langchain_community.tools.shell.tool",
"SlackGetChannel": "langchain_community.tools.slack.get_channel",
"SlackGetMessage": "langchain_community.tools.slack.get_message",
"SlackScheduleMessage": "langchain_community.tools.slack.schedule_message",
"SlackSendMessage": "langchain_community.tools.slack.send_message",
"SleepTool": "langchain_community.tools.sleep.tool",
"StackExchangeTool": "langchain_community.tools.stackexchange.tool",
"StdInInquireTool": "langchain_community.tools.interaction.tool",
"SteamWebAPIQueryRun": "langchain_community.tools.steam.tool",
"SteamshipImageGenerationTool": "langchain_community.tools.steamship_image_generation", # noqa: E501
"StructuredTool": "langchain_core.tools",
"TavilyAnswer": "langchain_community.tools.tavily_search",
"TavilySearchResults": "langchain_community.tools.tavily_search",
"Tool": "langchain_core.tools",
"VectorStoreQATool": "langchain_community.tools.vectorstore.tool",
"VectorStoreQAWithSourcesTool": "langchain_community.tools.vectorstore.tool",
"WikipediaQueryRun": "langchain_community.tools.wikipedia.tool",
"WolframAlphaQueryRun": "langchain_community.tools.wolfram_alpha.tool",
"WriteFileTool": "langchain_community.tools.file_management",
"YahooFinanceNewsTool": "langchain_community.tools.yahoo_finance_news",
"YouSearchTool": "langchain_community.tools.you.tool",
"YouTubeSearchTool": "langchain_community.tools.youtube.search",
"ZapierNLAListActions": "langchain_community.tools.zapier.tool",
"ZapierNLARunAction": "langchain_community.tools.zapier.tool",
"ZenGuardInput": "langchain_community.tools.zenguard.tool",
"ZenGuardTool": "langchain_community.tools.zenguard.tool",
"authenticate": "langchain_community.tools.office365.utils",
"format_tool_to_openai_function": "langchain_community.tools.convert_to_openai",
"tool": "langchain_core.tools",
}
def __getattr__(name: str) -> Any:
if name in _module_lookup:
module = importlib.import_module(_module_lookup[name])
return getattr(module, name)
raise AttributeError(f"module {__name__} has no attribute {name}")
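# Example of the lazy-import behaviour implemented above (a minimal sketch):
#
#     from langchain_community.tools import ShellTool  # resolved via __getattr__
#
# Only the module registered for the requested name in `_module_lookup` is
# imported, which keeps importing `langchain_community.tools` itself cheap.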
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/tools/convert_to_openai.py | from langchain_core.utils.function_calling import (
format_tool_to_openai_function,
format_tool_to_openai_tool,
)
__all__ = ["format_tool_to_openai_function", "format_tool_to_openai_tool"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/tools/yahoo_finance_news.py | from typing import Iterable, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.documents import Document
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from requests.exceptions import HTTPError, ReadTimeout
from urllib3.exceptions import ConnectionError
from langchain_community.document_loaders.web_base import WebBaseLoader
class YahooFinanceNewsInput(BaseModel):
"""Input for the YahooFinanceNews tool."""
query: str = Field(description="company ticker query to look up")
class YahooFinanceNewsTool(BaseTool): # type: ignore[override, override]
"""Tool that searches financial news on Yahoo Finance."""
name: str = "yahoo_finance_news"
description: str = (
"Useful for when you need to find financial news "
"about a public company. "
"Input should be a company ticker. "
"For example, AAPL for Apple, MSFT for Microsoft."
)
top_k: int = 10
"""The number of results to return."""
args_schema: Type[BaseModel] = YahooFinanceNewsInput
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Yahoo Finance News tool."""
try:
import yfinance
except ImportError:
raise ImportError(
"Could not import yfinance python package. "
"Please install it with `pip install yfinance`."
)
company = yfinance.Ticker(query)
try:
if company.isin is None:
return f"Company ticker {query} not found."
except (HTTPError, ReadTimeout, ConnectionError):
return f"Company ticker {query} not found."
links = []
try:
links = [n["link"] for n in company.news if n["type"] == "STORY"]
except (HTTPError, ReadTimeout, ConnectionError):
if not links:
return f"No news found for company that searched with {query} ticker."
if not links:
return f"No news found for company that searched with {query} ticker."
loader = WebBaseLoader(web_paths=links)
docs = loader.load()
result = self._format_results(docs, query)
if not result:
return f"No news found for company that searched with {query} ticker."
return result
@staticmethod
def _format_results(docs: Iterable[Document], query: str) -> str:
doc_strings = [
"\n".join([doc.metadata["title"], doc.metadata["description"]])
for doc in docs
if query in doc.metadata["description"] or query in doc.metadata["title"]
]
return "\n\n".join(doc_strings)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/ainetwork/value.py | import builtins
import json
from typing import Optional, Type, Union
from langchain_core.callbacks import AsyncCallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.ainetwork.base import AINBaseTool, OperationType
class ValueSchema(BaseModel):
"""Schema for value operations."""
type: OperationType = Field(...)
path: str = Field(..., description="Blockchain reference path")
value: Optional[Union[int, str, float, dict]] = Field(
None, description="Value to be set at the path"
)
class AINValueOps(AINBaseTool): # type: ignore[override, override]
"""Tool for value operations."""
name: str = "AINvalueOps"
description: str = """
Covers reading and writing values in the AINetwork Blockchain database.
## SET
- Set a value at a given path
### Example
- type: SET
- path: /apps/langchain_test_1/object
- value: {1: 2, "34": 56}
## GET
- Retrieve a value at a given path
### Example
- type: GET
- path: /apps/langchain_test_1/DB
## Special paths
- `/accounts/<address>/balance`: Account balance
- `/accounts/<address>/nonce`: Account nonce
- `/apps`: Applications
- `/consensus`: Consensus
- `/checkin`: Check-in
- `/deposit/<service id>/<address>/<deposit id>`: Deposit
- `/deposit_accounts/<service id>/<address>/<account id>`: Deposit accounts
- `/escrow`: Escrow
- `/payments`: Payment
- `/sharding`: Sharding
- `/token/name`: Token name
- `/token/symbol`: Token symbol
- `/token/total_supply`: Token total supply
- `/transfer/<address from>/<address to>/<key>/value`: Transfer
- `/withdraw/<service id>/<address>/<withdraw id>`: Withdraw
"""
args_schema: Type[BaseModel] = ValueSchema
async def _arun(
self,
type: OperationType,
path: str,
value: Optional[Union[int, str, float, dict]] = None,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
from ain.types import ValueOnlyTransactionInput
try:
if type is OperationType.SET:
if value is None:
raise ValueError("'value' is required for SET operation.")
res = await self.interface.db.ref(path).setValue(
transactionInput=ValueOnlyTransactionInput(value=value)
)
elif type is OperationType.GET:
res = await self.interface.db.ref(path).getValue()
else:
raise ValueError(f"Unsupported 'type': {type}.")
return json.dumps(res, ensure_ascii=False)
except Exception as e:
return f"{builtins.type(e).__name__}: {str(e)}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/ainetwork/base.py | from __future__ import annotations
import asyncio
import threading
from enum import Enum
from typing import TYPE_CHECKING, Any, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.tools.ainetwork.utils import authenticate
if TYPE_CHECKING:
from ain.ain import Ain
class OperationType(str, Enum):
"""Type of operation as enumerator."""
SET = "SET"
GET = "GET"
class AINBaseTool(BaseTool): # type: ignore[override]
"""Base class for the AINetwork tools."""
interface: Ain = Field(default_factory=authenticate)
"""The interface object for the AINetwork Blockchain."""
def _run(
self,
*args: Any,
run_manager: Optional[CallbackManagerForToolRun] = None,
**kwargs: Any,
) -> str:
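# `_arun` holds the real implementation; this synchronous entry point bridges
# to it. If an event loop is already running (e.g. inside Jupyter), the
# coroutine is executed on a fresh loop in a worker thread; otherwise it runs
# to completion on the current (or a newly created) loop.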
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if loop.is_closed():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if loop.is_running():
result_container = []
def thread_target() -> None:
nonlocal result_container
new_loop = asyncio.new_event_loop()
asyncio.set_event_loop(new_loop)
try:
result_container.append(
new_loop.run_until_complete(self._arun(*args, **kwargs))
)
except Exception as e:
result_container.append(e)
finally:
new_loop.close()
thread = threading.Thread(target=thread_target)
thread.start()
thread.join()
result = result_container[0]
if isinstance(result, Exception):
raise result
return result
else:
result = loop.run_until_complete(self._arun(*args, **kwargs))
loop.close()
return result
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/ainetwork/rule.py | import builtins
import json
from typing import Optional, Type
from langchain_core.callbacks import AsyncCallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.ainetwork.base import AINBaseTool, OperationType
class RuleSchema(BaseModel):
"""Schema for owner operations."""
type: OperationType = Field(...)
path: str = Field(..., description="Path on the blockchain where the rule applies")
eval: Optional[str] = Field(None, description="eval string to determine permission")
class AINRuleOps(AINBaseTool): # type: ignore[override, override]
"""Tool for owner operations."""
name: str = "AINruleOps"
description: str = """
Covers the write `rule` for the AINetwork Blockchain database. The SET type specifies write permissions using the `eval` variable as a JavaScript eval string.
In order to perform an AINvalueOps SET at the path, the execution result of the `eval` string must be true.
## Path Rules
1. Allowed characters for directory: `[a-zA-Z_0-9]`
2. Use `$<key>` for template variables as directory.
## Eval String Special Variables
- auth.addr: Address of the writer for the path
- newData: New data for the path
- data: Current data for the path
- currentTime: Time in seconds
- lastBlockNumber: Latest processed block number
## Eval String Functions
- getValue(<path>)
- getRule(<path>)
- getOwner(<path>)
- getFunction(<path>)
- evalRule(<path>, <value to set>, auth, currentTime)
- evalOwner(<path>, 'write_owner', auth)
## SET Example
- type: SET
- path: /apps/langchain_project_1/$from/$to/$img
- eval: auth.addr===$from&&!getValue('/apps/image_db/'+$img)
## GET Example
- type: GET
- path: /apps/langchain_project_1
""" # noqa: E501
args_schema: Type[BaseModel] = RuleSchema
async def _arun(
self,
type: OperationType,
path: str,
eval: Optional[str] = None,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
from ain.types import ValueOnlyTransactionInput
try:
if type is OperationType.SET:
if eval is None:
raise ValueError("'eval' is required for SET operation.")
res = await self.interface.db.ref(path).setRule(
transactionInput=ValueOnlyTransactionInput(
value={".rule": {"write": eval}}
)
)
elif type is OperationType.GET:
res = await self.interface.db.ref(path).getRule()
else:
raise ValueError(f"Unsupported 'type': {type}.")
return json.dumps(res, ensure_ascii=False)
except Exception as e:
return f"{builtins.type(e).__name__}: {str(e)}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/ainetwork/transfer.py | import json
from typing import Optional, Type
from langchain_core.callbacks import AsyncCallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.ainetwork.base import AINBaseTool
class TransferSchema(BaseModel):
"""Schema for transfer operations."""
address: str = Field(..., description="Address to transfer AIN to")
amount: int = Field(..., description="Amount of AIN to transfer")
class AINTransfer(AINBaseTool): # type: ignore[override, override]
"""Tool for transfer operations."""
name: str = "AINtransfer"
description: str = "Transfers AIN to a specified address"
args_schema: Type[TransferSchema] = TransferSchema
async def _arun(
self,
address: str,
amount: int,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
try:
res = await self.interface.wallet.transfer(address, amount, nonce=-1)
return json.dumps(res, ensure_ascii=False)
except Exception as e:
return f"{type(e).__name__}: {str(e)}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/ainetwork/utils.py | """AINetwork Blockchain tool utils."""
from __future__ import annotations
import os
from typing import TYPE_CHECKING, Literal, Optional
if TYPE_CHECKING:
from ain.ain import Ain
def authenticate(network: Optional[Literal["mainnet", "testnet"]] = "testnet") -> Ain:
"""Authenticate using the AIN Blockchain"""
try:
from ain.ain import Ain
except ImportError as e:
raise ImportError(
"Cannot import ain-py related modules. Please install the package with "
"`pip install ain-py`."
) from e
if network == "mainnet":
provider_url = "https://mainnet-api.ainetwork.ai/"
chain_id = 1
if "AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY" in os.environ:
private_key = os.environ["AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY"]
else:
raise EnvironmentError(
"Error: The AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY environmental variable "
"has not been set."
)
elif network == "testnet":
provider_url = "https://testnet-api.ainetwork.ai/"
chain_id = 0
if "AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY" in os.environ:
private_key = os.environ["AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY"]
else:
raise EnvironmentError(
"Error: The AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY environmental variable "
"has not been set."
)
elif network is None:
if (
"AIN_BLOCKCHAIN_PROVIDER_URL" in os.environ
and "AIN_BLOCKCHAIN_CHAIN_ID" in os.environ
and "AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY" in os.environ
):
provider_url = os.environ["AIN_BLOCKCHAIN_PROVIDER_URL"]
chain_id = int(os.environ["AIN_BLOCKCHAIN_CHAIN_ID"])
private_key = os.environ["AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY"]
else:
raise EnvironmentError(
"Error: The AIN_BLOCKCHAIN_PROVIDER_URL and "
"AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY and AIN_BLOCKCHAIN_CHAIN_ID "
"environmental variable has not been set."
)
else:
raise ValueError(f"Unsupported 'network': {network}")
ain = Ain(provider_url, chain_id)
ain.wallet.addAndSetDefaultAccount(private_key)
return ain
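# Example (a minimal sketch; requires `ain-py` and the environment variables
# described above):
#
#     os.environ["AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY"] = "<your private key>"
#     ain = authenticate(network="testnet")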
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/ainetwork/owner.py | import builtins
import json
from typing import List, Optional, Type, Union
from langchain_core.callbacks import AsyncCallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.ainetwork.base import AINBaseTool, OperationType
class RuleSchema(BaseModel):
"""Schema for owner operations."""
type: OperationType = Field(...)
path: str = Field(..., description="Blockchain reference path")
address: Optional[Union[str, List[str]]] = Field(
None, description="A single address or a list of addresses"
)
write_owner: Optional[bool] = Field(
False, description="Authority to edit the `owner` property of the path"
)
write_rule: Optional[bool] = Field(
False, description="Authority to edit `write rule` for the path"
)
write_function: Optional[bool] = Field(
False, description="Authority to `set function` for the path"
)
branch_owner: Optional[bool] = Field(
False, description="Authority to initialize `owner` of sub-paths"
)
class AINOwnerOps(AINBaseTool): # type: ignore[override, override]
"""Tool for owner operations."""
name: str = "AINownerOps"
description: str = """
Rules for `owner` in AINetwork Blockchain database.
An address set as `owner` can modify permissions according to its granted authorities
## Path Rule
- (/[a-zA-Z_0-9]+)+
- Permission checks ascend from the most specific (child) path to broader (parent) paths until an `owner` is located.
## Address Rules
- 0x[0-9a-fA-F]{40}: 40-digit hexadecimal address
- *: All addresses permitted
- Defaults to the current session's address
## SET
- `SET` alters permissions for specific addresses, while other addresses remain unaffected.
- When removing an address of `owner`, set all authorities for that address to false.
- message `write_owner permission evaluated false` if fail
### Example
- type: SET
- path: /apps/langchain
- address: [<address 1>, <address 2>]
- write_owner: True
- write_rule: True
- write_function: True
- branch_owner: True
## GET
- Provides all addresses with `owner` permissions and their authorities in the path.
### Example
- type: GET
- path: /apps/langchain
""" # noqa: E501
args_schema: Type[BaseModel] = RuleSchema
async def _arun(
self,
type: OperationType,
path: str,
address: Optional[Union[str, List[str]]] = None,
write_owner: Optional[bool] = None,
write_rule: Optional[bool] = None,
write_function: Optional[bool] = None,
branch_owner: Optional[bool] = None,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
from ain.types import ValueOnlyTransactionInput
try:
if type is OperationType.SET:
if address is None:
address = self.interface.wallet.defaultAccount.address
if isinstance(address, str):
address = [address]
res = await self.interface.db.ref(path).setOwner(
transactionInput=ValueOnlyTransactionInput(
value={
".owner": {
"owners": {
address: {
"write_owner": write_owner or False,
"write_rule": write_rule or False,
"write_function": write_function or False,
"branch_owner": branch_owner or False,
}
for address in address
}
}
}
)
)
elif type is OperationType.GET:
res = await self.interface.db.ref(path).getOwner()
else:
raise ValueError(f"Unsupported 'type': {type}.")
return json.dumps(res, ensure_ascii=False)
except Exception as e:
return f"{builtins.type(e).__name__}: {str(e)}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/ainetwork/app.py | import builtins
import json
from enum import Enum
from typing import List, Optional, Type, Union
from langchain_core.callbacks import AsyncCallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.ainetwork.base import AINBaseTool
class AppOperationType(str, Enum):
"""Type of app operation as enumerator."""
SET_ADMIN = "SET_ADMIN"
GET_CONFIG = "GET_CONFIG"
class AppSchema(BaseModel):
"""Schema for app operations."""
type: AppOperationType = Field(...)
appName: str = Field(..., description="Name of the application on the blockchain")
address: Optional[Union[str, List[str]]] = Field(
None,
description=(
"A single address or a list of addresses. Default: current session's "
"address"
),
)
class AINAppOps(AINBaseTool): # type: ignore[override, override]
"""Tool for app operations."""
name: str = "AINappOps"
description: str = """
Create an app in the AINetwork Blockchain database by creating the /apps/<appName> path.
An address set as `admin` can grant `owner` rights to other addresses (refer to `AINownerOps` for more details).
Also, `admin` is initialized to have all `owner` permissions and `rule` allowed for that path.
## appName Rule
- [a-z_0-9]+
## address Rules
- 0x[0-9a-fA-F]{40}
- Defaults to the current session's address
- Multiple addresses can be specified if needed
## SET_ADMIN Example 1
- type: SET_ADMIN
- appName: ain_project
### Result:
1. Path /apps/ain_project created.
2. Current session's address registered as admin.
## SET_ADMIN Example 2
- type: SET_ADMIN
- appName: test_project
- address: [<address1>, <address2>]
### Result:
1. Path /apps/test_project created.
2. <address1> and <address2> registered as admin.
""" # noqa: E501
args_schema: Type[BaseModel] = AppSchema
async def _arun(
self,
type: AppOperationType,
appName: str,
address: Optional[Union[str, List[str]]] = None,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
from ain.types import ValueOnlyTransactionInput
from ain.utils import getTimestamp
try:
if type is AppOperationType.SET_ADMIN:
if address is None:
address = self.interface.wallet.defaultAccount.address
if isinstance(address, str):
address = [address]
res = await self.interface.db.ref(
f"/manage_app/{appName}/create/{getTimestamp()}"
).setValue(
transactionInput=ValueOnlyTransactionInput(
value={"admin": {address: True for address in address}}
)
)
elif type is AppOperationType.GET_CONFIG:
res = await self.interface.db.ref(
f"/manage_app/{appName}/config"
).getValue()
else:
raise ValueError(f"Unsupported 'type': {type}.")
return json.dumps(res, ensure_ascii=False)
except Exception as e:
return f"{builtins.type(e).__name__}: {str(e)}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/dataforseo_api_search/tool.py | """Tool for the DataForSeo SERP API."""
from typing import Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.dataforseo_api_search import DataForSeoAPIWrapper
class DataForSeoAPISearchRun(BaseTool): # type: ignore[override]
"""Tool that queries the DataForSeo Google search API."""
name: str = "dataforseo_api_search"
description: str = (
"A robust Google Search API provided by DataForSeo."
"This tool is handy when you need information about trending topics "
"or current events."
)
api_wrapper: DataForSeoAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return str(self.api_wrapper.run(query))
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return (await self.api_wrapper.arun(query)).__str__()
class DataForSeoAPISearchResults(BaseTool): # type: ignore[override]
"""Tool that queries the DataForSeo Google Search API
and get back json."""
name: str = "dataforseo_results_json"
description: str = (
"A comprehensive Google Search API provided by DataForSeo."
"This tool is useful for obtaining real-time data on current events "
"or popular searches."
"The input should be a search query and the output is a JSON object "
"of the query results."
)
api_wrapper: DataForSeoAPIWrapper = Field(default_factory=DataForSeoAPIWrapper)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return str(self.api_wrapper.results(query))
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return (await self.api_wrapper.aresults(query)).__str__()
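# A minimal usage sketch, assuming DataForSeo credentials are supplied through
# the DATAFORSEO_LOGIN / DATAFORSEO_PASSWORD environment variables (the
# variable names are an assumption for illustration).
if __name__ == "__main__":
    # Plain-text summary of the results.
    search = DataForSeoAPISearchRun(api_wrapper=DataForSeoAPIWrapper())
    print(search.invoke("latest langchain release"))

    # Structured JSON results via the second tool (wrapper built by default_factory).
    results = DataForSeoAPISearchResults()
    print(results.invoke("latest langchain release"))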
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/dataforseo_api_search/__init__.py | """DataForSeo API Toolkit."""
"""Tool for the DataForSeo SERP API."""
from langchain_community.tools.dataforseo_api_search.tool import (
    DataForSeoAPISearchResults,
    DataForSeoAPISearchRun,
)
__all__ = ["DataForSeoAPISearchRun", "DataForSeoAPISearchResults"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/powerbi/tool.py | """Tools for interacting with a Power BI dataset."""
import logging
from time import perf_counter
from typing import Any, Dict, Optional, Tuple
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import ConfigDict, Field, model_validator
from langchain_community.chat_models.openai import _import_tiktoken
from langchain_community.tools.powerbi.prompt import (
BAD_REQUEST_RESPONSE,
DEFAULT_FEWSHOT_EXAMPLES,
RETRY_RESPONSE,
)
from langchain_community.utilities.powerbi import PowerBIDataset, json_to_md
logger = logging.getLogger(__name__)
class QueryPowerBITool(BaseTool): # type: ignore[override]
"""Tool for querying a Power BI Dataset."""
name: str = "query_powerbi"
description: str = """
Input to this tool is a detailed question about the dataset, output is a result from the dataset. It will try to answer the question using the dataset, and if it cannot, it will ask for clarification.
Example Input: "How many rows are in table1?"
""" # noqa: E501
llm_chain: Any = None
powerbi: PowerBIDataset = Field(exclude=True)
examples: Optional[str] = DEFAULT_FEWSHOT_EXAMPLES
session_cache: Dict[str, Any] = Field(default_factory=dict, exclude=True)
max_iterations: int = 5
output_token_limit: int = 4000
tiktoken_model_name: Optional[str] = None # "cl100k_base"
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
@model_validator(mode="before")
@classmethod
def validate_llm_chain_input_variables( # pylint: disable=E0213
cls, values: dict
) -> dict:
"""Make sure the LLM chain has the correct input variables."""
llm_chain = values["llm_chain"]
for var in llm_chain.prompt.input_variables:
if var not in ["tool_input", "tables", "schemas", "examples"]:
raise ValueError(
    "LLM chain for QueryPowerBITool must have input variables "
    "['tool_input', 'tables', 'schemas', 'examples'], "
    f"found {llm_chain.prompt.input_variables}"
)
return values
def _check_cache(self, tool_input: str) -> Optional[str]:
"""Check if the input is present in the cache.
If the value is a bad request, overwrite with the escalated version,
if not present return None."""
if tool_input not in self.session_cache:
return None
return self.session_cache[tool_input]
def _run(
self,
tool_input: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
**kwargs: Any,
) -> str:
"""Execute the query, return the results or an error message."""
if cache := self._check_cache(tool_input):
logger.debug("Found cached result for %s: %s", tool_input, cache)
return cache
try:
logger.info("Running PBI Query Tool with input: %s", tool_input)
query = self.llm_chain.predict(
tool_input=tool_input,
tables=self.powerbi.get_table_names(),
schemas=self.powerbi.get_schemas(),
examples=self.examples,
callbacks=run_manager.get_child() if run_manager else None,
)
except Exception as exc: # pylint: disable=broad-except
self.session_cache[tool_input] = f"Error on call to LLM: {exc}"
return self.session_cache[tool_input]
if query == "I cannot answer this":
self.session_cache[tool_input] = query
return self.session_cache[tool_input]
logger.info("PBI Query:\n%s", query)
start_time = perf_counter()
pbi_result = self.powerbi.run(command=query)
end_time = perf_counter()
logger.debug("PBI Result: %s", pbi_result)
logger.debug(f"PBI Query duration: {end_time - start_time:0.6f}")
result, error = self._parse_output(pbi_result)
if error is not None and "TokenExpired" in error:
self.session_cache[tool_input] = (
"Authentication token expired or invalid, please try reauthenticate."
)
return self.session_cache[tool_input]
iterations = kwargs.get("iterations", 0)
if error and iterations < self.max_iterations:
return self._run(
tool_input=RETRY_RESPONSE.format(
tool_input=tool_input, query=query, error=error
),
run_manager=run_manager,
iterations=iterations + 1,
)
self.session_cache[tool_input] = (
result if result else BAD_REQUEST_RESPONSE.format(error=error)
)
return self.session_cache[tool_input]
async def _arun(
self,
tool_input: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
**kwargs: Any,
) -> str:
"""Execute the query, return the results or an error message."""
if cache := self._check_cache(tool_input):
logger.debug("Found cached result for %s: %s", tool_input, cache)
return f"{cache}, from cache, you have already asked this question."
try:
logger.info("Running PBI Query Tool with input: %s", tool_input)
query = await self.llm_chain.apredict(
tool_input=tool_input,
tables=self.powerbi.get_table_names(),
schemas=self.powerbi.get_schemas(),
examples=self.examples,
callbacks=run_manager.get_child() if run_manager else None,
)
except Exception as exc: # pylint: disable=broad-except
self.session_cache[tool_input] = f"Error on call to LLM: {exc}"
return self.session_cache[tool_input]
if query == "I cannot answer this":
self.session_cache[tool_input] = query
return self.session_cache[tool_input]
logger.info("PBI Query: %s", query)
start_time = perf_counter()
pbi_result = await self.powerbi.arun(command=query)
end_time = perf_counter()
logger.debug("PBI Result: %s", pbi_result)
logger.debug(f"PBI Query duration: {end_time - start_time:0.6f}")
result, error = self._parse_output(pbi_result)
if error is not None and ("TokenExpired" in error or "TokenError" in error):
self.session_cache[tool_input] = (
"Authentication token expired or invalid, please try to reauthenticate or check the scope of the credential." # noqa: E501
)
return self.session_cache[tool_input]
iterations = kwargs.get("iterations", 0)
if error and iterations < self.max_iterations:
return await self._arun(
tool_input=RETRY_RESPONSE.format(
tool_input=tool_input, query=query, error=error
),
run_manager=run_manager,
iterations=iterations + 1,
)
self.session_cache[tool_input] = (
result if result else BAD_REQUEST_RESPONSE.format(error=error)
)
return self.session_cache[tool_input]
def _parse_output(
self, pbi_result: Dict[str, Any]
) -> Tuple[Optional[str], Optional[Any]]:
"""Parse the output of the query to a markdown table."""
if "results" in pbi_result:
rows = pbi_result["results"][0]["tables"][0]["rows"]
if len(rows) == 0:
logger.info("0 records in result, query was valid.")
return (
None,
"0 rows returned, this might be correct, but please validate if all filter values were correct?", # noqa: E501
)
result = json_to_md(rows)
too_long, length = self._result_too_large(result)
if too_long:
return (
f"Result too large, please try to be more specific or use the `TOPN` function. The result is {length} tokens long, the limit is {self.output_token_limit} tokens.", # noqa: E501
None,
)
return result, None
if "error" in pbi_result:
if (
"pbi.error" in pbi_result["error"]
and "details" in pbi_result["error"]["pbi.error"]
):
return None, pbi_result["error"]["pbi.error"]["details"][0]["detail"]
return None, pbi_result["error"]
return None, pbi_result
def _result_too_large(self, result: str) -> Tuple[bool, int]:
"""Tokenize the output of the query."""
if self.tiktoken_model_name:
tiktoken_ = _import_tiktoken()
encoding = tiktoken_.encoding_for_model(self.tiktoken_model_name)
length = len(encoding.encode(result))
logger.info("Result length: %s", length)
return length > self.output_token_limit, length
return False, 0
class InfoPowerBITool(BaseTool): # type: ignore[override]
"""Tool for getting metadata about a PowerBI Dataset."""
name: str = "schema_powerbi"
description: str = """
Input to this tool is a comma-separated list of tables, output is the schema and sample rows for those tables.
Be sure that the tables actually exist by calling list_tables_powerbi first!
Example Input: "table1, table2, table3"
""" # noqa: E501
powerbi: PowerBIDataset = Field(exclude=True)
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def _run(
self,
tool_input: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Get the schema for tables in a comma-separated list."""
return self.powerbi.get_table_info(tool_input.split(", "))
async def _arun(
self,
tool_input: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
return await self.powerbi.aget_table_info(tool_input.split(", "))
class ListPowerBITool(BaseTool): # type: ignore[override]
"""Tool for getting tables names."""
name: str = "list_tables_powerbi"
description: str = "Input is an empty string, output is a comma separated list of tables in the database." # noqa: E501 # pylint: disable=C0301
powerbi: PowerBIDataset = Field(exclude=True)
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def _run(
self,
tool_input: Optional[str] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Get the names of the tables."""
return ", ".join(self.powerbi.get_table_names())
async def _arun(
self,
tool_input: Optional[str] = None,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Get the names of the tables."""
return ", ".join(self.powerbi.get_table_names())
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/powerbi/__init__.py | """Tools for interacting with a PowerBI dataset."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/powerbi/prompt.py | # flake8: noqa
QUESTION_TO_QUERY_BASE = """
Answer the question below with a DAX query that can be sent to Power BI. DAX queries have a simple syntax comprised of just one required keyword, EVALUATE, and several optional keywords: ORDER BY, START AT, DEFINE, MEASURE, VAR, TABLE, and COLUMN. Each keyword defines a statement used for the duration of the query. Any time < or > are used in the text below it means that those values need to be replaced by table, columns or other things. If the question is not something you can answer with a DAX query, reply with "I cannot answer this" and the question will be escalated to a human.
Some DAX functions return a table instead of a scalar, and must be wrapped in a function that evaluates the table and returns a scalar; unless the table is a single column, single row table, then it is treated as a scalar value. Most DAX functions require one or more arguments, which can include tables, columns, expressions, and values. However, some functions, such as PI, do not require any arguments, but always require parentheses to indicate the null argument. For example, you must always type PI(), not PI. You can also nest functions within other functions.
Some commonly used functions are:
EVALUATE <table> - At the most basic level, a DAX query is an EVALUATE statement containing a table expression. At least one EVALUATE statement is required, however, a query can contain any number of EVALUATE statements.
EVALUATE <table> ORDER BY <expression> ASC or DESC - The optional ORDER BY keyword defines one or more expressions used to sort query results. Any expression that can be evaluated for each row of the result is valid.
EVALUATE <table> ORDER BY <expression> ASC or DESC START AT <value> or <parameter> - The optional START AT keyword is used inside an ORDER BY clause. It defines the value at which the query results begin.
DEFINE MEASURE | VAR; EVALUATE <table> - The optional DEFINE keyword introduces one or more calculated entity definitions that exist only for the duration of the query. Definitions precede the EVALUATE statement and are valid for all EVALUATE statements in the query. Definitions can be variables, measures, tables, and columns. Definitions can reference other definitions that appear before or after the current definition. At least one definition is required if the DEFINE keyword is included in a query.
MEASURE <table name>[<measure name>] = <scalar expression> - Introduces a measure definition in a DEFINE statement of a DAX query.
VAR <name> = <expression> - Stores the result of an expression as a named variable, which can then be passed as an argument to other measure expressions. Once resultant values have been calculated for a variable expression, those values do not change, even if the variable is referenced in another expression.
FILTER(<table>,<filter>) - Returns a table that represents a subset of another table or expression, where <filter> is a Boolean expression that is to be evaluated for each row of the table. For example, [Amount] > 0 or [Region] = "France"
ROW(<name>, <expression>) - Returns a table with a single row containing values that result from the expressions given to each column.
TOPN(<n>, <table>, <OrderBy_Expression>, <Order>) - Returns a table with the top n rows from the specified table, sorted by the specified expression, in the order specified by 0 for descending, 1 for ascending, the default is 0. Multiple OrderBy_Expressions and Order pairs can be given, separated by a comma.
DISTINCT(<column>) - Returns a one-column table that contains the distinct values from the specified column. In other words, duplicate values are removed and only unique values are returned. This function cannot be used to return values into a cell or column on a worksheet; rather, you nest the DISTINCT function within a formula to get a list of distinct values that can be passed to another function and then counted, summed, or used for other operations.
DISTINCT(<table>) - Returns a table by removing duplicate rows from another table or expression.
Aggregation functions whose names contain an A handle booleans and empty strings in appropriate ways, while the same function without the A only uses the numeric values in a column. Function names with an X in them can take an expression as an argument; this will be evaluated for each row in the table and the result will be used in the regular function calculation. These are the functions:
COUNT(<column>), COUNTA(<column>), COUNTX(<table>,<expression>), COUNTAX(<table>,<expression>), COUNTROWS([<table>]), COUNTBLANK(<column>), DISTINCTCOUNT(<column>), DISTINCTCOUNTNOBLANK (<column>) - these are all variations of count functions.
AVERAGE(<column>), AVERAGEA(<column>), AVERAGEX(<table>,<expression>) - these are all variations of average functions.
MAX(<column>), MAXA(<column>), MAXX(<table>,<expression>) - these are all variations of max functions.
MIN(<column>), MINA(<column>), MINX(<table>,<expression>) - these are all variations of min functions.
PRODUCT(<column>), PRODUCTX(<table>,<expression>) - these are all variations of product functions.
SUM(<column>), SUMX(<table>,<expression>) - these are all variations of sum functions.
Date and time functions:
DATE(year, month, day) - Returns a date value that represents the specified year, month, and day.
DATEDIFF(date1, date2, <interval>) - Returns the difference between two date values, in the specified interval, that can be SECOND, MINUTE, HOUR, DAY, WEEK, MONTH, QUARTER, YEAR.
DATEVALUE(<date_text>) - Returns a date value that represents the specified date.
YEAR(<date>), QUARTER(<date>), MONTH(<date>), DAY(<date>), HOUR(<date>), MINUTE(<date>), SECOND(<date>) - Returns the part of the date for the specified date.
Finally, make sure to escape double quotes with a single backslash, and make sure that only table names have single quotes around them, while names of measures or the values of columns that you want to compare against are in escaped double quotes. Newlines are not necessary and can be skipped. The queries are serialized as JSON and so will have to be compliant with JSON syntax. Sometimes you will get a question, a DAX query and an error; in that case you need to rewrite the DAX query to get the correct answer.
The following tables exist: {tables}
and the schema's for some are given here:
{schemas}
Examples:
{examples}
"""
USER_INPUT = """
Question: {tool_input}
DAX:
"""
SINGLE_QUESTION_TO_QUERY = f"{QUESTION_TO_QUERY_BASE}{USER_INPUT}"
DEFAULT_FEWSHOT_EXAMPLES = """
Question: How many rows are in the table <table>?
DAX: EVALUATE ROW(\"Number of rows\", COUNTROWS(<table>))
----
Question: How many rows are in the table <table> where <column> is not empty?
DAX: EVALUATE ROW(\"Number of rows\", COUNTROWS(FILTER(<table>, <table>[<column>] <> \"\")))
----
Question: What was the average of <column> in <table>?
DAX: EVALUATE ROW(\"Average\", AVERAGE(<table>[<column>]))
----
"""
RETRY_RESPONSE = (
"{tool_input} DAX: {query} Error: {error}. Please supply a new DAX query."
)
BAD_REQUEST_RESPONSE = "Error on this question, the error was {error}, you can try to rephrase the question."
SCHEMA_ERROR_RESPONSE = "Bad request, are you sure the table name is correct?"
UNAUTHORIZED_RESPONSE = "Unauthorized. Try changing your authentication, do not retry."
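# A minimal sketch of how these templates compose: QUESTION_TO_QUERY_BASE and
# USER_INPUT are joined into SINGLE_QUESTION_TO_QUERY, which is then filled in
# with the table list, schemas, few-shot examples, and the user's question.
# The table and schema values below are placeholders.
if __name__ == "__main__":
    prompt = SINGLE_QUESTION_TO_QUERY.format(
        tables=["table1", "table2"],
        schemas="table1: [col_a (string), col_b (int)]",
        examples=DEFAULT_FEWSHOT_EXAMPLES,
        tool_input="How many rows are in table1?",
    )
    print(prompt)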
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/pubmed/tool.py | from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.pubmed import PubMedAPIWrapper
class PubmedQueryRun(BaseTool): # type: ignore[override]
"""Tool that searches the PubMed API."""
name: str = "pub_med"
description: str = (
"A wrapper around PubMed. "
"Useful for when you need to answer questions about medicine, health, "
"and biomedical topics "
"from biomedical literature, MEDLINE, life science journals, and online books. "
"Input should be a search query."
)
api_wrapper: PubMedAPIWrapper = Field(default_factory=PubMedAPIWrapper) # type: ignore[arg-type]
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the PubMed tool."""
return self.api_wrapper.run(query)
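# A minimal usage sketch; PubMed requires no API key, but the wrapper depends
# on the xmltodict package being installed. The query is only an example.
if __name__ == "__main__":
    tool = PubmedQueryRun()
    print(tool.invoke("What causes long COVID fatigue?"))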
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/pubmed/__init__.py | """PubMed API toolkit."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/golden_query/tool.py | """Tool for the Golden API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.golden_query import GoldenQueryAPIWrapper
class GoldenQueryRun(BaseTool): # type: ignore[override]
"""Tool that adds the capability to query using the Golden API and get back JSON."""
name: str = "golden_query"
description: str = (
"A wrapper around Golden Query API."
" Useful for getting entities that match"
" a natural language query from Golden's Knowledge Base."
"\nExample queries:"
"\n- companies in nanotech"
"\n- list of cloud providers starting in 2019"
"\nInput should be the natural language query."
"\nOutput is a paginated list of results or an error object"
" in JSON format."
)
api_wrapper: GoldenQueryAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Golden tool."""
return self.api_wrapper.run(query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/golden_query/__init__.py | """Golden API toolkit."""
from langchain_community.tools.golden_query.tool import GoldenQueryRun
__all__ = [
"GoldenQueryRun",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/google_cloud/texttospeech.py | from __future__ import annotations
import tempfile
from typing import TYPE_CHECKING, Any, Optional
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.vertexai import get_client_info
if TYPE_CHECKING:
from google.cloud import texttospeech # type: ignore[attr-defined]
def _import_google_cloud_texttospeech() -> Any:
try:
from google.cloud import texttospeech # type: ignore[attr-defined]
except ImportError as e:
raise ImportError(
"Cannot import google.cloud.texttospeech, please install "
"`pip install google-cloud-texttospeech`."
) from e
return texttospeech
def _encoding_file_extension_map(encoding: texttospeech.AudioEncoding) -> Optional[str]:
texttospeech = _import_google_cloud_texttospeech()
ENCODING_FILE_EXTENSION_MAP = {
texttospeech.AudioEncoding.LINEAR16: ".wav",
texttospeech.AudioEncoding.MP3: ".mp3",
texttospeech.AudioEncoding.OGG_OPUS: ".ogg",
texttospeech.AudioEncoding.MULAW: ".wav",
texttospeech.AudioEncoding.ALAW: ".wav",
}
return ENCODING_FILE_EXTENSION_MAP.get(encoding)
@deprecated(
since="0.0.33",
removal="1.0",
alternative_import="langchain_google_community.TextToSpeechTool",
)
class GoogleCloudTextToSpeechTool(BaseTool): # type: ignore[override]
"""Tool that queries the Google Cloud Text to Speech API.
In order to set this up, follow instructions at:
https://cloud.google.com/text-to-speech/docs/before-you-begin
"""
name: str = "google_cloud_texttospeech"
description: str = (
"A wrapper around Google Cloud Text-to-Speech. "
"Useful for when you need to synthesize audio from text. "
"It supports multiple languages, including English, German, Polish, "
"Spanish, Italian, French, Portuguese, and Hindi. "
)
_client: Any
def __init__(self, **kwargs: Any) -> None:
"""Initializes private fields."""
texttospeech = _import_google_cloud_texttospeech()
super().__init__(**kwargs)
self._client = texttospeech.TextToSpeechClient(
client_info=get_client_info(module="text-to-speech")
)
def _run(
self,
input_text: str,
language_code: str = "en-US",
ssml_gender: Optional[texttospeech.SsmlVoiceGender] = None,
audio_encoding: Optional[texttospeech.AudioEncoding] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
texttospeech = _import_google_cloud_texttospeech()
ssml_gender = ssml_gender or texttospeech.SsmlVoiceGender.NEUTRAL
audio_encoding = audio_encoding or texttospeech.AudioEncoding.MP3
response = self._client.synthesize_speech(
input=texttospeech.SynthesisInput(text=input_text),
voice=texttospeech.VoiceSelectionParams(
language_code=language_code, ssml_gender=ssml_gender
),
audio_config=texttospeech.AudioConfig(audio_encoding=audio_encoding),
)
suffix = _encoding_file_extension_map(audio_encoding)
with tempfile.NamedTemporaryFile(mode="bx", suffix=suffix, delete=False) as f:
f.write(response.audio_content)
return f.name
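# A minimal usage sketch, assuming google-cloud-texttospeech is installed and
# Google Cloud Application Default Credentials are configured (see the setup
# link in the class docstring). The tool returns the path of a temporary
# audio file containing the synthesized speech.
if __name__ == "__main__":
    tool = GoogleCloudTextToSpeechTool()
    audio_path = tool.invoke("Hello from LangChain!")
    print(audio_path)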
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/google_cloud/__init__.py | """Google Cloud Tools."""
from langchain_community.tools.google_cloud.texttospeech import (
GoogleCloudTextToSpeechTool,
)
__all__ = ["GoogleCloudTextToSpeechTool"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/google_search/tool.py | """Tool for the Google search API."""
from typing import Optional
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.google_search import GoogleSearchAPIWrapper
@deprecated(
since="0.0.33",
removal="1.0",
alternative_import="langchain_google_community.GoogleSearchRun",
)
class GoogleSearchRun(BaseTool): # type: ignore[override]
"""Tool that queries the Google search API."""
name: str = "google_search"
description: str = (
"A wrapper around Google Search. "
"Useful for when you need to answer questions about current events. "
"Input should be a search query."
)
api_wrapper: GoogleSearchAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_wrapper.run(query)
@deprecated(
since="0.0.33",
removal="1.0",
alternative_import="langchain_google_community.GoogleSearchResults",
)
class GoogleSearchResults(BaseTool): # type: ignore[override]
"""Tool that queries the Google Search API and gets back json."""
name: str = "google_search_results_json"
description: str = (
"A wrapper around Google Search. "
"Useful for when you need to answer questions about current events. "
"Input should be a search query. Output is a JSON array of the query results"
)
num_results: int = 4
api_wrapper: GoogleSearchAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return str(self.api_wrapper.results(query, self.num_results))
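# A minimal usage sketch, assuming the wrapper is configured through the
# GOOGLE_API_KEY and GOOGLE_CSE_ID environment variables (the variable names
# are an assumption here); both tools are deprecated in favor of the
# langchain_google_community equivalents.
if __name__ == "__main__":
    wrapper = GoogleSearchAPIWrapper()

    run_tool = GoogleSearchRun(api_wrapper=wrapper)
    print(run_tool.invoke("current weather in Amsterdam"))

    results_tool = GoogleSearchResults(api_wrapper=wrapper, num_results=2)
    print(results_tool.invoke("current weather in Amsterdam"))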
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/google_search/__init__.py | """Google Search API Toolkit."""
from langchain_community.tools.google_search.tool import (
GoogleSearchResults,
GoogleSearchRun,
)
__all__ = ["GoogleSearchRun", "GoogleSearchResults"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/github/tool.py | """
This tool allows agents to interact with the pygithub library
and operate on a GitHub repository.
To use this tool, you must first set as environment variables:
GITHUB_API_TOKEN
GITHUB_REPOSITORY -> format: {owner}/{repo}
"""
from typing import Any, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.github import GitHubAPIWrapper
class GitHubAction(BaseTool): # type: ignore[override]
"""Tool for interacting with the GitHub API."""
api_wrapper: GitHubAPIWrapper = Field(default_factory=GitHubAPIWrapper) # type: ignore[arg-type]
mode: str
name: str = ""
description: str = ""
args_schema: Optional[Type[BaseModel]] = None
def _run(
self,
instructions: Optional[str] = "",
run_manager: Optional[CallbackManagerForToolRun] = None,
**kwargs: Any,
) -> str:
"""Use the GitHub API to run an operation."""
if not instructions or instructions == "{}":
# Catch other forms of empty input that GPT-4 likes to send.
instructions = ""
if self.args_schema is not None:
field_names = list(self.args_schema.schema()["properties"].keys())
if len(field_names) > 1:
raise AssertionError(
f"Expected one argument in tool schema, got {field_names}."
)
if field_names:
field = field_names[0]
else:
field = ""
query = str(kwargs.get(field, ""))
else:
query = instructions
return self.api_wrapper.run(self.mode, query)
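# A minimal usage sketch, assuming the GitHub environment variables from the
# module docstring are set; the "get_issues" mode string is an assumption
# chosen for illustration (the GitHub toolkit normally pairs each mode with
# its corresponding prompt).
if __name__ == "__main__":
    tool = GitHubAction(
        api_wrapper=GitHubAPIWrapper(),
        mode="get_issues",
        name="Get Issues",
        description="Fetch a list of open issues from the configured repository.",
    )
    print(tool.invoke(""))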
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/github/__init__.py | """GitHub Tool"""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/github/prompt.py | # flake8: noqa
GET_ISSUES_PROMPT = """
This tool will fetch a list of the repository's issues. It will return the title, and issue number of 5 issues. It takes no input."""
GET_ISSUE_PROMPT = """
This tool will fetch the title, body, and comment thread of a specific issue. **VERY IMPORTANT**: You must specify the issue number as an integer."""
COMMENT_ON_ISSUE_PROMPT = """
This tool is useful when you need to comment on a GitHub issue. Simply pass in the issue number and the comment you would like to make. Please use this sparingly as we don't want to clutter the comment threads. **VERY IMPORTANT**: Your input to this tool MUST strictly follow these rules:
- First you must specify the issue number as an integer
- Then you must place two newlines
- Then you must specify your comment"""
CREATE_PULL_REQUEST_PROMPT = """
This tool is useful when you need to create a new pull request in a GitHub repository. **VERY IMPORTANT**: Your input to this tool MUST strictly follow these rules:
- First you must specify the title of the pull request
- Then you must place two newlines
- Then you must write the body or description of the pull request
When appropriate, always reference relevant issues in the body by using the syntax `closes #<issue_number>` like `closes #3, closes #6`.
For example, if you would like to create a pull request called "README updates" with contents "added contributors' names, closes #3", you would pass in the following string:
README updates
added contributors' names, closes #3"""
CREATE_FILE_PROMPT = """
This tool is a wrapper for the GitHub API, useful when you need to create a file in a GitHub repository. **VERY IMPORTANT**: Your input to this tool MUST strictly follow these rules:
- First you must specify which file to create by passing a full file path (**IMPORTANT**: the path must not start with a slash)
- Then you must specify the contents of the file
For example, if you would like to create a file called /test/test.txt with contents "test contents", you would pass in the following string:
test/test.txt
test contents"""
READ_FILE_PROMPT = """
This tool is a wrapper for the GitHub API, useful when you need to read the contents of a file. Simply pass in the full file path of the file you would like to read. **IMPORTANT**: the path must not start with a slash"""
UPDATE_FILE_PROMPT = """
This tool is a wrapper for the GitHub API, useful when you need to update the contents of a file in a GitHub repository. **VERY IMPORTANT**: Your input to this tool MUST strictly follow these rules:
- First you must specify which file to modify by passing a full file path (**IMPORTANT**: the path must not start with a slash)
- Then you must specify the old contents which you would like to replace wrapped in OLD <<<< and >>>> OLD
- Then you must specify the new contents which you would like to replace the old contents with wrapped in NEW <<<< and >>>> NEW
For example, if you would like to replace the contents of the file /test/test.txt from "old contents" to "new contents", you would pass in the following string:
test/test.txt
This is text that will not be changed
OLD <<<<
old contents
>>>> OLD
NEW <<<<
new contents
>>>> NEW"""
DELETE_FILE_PROMPT = """
This tool is a wrapper for the GitHub API, useful when you need to delete a file in a GitHub repository. Simply pass in the full file path of the file you would like to delete. **IMPORTANT**: the path must not start with a slash"""
GET_PR_PROMPT = """
This tool will fetch the title, body, comment thread and commit history of a specific Pull Request (by PR number). **VERY IMPORTANT**: You must specify the PR number as an integer."""
LIST_PRS_PROMPT = """
This tool will fetch a list of the repository's Pull Requests (PRs). It will return the title, and PR number of 5 PRs. It takes no input."""
LIST_PULL_REQUEST_FILES = """
This tool will fetch the full text of all files in a pull request (PR) given the PR number as an input. This is useful for understanding the code changes in a PR or contributing to it. **VERY IMPORTANT**: You must specify the PR number as an integer input parameter."""
OVERVIEW_EXISTING_FILES_IN_MAIN = """
This tool will provide an overview of all existing files in the main branch of the repository. It will list the file names, their respective paths, and a brief summary of their contents. This can be useful for understanding the structure and content of the repository, especially when navigating through large codebases. No input parameters are required."""
OVERVIEW_EXISTING_FILES_BOT_BRANCH = """
This tool will provide an overview of all files in your current working branch where you should implement changes. This is great for getting a high level overview of the structure of your code. No input parameters are required."""
SEARCH_ISSUES_AND_PRS_PROMPT = """
This tool will search for issues and pull requests in the repository. **VERY IMPORTANT**: You must specify the search query as a string input parameter."""
SEARCH_CODE_PROMPT = """
This tool will search for code in the repository. **VERY IMPORTANT**: You must specify the search query as a string input parameter."""
CREATE_REVIEW_REQUEST_PROMPT = """
This tool will create a review request on the open pull request that matches the current active branch. **VERY IMPORTANT**: You must specify the username of the person who is being requested as a string input parameter."""
LIST_BRANCHES_IN_REPO_PROMPT = """
This tool will fetch a list of all branches in the repository. It will return the name of each branch. No input parameters are required."""
SET_ACTIVE_BRANCH_PROMPT = """
This tool will set the active branch in the repository, similar to `git checkout <branch_name>` and `git switch -c <branch_name>`. **VERY IMPORTANT**: You must specify the name of the branch as a string input parameter."""
CREATE_BRANCH_PROMPT = """
This tool will create a new branch in the repository. **VERY IMPORTANT**: You must specify the name of the new branch as a string input parameter."""
GET_FILES_FROM_DIRECTORY_PROMPT = """
This tool will fetch a list of all files in a specified directory. **VERY IMPORTANT**: You must specify the path of the directory as a string input parameter."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/openai_dalle_image_generation/tool.py | """Tool for the OpenAI DALLE V1 Image Generation SDK."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper
class OpenAIDALLEImageGenerationTool(BaseTool): # type: ignore[override]
"""Tool that generates an image using OpenAI DALLE."""
name: str = "openai_dalle"
description: str = (
"A wrapper around OpenAI DALLE Image Generation. "
"Useful for when you need to generate an image of"
"people, places, paintings, animals, or other subjects. "
"Input should be a text prompt to generate an image."
)
api_wrapper: DallEAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the OpenAI DALLE Image Generation tool."""
return self.api_wrapper.run(query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/openai_dalle_image_generation/__init__.py | """Tool to generate an image using DALLE OpenAI V1 SDK."""
from langchain_community.tools.openai_dalle_image_generation.tool import (
OpenAIDALLEImageGenerationTool,
)
__all__ = ["OpenAIDALLEImageGenerationTool"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/wolfram_alpha/tool.py | """Tool for the Wolfram Alpha API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper
class WolframAlphaQueryRun(BaseTool): # type: ignore[override]
"""Tool that queries using the Wolfram Alpha SDK."""
name: str = "wolfram_alpha"
description: str = (
"A wrapper around Wolfram Alpha. "
"Useful for when you need to answer questions about Math, "
"Science, Technology, Culture, Society and Everyday Life. "
"Input should be a search query."
)
api_wrapper: WolframAlphaAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the WolframAlpha tool."""
return self.api_wrapper.run(query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/wolfram_alpha/__init__.py | """Wolfram Alpha API toolkit."""
from langchain_community.tools.wolfram_alpha.tool import WolframAlphaQueryRun
__all__ = [
"WolframAlphaQueryRun",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/gitlab/tool.py | """
This tool allows agents to interact with the python-gitlab library
and operate on a GitLab repository.
To use this tool, you must first set as environment variables:
GITLAB_PRIVATE_ACCESS_TOKEN
GITLAB_REPOSITORY -> format: {owner}/{repo}
"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.gitlab import GitLabAPIWrapper
class GitLabAction(BaseTool): # type: ignore[override]
"""Tool for interacting with the GitLab API."""
api_wrapper: GitLabAPIWrapper = Field(default_factory=GitLabAPIWrapper) # type: ignore[arg-type]
mode: str
name: str = ""
description: str = ""
def _run(
self,
instructions: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the GitLab API to run an operation."""
return self.api_wrapper.run(self.mode, instructions)
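# A minimal usage sketch, assuming the GitLab environment variables from the
# module docstring are set; the "get_issues" mode string is an assumption
# chosen for illustration (the GitLab toolkit normally pairs each mode with
# its corresponding prompt).
if __name__ == "__main__":
    tool = GitLabAction(
        api_wrapper=GitLabAPIWrapper(),
        mode="get_issues",
        name="Get Issues",
        description="Fetch a list of open issues from the configured repository.",
    )
    print(tool.invoke(""))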
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/gitlab/__init__.py | """GitLab Tool"""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/gitlab/prompt.py | # flake8: noqa
GET_ISSUES_PROMPT = """
This tool will fetch a list of the repository's issues. It will return the title, and issue number of 5 issues. It takes no input.
"""
GET_ISSUE_PROMPT = """
This tool will fetch the title, body, and comment thread of a specific issue. **VERY IMPORTANT**: You must specify the issue number as an integer.
"""
COMMENT_ON_ISSUE_PROMPT = """
This tool is useful when you need to comment on a GitLab issue. Simply pass in the issue number and the comment you would like to make. Please use this sparingly as we don't want to clutter the comment threads. **VERY IMPORTANT**: Your input to this tool MUST strictly follow these rules:
- First you must specify the issue number as an integer
- Then you must place two newlines
- Then you must specify your comment
"""
CREATE_PULL_REQUEST_PROMPT = """
This tool is useful when you need to create a new pull request in a GitLab repository. **VERY IMPORTANT**: Your input to this tool MUST strictly follow these rules:
- First you must specify the title of the pull request
- Then you must place two newlines
- Then you must write the body or description of the pull request
To reference an issue in the body, put its issue number directly after a #.
For example, if you would like to create a pull request called "README updates" with contents "added contributors' names, closes issue #3", you would pass in the following string:
README updates
added contributors' names, closes issue #3
"""
CREATE_FILE_PROMPT = """
This tool is a wrapper for the GitLab API, useful when you need to create a file in a GitLab repository. **VERY IMPORTANT**: Your input to this tool MUST strictly follow these rules:
- First you must specify which file to create by passing a full file path (**IMPORTANT**: the path must not start with a slash)
- Then you must specify the contents of the file
For example, if you would like to create a file called /test/test.txt with contents "test contents", you would pass in the following string:
test/test.txt
test contents
"""
READ_FILE_PROMPT = """
This tool is a wrapper for the GitLab API, useful when you need to read the contents of a file in a GitLab repository. Simply pass in the full file path of the file you would like to read. **IMPORTANT**: the path must not start with a slash
"""
UPDATE_FILE_PROMPT = """
This tool is a wrapper for the GitLab API, useful when you need to update the contents of a file in a GitLab repository. **VERY IMPORTANT**: Your input to this tool MUST strictly follow these rules:
- First you must specify which file to modify by passing a full file path (**IMPORTANT**: the path must not start with a slash)
- Then you must specify the old contents which you would like to replace wrapped in OLD <<<< and >>>> OLD
- Then you must specify the new contents which you would like to replace the old contents with wrapped in NEW <<<< and >>>> NEW
For example, if you would like to replace the contents of the file /test/test.txt from "old contents" to "new contents", you would pass in the following string:
test/test.txt
This is text that will not be changed
OLD <<<<
old contents
>>>> OLD
NEW <<<<
new contents
>>>> NEW
"""
DELETE_FILE_PROMPT = """
This tool is a wrapper for the GitLab API, useful when you need to delete a file in a GitLab repository. Simply pass in the full file path of the file you would like to delete. **IMPORTANT**: the path must not start with a slash
"""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/openweathermap/tool.py | """Tool for the OpenWeatherMap API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.openweathermap import OpenWeatherMapAPIWrapper
class OpenWeatherMapQueryRun(BaseTool): # type: ignore[override]
"""Tool that queries the OpenWeatherMap API."""
api_wrapper: OpenWeatherMapAPIWrapper = Field(
default_factory=OpenWeatherMapAPIWrapper # type: ignore[arg-type]
)
name: str = "open_weather_map"
description: str = (
"A wrapper around OpenWeatherMap API. "
"Useful for fetching current weather information for a specified location. "
"Input should be a location string (e.g. London,GB)."
)
def _run(
self, location: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
"""Use the OpenWeatherMap tool."""
return self.api_wrapper.run(location)
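# A minimal usage sketch, assuming the wrapper reads an OpenWeatherMap key
# from the OPENWEATHERMAP_API_KEY environment variable (an assumption) and
# that pyowm is installed.
if __name__ == "__main__":
    tool = OpenWeatherMapQueryRun()
    print(tool.invoke("London,GB"))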
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/openweathermap/__init__.py | """OpenWeatherMap API toolkit."""
from langchain_community.tools.openweathermap.tool import OpenWeatherMapQueryRun
__all__ = [
"OpenWeatherMapQueryRun",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/nuclia/tool.py | """Tool for the Nuclia Understanding API.
Installation:
```bash
pip install --upgrade protobuf
pip install nucliadb-protos
```
"""
import asyncio
import base64
import logging
import mimetypes
import os
from typing import Any, Dict, Optional, Type, Union
import requests
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
logger = logging.getLogger(__name__)
class NUASchema(BaseModel):
"""Input for Nuclia Understanding API.
Attributes:
action: Action to perform. Either `push` or `pull`.
id: ID of the file to push or pull.
path: Path to the file to push (needed only for `push` action).
text: Text content to process (needed only for `push` action).
"""
action: str = Field(
...,
description="Action to perform. Either `push` or `pull`.",
)
id: str = Field(
...,
description="ID of the file to push or pull.",
)
path: Optional[str] = Field(
...,
description="Path to the file to push (needed only for `push` action).",
)
text: Optional[str] = Field(
...,
description="Text content to process (needed only for `push` action).",
)
class NucliaUnderstandingAPI(BaseTool): # type: ignore[override, override]
"""Tool to process files with the Nuclia Understanding API."""
name: str = "nuclia_understanding_api"
description: str = (
"A wrapper around Nuclia Understanding API endpoints. "
"Useful for when you need to extract text from any kind of files. "
)
args_schema: Type[BaseModel] = NUASchema
_results: Dict[str, Any] = {}
_config: Dict[str, Any] = {}
def __init__(self, enable_ml: bool = False) -> None:
zone = os.environ.get("NUCLIA_ZONE", "europe-1")
self._config["BACKEND"] = f"https://{zone}.nuclia.cloud/api/v1"
key = os.environ.get("NUCLIA_NUA_KEY")
if not key:
raise ValueError("NUCLIA_NUA_KEY environment variable not set")
else:
self._config["NUA_KEY"] = key
self._config["enable_ml"] = enable_ml
super().__init__() # type: ignore[call-arg]
def _run(
self,
action: str,
id: str,
path: Optional[str],
text: Optional[str],
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if action == "push":
self._check_params(path, text)
if path:
return self._pushFile(id, path)
if text:
return self._pushText(id, text)
elif action == "pull":
return self._pull(id)
return ""
async def _arun(
self,
action: str,
id: str,
path: Optional[str] = None,
text: Optional[str] = None,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
self._check_params(path, text)
if path:
self._pushFile(id, path)
if text:
self._pushText(id, text)
data = None
while True:
data = self._pull(id)
if data:
break
await asyncio.sleep(15)
return data
def _pushText(self, id: str, text: str) -> str:
field = {
"textfield": {"text": {"body": text, "format": 0}},
"processing_options": {"ml_text": self._config["enable_ml"]},
}
return self._pushField(id, field)
def _pushFile(self, id: str, content_path: str) -> str:
with open(content_path, "rb") as source_file:
response = requests.post(
self._config["BACKEND"] + "/processing/upload",
headers={
"content-type": mimetypes.guess_type(content_path)[0]
or "application/octet-stream",
"x-stf-nuakey": "Bearer " + self._config["NUA_KEY"],
},
data=source_file.read(),
)
if response.status_code != 200:
logger.info(
f"Error uploading {content_path}: "
f"{response.status_code} {response.text}"
)
return ""
else:
field = {
"filefield": {"file": f"{response.text}"},
"processing_options": {"ml_text": self._config["enable_ml"]},
}
return self._pushField(id, field)
def _pushField(self, id: str, field: Any) -> str:
logger.info(f"Pushing {id} in queue")
response = requests.post(
self._config["BACKEND"] + "/processing/push",
headers={
"content-type": "application/json",
"x-stf-nuakey": "Bearer " + self._config["NUA_KEY"],
},
json=field,
)
if response.status_code != 200:
logger.info(
f"Error pushing field {id}:" f"{response.status_code} {response.text}"
)
raise ValueError("Error pushing field")
else:
uuid = response.json()["uuid"]
logger.info(f"Field {id} pushed in queue, uuid: {uuid}")
self._results[id] = {"uuid": uuid, "status": "pending"}
return uuid
def _pull(self, id: str) -> str:
self._pull_queue()
result = self._results.get(id, None)
if not result:
logger.info(f"{id} not in queue")
return ""
elif result["status"] == "pending":
logger.info(f'Waiting for {result["uuid"]} to be processed')
return ""
else:
return result["data"]
def _pull_queue(self) -> None:
try:
from nucliadb_protos.writer_pb2 import BrokerMessage
except ImportError as e:
raise ImportError(
"nucliadb-protos is not installed. "
"Run `pip install nucliadb-protos` to install."
) from e
try:
from google.protobuf.json_format import MessageToJson
except ImportError as e:
raise ImportError(
"Unable to import google.protobuf, please install with "
"`pip install protobuf`."
) from e
res = requests.get(
self._config["BACKEND"] + "/processing/pull",
headers={
"x-stf-nuakey": "Bearer " + self._config["NUA_KEY"],
},
).json()
if res["status"] == "empty":
logger.info("Queue empty")
elif res["status"] == "ok":
payload = res["payload"]
pb = BrokerMessage()
pb.ParseFromString(base64.b64decode(payload))
uuid = pb.uuid
logger.info(f"Pulled {uuid} from queue")
matching_id = self._find_matching_id(uuid)
if not matching_id:
logger.info(f"No matching id for {uuid}")
else:
self._results[matching_id]["status"] = "done"
data = MessageToJson(
pb,
preserving_proto_field_name=True,
including_default_value_fields=True, # type: ignore
)
self._results[matching_id]["data"] = data
def _find_matching_id(self, uuid: str) -> Union[str, None]:
for id, result in self._results.items():
if result["uuid"] == uuid:
return id
return None
def _check_params(self, path: Optional[str], text: Optional[str]) -> None:
if not path and not text:
raise ValueError("File path or text is required")
if path and text:
raise ValueError("Cannot process both file and text on a single run")
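# A minimal usage sketch; NUCLIA_NUA_KEY must be set (checked in __init__
# above) and the file path is a placeholder. `push` enqueues the document and
# repeated `pull` calls (or the async variant, which polls) retrieve the
# processed result once it is ready.
if __name__ == "__main__":
    nua = NucliaUnderstandingAPI(enable_ml=False)
    nua.run({"action": "push", "id": "doc-1", "path": "./report.pdf", "text": None})
    # Later, or inside a polling loop:
    print(nua.run({"action": "pull", "id": "doc-1", "path": None, "text": None}))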
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/nuclia/__init__.py | from langchain_community.tools.nuclia.tool import NucliaUnderstandingAPI
__all__ = ["NucliaUnderstandingAPI"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/riza/command.py | """
Tool implementations for the Riza (https://riza.io) code interpreter API.
Documentation: https://docs.riza.io
API keys: https://dashboard.riza.io
"""
from typing import Any, Optional, Type
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool, ToolException
from pydantic import BaseModel, Field
class ExecPythonInput(BaseModel):
code: str = Field(description="the Python code to execute")
class ExecPython(BaseTool): # type: ignore[override, override]
"""Riza Code tool.
Setup:
Install ``langchain-community`` and ``rizaio`` and set environment variable ``RIZA_API_KEY``.
.. code-block:: bash
pip install -U langchain-community rizaio
export RIZA_API_KEY="your-api-key"
Instantiation:
.. code-block:: python
from langchain_community.tools.riza.command import ExecPython
tool = ExecPython()
Invocation with args:
.. code-block:: python
tool.invoke("x = 5; print(x)")
.. code-block:: python
'5\\n'
Invocation with ToolCall:
.. code-block:: python
tool.invoke({"args": {"code":"x = 5; print(x)"}, "id": "1", "name": tool.name, "type": "tool_call"})
""" # noqa: E501
name: str = "riza_exec_python"
description: str = """Execute Python code to solve problems.
The Python runtime does not have filesystem access. You can use the httpx
or requests library to make HTTP requests. Always print output to stdout."""
args_schema: Type[BaseModel] = ExecPythonInput
handle_tool_error: bool = True
client: Any = None
def __init__(self, **kwargs: Any) -> None:
try:
from rizaio import Riza
except ImportError as e:
raise ImportError(
"Couldn't import the `rizaio` package. "
"Try running `pip install rizaio`."
) from e
super().__init__(**kwargs)
self.client = Riza()
def _run(
self, code: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
output = self.client.command.exec(language="PYTHON", code=code)
if output.exit_code > 0:
raise ToolException(
f"Riza code execution returned a non-zero exit code. "
f"The output captured from stderr was:\n{output.stderr}"
)
return output.stdout
class ExecJavaScriptInput(BaseModel):
code: str = Field(description="the JavaScript code to execute")
class ExecJavaScript(BaseTool): # type: ignore[override, override]
"""A tool implementation to execute JavaScript via Riza's Code Interpreter API."""
name: str = "riza_exec_javascript"
description: str = """Execute JavaScript code to solve problems.
The JavaScript runtime does not have filesystem access, but can use fetch
to make HTTP requests and does include the global JSON object. Always print
output to stdout."""
args_schema: Type[BaseModel] = ExecJavaScriptInput
handle_tool_error: bool = True
client: Any = None
def __init__(self, **kwargs: Any) -> None:
try:
from rizaio import Riza
except ImportError as e:
raise ImportError(
"Couldn't import the `rizaio` package. "
"Try running `pip install rizaio`."
) from e
super().__init__(**kwargs)
self.client = Riza()
def _run(
self, code: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
output = self.client.command.exec(language="JAVASCRIPT", code=code)
if output.exit_code > 0:
raise ToolException(
f"Riza code execution returned a non-zero exit code. "
f"The output captured from stderr was:\n{output.stderr}"
)
return output.stdout
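# A minimal usage sketch for the JavaScript variant, mirroring the ExecPython
# docstring example; it assumes the rizaio package is installed and the
# RIZA_API_KEY environment variable is set.
if __name__ == "__main__":
    js_tool = ExecJavaScript()
    print(js_tool.invoke("const x = 5; console.log(x);"))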
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/ddg_search/tool.py | """Tool for the DuckDuckGo search API."""
import json
import warnings
from typing import Any, List, Literal, Optional, Type, Union
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
class DDGInput(BaseModel):
"""Input for the DuckDuckGo search tool."""
query: str = Field(description="search query to look up")
class DuckDuckGoSearchRun(BaseTool): # type: ignore[override, override]
"""DuckDuckGo tool.
Setup:
Install ``duckduckgo-search`` and ``langchain-community``.
.. code-block:: bash
pip install -U duckduckgo-search langchain-community
Instantiation:
.. code-block:: python
from langchain_community.tools import DuckDuckGoSearchResults
tool = DuckDuckGoSearchResults()
Invocation with args:
.. code-block:: python
tool.invoke("Obama")
.. code-block:: python
'[snippet: Users on X have been widely comparing the boost of support felt for Kamala Harris\' campaign to Barack Obama\'s in 2008., title: Surging Support For Kamala Harris Compared To Obama-Era Energy, link: https://www.msn.com/en-us/news/politics/surging-support-for-kamala-harris-compared-to-obama-era-energy/ar-BB1qzdC0, date: 2024-07-24T18:27:01+00:00, source: Newsweek on MSN.com], [snippet: Harris tried to emulate Obama\'s coalition in 2020 and failed. She may have a better shot at reaching young, Black, and Latino voters this time around., title: Harris May Follow Obama\'s Path to the White House After All, link: https://www.msn.com/en-us/news/politics/harris-may-follow-obama-s-path-to-the-white-house-after-all/ar-BB1qv9d4, date: 2024-07-23T22:42:00+00:00, source: Intelligencer on MSN.com], [snippet: The Republican presidential candidate said in an interview on Fox News that he "wouldn\'t be worried" about Michelle Obama running., title: Donald Trump Responds to Michelle Obama Threat, link: https://www.msn.com/en-us/news/politics/donald-trump-responds-to-michelle-obama-threat/ar-BB1qqtu5, date: 2024-07-22T18:26:00+00:00, source: Newsweek on MSN.com], [snippet: H eading into the weekend at his vacation home in Rehoboth Beach, Del., President Biden was reportedly stewing over Barack Obama\'s role in the orchestrated campaign to force him, title: Opinion | Barack Obama Strikes Again, link: https://www.msn.com/en-us/news/politics/opinion-barack-obama-strikes-again/ar-BB1qrfiy, date: 2024-07-22T21:28:00+00:00, source: The Wall Street Journal on MSN.com]'
Invocation with ToolCall:
.. code-block:: python
tool.invoke({"args": {"query":"Obama"}, "id": "1", "name": tool.name, "type": "tool_call"})
.. code-block:: python
ToolMessage(content="[snippet: Biden, Obama and the Clintons Will Speak at the Democratic Convention. The president, two of his predecessors and the party's 2016 nominee are said to be planning speeches at the party's ..., title: Biden, Obama and the Clintons Will Speak at the Democratic Convention ..., link: https://www.nytimes.com/2024/08/12/us/politics/dnc-speakers-biden-obama-clinton.html], [snippet: Barack Obama—with his wife, Michelle—being sworn in as the 44th president of the United States, January 20, 2009. Key events in the life of Barack Obama. Barack Obama (born August 4, 1961, Honolulu, Hawaii, U.S.) is the 44th president of the United States (2009-17) and the first African American to hold the office., title: Barack Obama | Biography, Parents, Education, Presidency, Books ..., link: https://www.britannica.com/biography/Barack-Obama], [snippet: Former President Barack Obama released a letter about President Biden's decision to drop out of the 2024 presidential race. Notably, Obama did not name or endorse Vice President Kamala Harris., title: Read Obama's full statement on Biden dropping out - CBS News, link: https://www.cbsnews.com/news/barack-obama-biden-dropping-out-2024-presidential-race-full-statement/], [snippet: Many of the marquee names in Democratic politics began quickly lining up behind Vice President Kamala Harris on Sunday, but one towering presence in the party held back: Barack Obama. The former ..., title: Why Obama Hasn't Endorsed Harris - The New York Times, link: https://www.nytimes.com/2024/07/21/us/politics/why-obama-hasnt-endorsed-harris.html]", name='duckduckgo_results_json', tool_call_id='1')
""" # noqa: E501
name: str = "duckduckgo_search"
description: str = (
"A wrapper around DuckDuckGo Search. "
"Useful for when you need to answer questions about current events. "
"Input should be a search query."
)
api_wrapper: DuckDuckGoSearchAPIWrapper = Field(
default_factory=DuckDuckGoSearchAPIWrapper
)
args_schema: Type[BaseModel] = DDGInput
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_wrapper.run(query)
class DuckDuckGoSearchResults(BaseTool): # type: ignore[override, override]
"""Tool that queries the DuckDuckGo search API and
returns the results in `output_format`."""
name: str = "duckduckgo_results_json"
description: str = (
"A wrapper around Duck Duck Go Search. "
"Useful for when you need to answer questions about current events. "
"Input should be a search query."
)
max_results: int = Field(alias="num_results", default=4)
api_wrapper: DuckDuckGoSearchAPIWrapper = Field(
default_factory=DuckDuckGoSearchAPIWrapper
)
backend: str = "text"
args_schema: Type[BaseModel] = DDGInput
keys_to_include: Optional[List[str]] = None
"""Which keys from each result to include. If None all keys are included."""
results_separator: str = ", "
"""Character for separating results."""
output_format: Literal["string", "json", "list"] = "string"
"""Output format of the search results.
- 'string': Return a concatenated string of the search results.
- 'json': Return a JSON string of the search results.
- 'list': Return a list of dictionaries of the search results.
"""
response_format: Literal["content_and_artifact"] = "content_and_artifact"
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> tuple[Union[List[dict], str], List[dict]]:
"""Use the tool."""
raw_results = self.api_wrapper.results(
query, self.max_results, source=self.backend
)
results = [
{
k: v
for k, v in d.items()
if not self.keys_to_include or k in self.keys_to_include
}
for d in raw_results
]
if self.output_format == "list":
return results, raw_results
elif self.output_format == "json":
return json.dumps(results), raw_results
elif self.output_format == "string":
res_strs = [", ".join([f"{k}: {v}" for k, v in d.items()]) for d in results]
return self.results_separator.join(res_strs), raw_results
else:
raise ValueError(
f"Invalid output_format: {self.output_format}. "
"Needs to be one of 'string', 'json', 'list'."
)
def DuckDuckGoSearchTool(*args: Any, **kwargs: Any) -> DuckDuckGoSearchRun:
"""
Deprecated. Use DuckDuckGoSearchRun instead.
Args:
*args:
**kwargs:
Returns:
DuckDuckGoSearchRun
"""
warnings.warn(
"DuckDuckGoSearchTool will be deprecated in the future. "
"Please use DuckDuckGoSearchRun instead.",
DeprecationWarning,
)
return DuckDuckGoSearchRun(*args, **kwargs)
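# --- Usage sketch (editor's addition, not part of the library) ---
# A minimal, hedged example of the output_format and keys_to_include options
# documented on DuckDuckGoSearchResults. It assumes `duckduckgo-search` is
# installed and that each raw result exposes "title" and "link" keys, as the
# docstring output above suggests.
if __name__ == "__main__":
    results_tool = DuckDuckGoSearchResults(
        output_format="list",  # return a list of dicts instead of one string
        keys_to_include=["title", "link"],  # keep only these keys per result
        num_results=2,  # alias that populates max_results
    )
    for item in results_tool.invoke("Obama"):
        print(item.get("title"), "->", item.get("link"))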
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/ddg_search/__init__.py | """DuckDuckGo Search API toolkit."""
from langchain_community.tools.ddg_search.tool import DuckDuckGoSearchRun
__all__ = ["DuckDuckGoSearchRun"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/google_jobs/tool.py | """Tool for the Google Trends"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.google_jobs import GoogleJobsAPIWrapper
class GoogleJobsQueryRun(BaseTool): # type: ignore[override]
"""Tool that queries the Google Jobs API."""
name: str = "google_jobs"
description: str = (
"A wrapper around Google Jobs Search. "
"Useful for when you need to get information about "
"job postings from Google Jobs. "
"Input should be a search query."
)
api_wrapper: GoogleJobsAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_wrapper.run(query)
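# --- Usage sketch (editor's addition, not part of the library) ---
# GoogleJobsQueryRun declares api_wrapper without a default, so a wrapper must
# be passed in explicitly. This hedged example assumes GoogleJobsAPIWrapper's
# SerpApi backend is available (SERPAPI_API_KEY set and the SerpApi client
# package installed); the query string is illustrative only.
if __name__ == "__main__":
    tool = GoogleJobsQueryRun(api_wrapper=GoogleJobsAPIWrapper())
    print(tool.invoke("entry level Python developer jobs"))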
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/google_jobs/__init__.py | """Google Jobs API Toolkit."""
from langchain_community.tools.google_jobs.tool import GoogleJobsQueryRun
__all__ = ["GoogleJobsQueryRun"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/jina_search/tool.py | from __future__ import annotations
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.jina_search import JinaSearchAPIWrapper
class JinaInput(BaseModel):
"""Input for the Jina search tool."""
query: str = Field(description="search query to look up")
class JinaSearch(BaseTool): # type: ignore[override]
"""Tool that queries the JinaSearch.
..versionadded:: 0.2.16
"""
name: str = "jina_search"
description: str = (
"Jina Reader allows you to ground your LLM with the latest information from "
"the web. "
"Jina Reader will search the web and return the top five results with their "
"URLs and contents, "
"each in clean, LLM-friendly text. This way, you can always keep your LLM "
"up-to-date, improve its factuality, and reduce hallucinations."
)
search_wrapper: JinaSearchAPIWrapper = Field(default_factory=JinaSearchAPIWrapper)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.search_wrapper.run(query)
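# --- Usage sketch (editor's addition, not part of the library) ---
# JinaSearch builds a default JinaSearchAPIWrapper via default_factory, so it
# can be instantiated without arguments here. This hedged example assumes the
# wrapper can reach Jina's search endpoint (a Jina API key may be required
# depending on your account/configuration); the query string is illustrative.
if __name__ == "__main__":
    tool = JinaSearch()
    print(tool.invoke("latest LangChain release notes"))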
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/jina_search/__init__.py | """Jina AI toolkit"""
from langchain_community.tools.jina_search.tool import JinaSearch
__all__ = ["JinaSearch"]
|