index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/requests.py | """Lightweight wrapper around requests library, with async support."""
from contextlib import asynccontextmanager
from typing import Any, AsyncGenerator, Dict, Literal, Optional, Union
import aiohttp
import requests
from pydantic import BaseModel, ConfigDict
from requests import Response
class Requests(BaseModel):
    """Wrapper around requests to handle auth and async.

    The main purpose of this wrapper is to handle authentication (by saving
    headers) and enable easy async methods on the same base object.
    """

    # Headers (e.g. auth tokens) sent with every request, sync and async.
    headers: Optional[Dict[str, str]] = None
    # Optional shared aiohttp session; when None, a throwaway session is
    # created for each async request.
    aiosession: Optional[aiohttp.ClientSession] = None
    # Auth object forwarded verbatim to requests / aiohttp.
    auth: Optional[Any] = None
    # TLS certificate verification flag for the sync methods.
    # NOTE(review): not forwarded to aiohttp in _arequest, so async calls
    # always use aiohttp's default SSL verification — confirm intended.
    verify: Optional[bool] = True

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )

    def get(self, url: str, **kwargs: Any) -> requests.Response:
        """Send a synchronous GET request and return the Response."""
        return requests.get(
            url, headers=self.headers, auth=self.auth, verify=self.verify, **kwargs
        )

    def post(self, url: str, data: Dict[str, Any], **kwargs: Any) -> requests.Response:
        """Send a synchronous POST request with ``data`` as the JSON body."""
        return requests.post(
            url,
            json=data,
            headers=self.headers,
            auth=self.auth,
            verify=self.verify,
            **kwargs,
        )

    def patch(self, url: str, data: Dict[str, Any], **kwargs: Any) -> requests.Response:
        """Send a synchronous PATCH request with ``data`` as the JSON body."""
        return requests.patch(
            url,
            json=data,
            headers=self.headers,
            auth=self.auth,
            verify=self.verify,
            **kwargs,
        )

    def put(self, url: str, data: Dict[str, Any], **kwargs: Any) -> requests.Response:
        """Send a synchronous PUT request with ``data`` as the JSON body."""
        return requests.put(
            url,
            json=data,
            headers=self.headers,
            auth=self.auth,
            verify=self.verify,
            **kwargs,
        )

    def delete(self, url: str, **kwargs: Any) -> requests.Response:
        """Send a synchronous DELETE request and return the Response."""
        return requests.delete(
            url, headers=self.headers, auth=self.auth, verify=self.verify, **kwargs
        )

    @asynccontextmanager
    async def _arequest(
        self, method: str, url: str, **kwargs: Any
    ) -> AsyncGenerator[aiohttp.ClientResponse, None]:
        """Make an async request, yielding the live ClientResponse.

        Uses ``self.aiosession`` when one was provided; otherwise opens (and
        closes) a one-shot ClientSession scoped to this single request.
        """
        if not self.aiosession:
            async with aiohttp.ClientSession() as session:
                async with session.request(
                    method,
                    url,
                    headers=self.headers,
                    auth=self.auth,
                    **kwargs,
                ) as response:
                    yield response
        else:
            async with self.aiosession.request(
                method,
                url,
                headers=self.headers,
                auth=self.auth,
                **kwargs,
            ) as response:
                yield response

    @asynccontextmanager
    async def aget(
        self, url: str, **kwargs: Any
    ) -> AsyncGenerator[aiohttp.ClientResponse, None]:
        """Async GET; yields the ClientResponse inside a context manager."""
        async with self._arequest("GET", url, **kwargs) as response:
            yield response

    @asynccontextmanager
    async def apost(
        self, url: str, data: Dict[str, Any], **kwargs: Any
    ) -> AsyncGenerator[aiohttp.ClientResponse, None]:
        """Async POST with ``data`` as the JSON body; yields the ClientResponse."""
        async with self._arequest("POST", url, json=data, **kwargs) as response:
            yield response

    @asynccontextmanager
    async def apatch(
        self, url: str, data: Dict[str, Any], **kwargs: Any
    ) -> AsyncGenerator[aiohttp.ClientResponse, None]:
        """Async PATCH with ``data`` as the JSON body; yields the ClientResponse."""
        async with self._arequest("PATCH", url, json=data, **kwargs) as response:
            yield response

    @asynccontextmanager
    async def aput(
        self, url: str, data: Dict[str, Any], **kwargs: Any
    ) -> AsyncGenerator[aiohttp.ClientResponse, None]:
        """Async PUT with ``data`` as the JSON body; yields the ClientResponse."""
        async with self._arequest("PUT", url, json=data, **kwargs) as response:
            yield response

    @asynccontextmanager
    async def adelete(
        self, url: str, **kwargs: Any
    ) -> AsyncGenerator[aiohttp.ClientResponse, None]:
        """Async DELETE; yields the ClientResponse inside a context manager."""
        async with self._arequest("DELETE", url, **kwargs) as response:
            yield response
class GenericRequestsWrapper(BaseModel):
    """Lightweight wrapper around requests library.

    Each HTTP method delegates to a :class:`Requests` helper and then
    extracts the body as either text or JSON, controlled by
    ``response_content_type``.
    """

    # Headers forwarded to the underlying Requests helper.
    headers: Optional[Dict[str, str]] = None
    # Optional shared aiohttp session for the async methods.
    aiosession: Optional[aiohttp.ClientSession] = None
    # Auth object forwarded to requests / aiohttp.
    auth: Optional[Any] = None
    # Whether responses are returned as raw text or decoded JSON.
    response_content_type: Literal["text", "json"] = "text"
    # TLS certificate verification for the sync methods.
    verify: bool = True

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )

    @property
    def requests(self) -> Requests:
        # A fresh Requests helper is built from the current field values on
        # every access.
        return Requests(
            headers=self.headers,
            aiosession=self.aiosession,
            auth=self.auth,
            verify=self.verify,
        )

    def _get_resp_content(self, response: Response) -> Union[str, Dict[str, Any]]:
        """Extract a sync response body per ``response_content_type``."""
        if self.response_content_type == "text":
            return response.text
        elif self.response_content_type == "json":
            return response.json()
        else:
            raise ValueError(f"Invalid return type: {self.response_content_type}")

    async def _aget_resp_content(
        self, response: aiohttp.ClientResponse
    ) -> Union[str, Dict[str, Any]]:
        """Extract an async response body per ``response_content_type``."""
        if self.response_content_type == "text":
            return await response.text()
        elif self.response_content_type == "json":
            return await response.json()
        else:
            raise ValueError(f"Invalid return type: {self.response_content_type}")

    def get(self, url: str, **kwargs: Any) -> Union[str, Dict[str, Any]]:
        """GET the URL and return the extracted body."""
        return self._get_resp_content(self.requests.get(url, **kwargs))

    def post(
        self, url: str, data: Dict[str, Any], **kwargs: Any
    ) -> Union[str, Dict[str, Any]]:
        """POST ``data`` as JSON to the URL and return the extracted body."""
        return self._get_resp_content(self.requests.post(url, data, **kwargs))

    def patch(
        self, url: str, data: Dict[str, Any], **kwargs: Any
    ) -> Union[str, Dict[str, Any]]:
        """PATCH the URL with ``data`` as JSON and return the extracted body."""
        return self._get_resp_content(self.requests.patch(url, data, **kwargs))

    def put(
        self, url: str, data: Dict[str, Any], **kwargs: Any
    ) -> Union[str, Dict[str, Any]]:
        """PUT ``data`` as JSON to the URL and return the extracted body."""
        return self._get_resp_content(self.requests.put(url, data, **kwargs))

    def delete(self, url: str, **kwargs: Any) -> Union[str, Dict[str, Any]]:
        """DELETE the URL and return the extracted body."""
        return self._get_resp_content(self.requests.delete(url, **kwargs))

    async def aget(self, url: str, **kwargs: Any) -> Union[str, Dict[str, Any]]:
        """GET the URL asynchronously and return the extracted body."""
        async with self.requests.aget(url, **kwargs) as response:
            return await self._aget_resp_content(response)

    async def apost(
        self, url: str, data: Dict[str, Any], **kwargs: Any
    ) -> Union[str, Dict[str, Any]]:
        """POST ``data`` asynchronously and return the extracted body."""
        async with self.requests.apost(url, data, **kwargs) as response:
            return await self._aget_resp_content(response)

    async def apatch(
        self, url: str, data: Dict[str, Any], **kwargs: Any
    ) -> Union[str, Dict[str, Any]]:
        """PATCH asynchronously with ``data`` and return the extracted body."""
        async with self.requests.apatch(url, data, **kwargs) as response:
            return await self._aget_resp_content(response)

    async def aput(
        self, url: str, data: Dict[str, Any], **kwargs: Any
    ) -> Union[str, Dict[str, Any]]:
        """PUT ``data`` asynchronously and return the extracted body."""
        async with self.requests.aput(url, data, **kwargs) as response:
            return await self._aget_resp_content(response)

    async def adelete(self, url: str, **kwargs: Any) -> Union[str, Dict[str, Any]]:
        """DELETE the URL asynchronously and return the extracted body."""
        async with self.requests.adelete(url, **kwargs) as response:
            return await self._aget_resp_content(response)
class JsonRequestsWrapper(GenericRequestsWrapper):
    """Requests wrapper whose responses are always decoded as JSON.

    Identical to ``GenericRequestsWrapper`` except that the response
    content type is pinned to ``"json"``, so every method returns the
    parsed JSON body instead of raw text.
    """

    response_content_type: Literal["text", "json"] = "json"
class TextRequestsWrapper(GenericRequestsWrapper):
    """Requests wrapper whose responses are always returned as text.

    Identical to ``GenericRequestsWrapper`` except that the response
    content type is pinned to ``"text"``, so every method returns the raw
    response body as a string.
    """

    response_content_type: Literal["text", "json"] = "text"


# For backwards compatibility
RequestsWrapper = TextRequestsWrapper
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/google_finance.py | """Util that calls Google Finance Search."""
from typing import Any, Dict, Optional, cast
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, SecretStr, model_validator
class GoogleFinanceAPIWrapper(BaseModel):
    """Wrapper for SerpApi's Google Finance API

    You can create SerpApi.com key by signing up at: https://serpapi.com/users/sign_up.

    The wrapper uses the SerpApi.com python package:
    https://serpapi.com/integrations/python

    To use, you should have the environment variable ``SERPAPI_API_KEY``
    set with your API key, or pass `serp_api_key` as a named parameter
    to the constructor.

    Example:
        .. code-block:: python

        from langchain_community.utilities import GoogleFinanceAPIWrapper
        google_Finance = GoogleFinanceAPIWrapper()
        google_Finance.run('langchain')
    """

    # The SerpApiClient class itself; run() instantiates it per query.
    serp_search_engine: Any = None
    serp_api_key: Optional[SecretStr] = None

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        values["serp_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(values, "serp_api_key", "SERPAPI_API_KEY")
        )
        try:
            from serpapi import SerpApiClient
        except ImportError:
            raise ImportError(
                "google-search-results is not installed. "
                "Please install it with `pip install google-search-results"
                ">=2.4.2`"
            )
        values["serp_search_engine"] = SerpApiClient
        return values

    def run(self, query: str) -> str:
        """Run query through Google Finance with Serpapi.

        Returns a plain-text report with a summary block (when a
        ``futures_chain`` is present) followed by per-region market lines.
        """
        serpapi_api_key = cast(SecretStr, self.serp_api_key)
        params = {
            "engine": "google_finance",
            "api_key": serpapi_api_key.get_secret_value(),
            "q": query,
        }
        # (fix) dropped a dead `total_results = {}` assignment that was
        # immediately overwritten.
        total_results = self.serp_search_engine(params).get_dict()
        if not total_results:
            return "Nothing was found from the query: " + query

        res = "\nQuery: " + query + "\n"
        if "futures_chain" in total_results:
            # Summary block for the primary instrument of the query.
            futures_chain = total_results["futures_chain"][0]
            price_movement = futures_chain["price_movement"]
            res += (
                f"stock: {futures_chain['stock']}\n"
                f"price: {futures_chain['price']}\n"
                f"percentage: {price_movement['percentage']}\n"
                f"movement: {price_movement['movement']}\n"
            )
        else:
            res += "No summary information\n"

        # Per-region market snapshot. (fix) guard against an empty entry
        # list, which previously raised IndexError on markets[key][0].
        markets = total_results.get("markets", {})
        for key, entries in markets.items():
            if key in ("us", "asia", "europe") and entries:
                top = entries[0]
                res += (
                    f"{key}: price = {top['price']}, "
                    f"movement = {top['price_movement']['movement']}\n"
                )
        return res
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/openweathermap.py | """Util that calls OpenWeatherMap using PyOWM."""
from typing import Any, Dict, Optional
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
class OpenWeatherMapAPIWrapper(BaseModel):
    """Wrapper for OpenWeatherMap API using PyOWM.

    Docs for using:

    1. Go to OpenWeatherMap and sign up for an API key
    2. Save your API KEY into OPENWEATHERMAP_API_KEY env variable
    3. pip install pyowm
    """

    # PyOWM client, constructed by the validator below.
    owm: Any = None
    openweathermap_api_key: Optional[str] = None

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key exists in environment."""
        api_key = get_from_dict_or_env(
            values, "openweathermap_api_key", "OPENWEATHERMAP_API_KEY"
        )
        try:
            import pyowm
        except ImportError:
            raise ImportError(
                "pyowm is not installed. Please install it with `pip install pyowm`"
            )
        values["owm"] = pyowm.OWM(api_key)
        return values

    def _format_weather_info(self, location: str, w: Any) -> str:
        """Render a PyOWM weather object as a human-readable report."""
        temperature = w.temperature("celsius")
        wind = w.wind()
        report_lines = [
            f"In {location}, the current weather is as follows:",
            f"Detailed status: {w.detailed_status}",
            f"Wind speed: {wind['speed']} m/s, direction: {wind['deg']}°",
            f"Humidity: {w.humidity}%",
            "Temperature: ",
            f" - Current: {temperature['temp']}°C",
            f" - High: {temperature['temp_max']}°C",
            f" - Low: {temperature['temp_min']}°C",
            f" - Feels like: {temperature['feels_like']}°C",
            f"Rain: {w.rain}",
            f"Heat index: {w.heat_index}",
            f"Cloud cover: {w.clouds}%",
        ]
        return "\n".join(report_lines)

    def run(self, location: str) -> str:
        """Get the current weather information for a specified location."""
        observation = self.owm.weather_manager().weather_at_place(location)
        return self._format_weather_info(location, observation.weather)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/wikipedia.py | """Util that calls Wikipedia."""
import logging
from typing import Any, Dict, Iterator, List, Optional
from langchain_core.documents import Document
from pydantic import BaseModel, model_validator
logger = logging.getLogger(__name__)
WIKIPEDIA_MAX_QUERY_LENGTH = 300
class WikipediaAPIWrapper(BaseModel):
    """Wrapper around WikipediaAPI.

    To use, you should have the ``wikipedia`` python package installed.
    This wrapper will use the Wikipedia API to conduct searches and
    fetch page summaries. By default, it will return the page summaries
    of the top-k results.
    It limits the Document content by doc_content_chars_max.
    """

    wiki_client: Any  #: :meta private:
    # Maximum number of search hits to fetch and summarize.
    top_k_results: int = 3
    # Wikipedia language edition to query.
    lang: str = "en"
    # When True, attach the full page metadata to loaded Documents.
    load_all_available_meta: bool = False
    # Character truncation limit for returned content.
    doc_content_chars_max: int = 4000

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that the python package exists in environment."""
        try:
            import wikipedia

            # Configure the module-level language before storing the module
            # itself as the client.
            lang = values.get("lang", "en")
            wikipedia.set_lang(lang)
            values["wiki_client"] = wikipedia
        except ImportError:
            raise ImportError(
                "Could not import wikipedia python package. "
                "Please install it with `pip install wikipedia`."
            )
        return values

    def run(self, query: str) -> str:
        """Run Wikipedia search and get page summaries.

        Returns the joined summaries of up to ``top_k_results`` pages,
        truncated to ``doc_content_chars_max`` characters, or a fixed
        message when nothing was found.
        """
        # The Wikipedia API rejects very long queries, so truncate first.
        page_titles = self.wiki_client.search(
            query[:WIKIPEDIA_MAX_QUERY_LENGTH], results=self.top_k_results
        )
        summaries = []
        for page_title in page_titles[: self.top_k_results]:
            if wiki_page := self._fetch_page(page_title):
                if summary := self._formatted_page_summary(page_title, wiki_page):
                    summaries.append(summary)
        if not summaries:
            return "No good Wikipedia Search Result was found"
        return "\n\n".join(summaries)[: self.doc_content_chars_max]

    @staticmethod
    def _formatted_page_summary(page_title: str, wiki_page: Any) -> Optional[str]:
        """Format one page as a 'Page: <title>' / 'Summary: <text>' block."""
        return f"Page: {page_title}\nSummary: {wiki_page.summary}"

    def _page_to_document(self, page_title: str, wiki_page: Any) -> Document:
        """Convert a fetched wikipedia page into a LangChain Document.

        The extended metadata is included only when
        ``load_all_available_meta`` is set.
        """
        main_meta = {
            "title": page_title,
            "summary": wiki_page.summary,
            "source": wiki_page.url,
        }
        add_meta = (
            {
                "categories": wiki_page.categories,
                "page_url": wiki_page.url,
                "image_urls": wiki_page.images,
                "related_titles": wiki_page.links,
                "parent_id": wiki_page.parent_id,
                "references": wiki_page.references,
                "revision_id": wiki_page.revision_id,
                "sections": wiki_page.sections,
            }
            if self.load_all_available_meta
            else {}
        )
        doc = Document(
            page_content=wiki_page.content[: self.doc_content_chars_max],
            metadata={
                **main_meta,
                **add_meta,
            },
        )
        return doc

    def _fetch_page(self, page: str) -> Optional[Any]:
        """Fetch a wikipedia page object by title; None on page errors.

        (Annotation fixed: this returns a ``wikipedia`` page object, not a
        string, as consumed by ``_formatted_page_summary`` and
        ``_page_to_document``.)
        """
        try:
            return self.wiki_client.page(title=page, auto_suggest=False)
        except (
            self.wiki_client.exceptions.PageError,
            self.wiki_client.exceptions.DisambiguationError,
        ):
            return None

    def load(self, query: str) -> List[Document]:
        """
        Run Wikipedia search and get the article text plus the meta information.

        Returns: a list of documents.
        """
        return list(self.lazy_load(query))

    def lazy_load(self, query: str) -> Iterator[Document]:
        """
        Run Wikipedia search and get the article text plus the meta information.

        Returns: an iterator of documents, one per fetched page.
        """
        page_titles = self.wiki_client.search(
            query[:WIKIPEDIA_MAX_QUERY_LENGTH], results=self.top_k_results
        )
        for page_title in page_titles[: self.top_k_results]:
            if wiki_page := self._fetch_page(page_title):
                if doc := self._page_to_document(page_title, wiki_page):
                    yield doc
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/financial_datasets.py | """
Util that calls several of financial datasets stock market REST APIs.
Docs: https://docs.financialdatasets.ai/
"""
import json
from typing import Any, List, Optional
import requests
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel
FINANCIAL_DATASETS_BASE_URL = "https://api.financialdatasets.ai/"
class FinancialDatasetsAPIWrapper(BaseModel):
    """Wrapper for financial datasets API."""

    # API key; resolved from kwargs or the FINANCIAL_DATASETS_API_KEY env var.
    financial_datasets_api_key: Optional[str] = None

    def __init__(self, **data: Any):
        super().__init__(**data)
        self.financial_datasets_api_key = get_from_dict_or_env(
            data, "financial_datasets_api_key", "FINANCIAL_DATASETS_API_KEY"
        )

    @property
    def _api_key(self) -> str:
        """Return the API key, raising a helpful error when it is missing."""
        if self.financial_datasets_api_key is None:
            raise ValueError(
                "API key is required for the FinancialDatasetsAPIWrapper. "
                "Please provide the API key by either:\n"
                "1. Manually specifying it when initializing the wrapper: "
                "FinancialDatasetsAPIWrapper(financial_datasets_api_key='your_api_key')\n"
                "2. Setting it as an environment variable: FINANCIAL_DATASETS_API_KEY"
            )
        return self.financial_datasets_api_key

    def _fetch_financials(
        self,
        endpoint: str,
        response_key: str,
        ticker: str,
        period: str,
        limit: Optional[int],
    ) -> Optional[List[dict]]:
        """Shared GET helper for the three financial-statement endpoints.

        :param endpoint: URL path segment under ``financials/``.
        :param response_key: key holding the statement list in the response.
        :param ticker: the stock ticker
        :param period: annual, quarterly, or ttm.
        :param limit: number of results; defaults to 10 when falsy.
        :return: the statement list, or None when the key is absent.
        """
        # NOTE(review): ticker/period are interpolated without URL-encoding;
        # fine for plain tickers, confirm for unusual symbols.
        url = (
            f"{FINANCIAL_DATASETS_BASE_URL}financials/{endpoint}/"
            f"?ticker={ticker}"
            f"&period={period}"
            f"&limit={limit if limit else 10}"
        )
        # The api key travels in a header, not the query string.
        response = requests.get(url, headers={"X-API-KEY": self._api_key})
        return response.json().get(response_key, None)

    def get_income_statements(
        self,
        ticker: str,
        period: str,
        limit: Optional[int],
    ) -> Optional[List[dict]]:
        """
        Get the income statements for a stock `ticker` over a `period` of time.

        :param ticker: the stock ticker
        :param period: the period of time to get the income statements for.
            Possible values are: annual, quarterly, ttm.
        :param limit: the number of results to return, default is 10
        :return: a list of income statements
        """
        return self._fetch_financials(
            "income-statements", "income_statements", ticker, period, limit
        )

    def get_balance_sheets(
        self,
        ticker: str,
        period: str,
        limit: Optional[int],
    ) -> Optional[List[dict]]:
        """
        Get the balance sheets for a stock `ticker` over a `period` of time.

        :param ticker: the stock ticker
        :param period: the period of time to get the balance sheets for.
            Possible values are: annual, quarterly, ttm.
        :param limit: the number of results to return, default is 10
        :return: a list of balance sheets
        """
        return self._fetch_financials(
            "balance-sheets", "balance_sheets", ticker, period, limit
        )

    def get_cash_flow_statements(
        self,
        ticker: str,
        period: str,
        limit: Optional[int],
    ) -> Optional[List[dict]]:
        """
        Get the cash flow statements for a stock `ticker` over a `period` of time.

        :param ticker: the stock ticker
        :param period: the period of time to get the cash flow statements for.
            Possible values are: annual, quarterly, ttm.
        :param limit: the number of results to return, default is 10
        :return: a list of cash flow statements
        """
        return self._fetch_financials(
            "cash-flow-statements", "cash_flow_statements", ticker, period, limit
        )

    def run(self, mode: str, ticker: str, **kwargs: Any) -> str:
        """Dispatch ``mode`` to the matching getter and JSON-encode the result.

        :raises ValueError: when ``mode`` is not a recognized operation.
        """
        getters = {
            "get_income_statements": self.get_income_statements,
            "get_balance_sheets": self.get_balance_sheets,
            "get_cash_flow_statements": self.get_cash_flow_statements,
        }
        getter = getters.get(mode)
        if getter is None:
            raise ValueError(f"Invalid mode {mode} for financial datasets API.")
        period = kwargs.get("period", "annual")
        limit = kwargs.get("limit", 10)
        return json.dumps(getter(ticker, period, limit))
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/nvidia_riva.py | """A common module for NVIDIA Riva Runnables."""
import asyncio
import logging
import pathlib
import queue
import tempfile
import threading
import wave
from enum import Enum
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
AsyncIterator,
Dict,
Generator,
Iterator,
List,
Optional,
Tuple,
Union,
cast,
)
from langchain_core.messages import AnyMessage, BaseMessage
from langchain_core.prompt_values import PromptValue
from langchain_core.runnables import RunnableConfig, RunnableSerializable
from pydantic import (
AnyHttpUrl,
BaseModel,
Field,
parse_obj_as,
root_validator,
validator,
)
if TYPE_CHECKING:
import riva.client
import riva.client.proto.riva_asr_pb2 as rasr
_LOGGER = logging.getLogger(__name__)
_QUEUE_GET_TIMEOUT = 0.5
_MAX_TEXT_LENGTH = 400
_SENTENCE_TERMINATORS = ("\n", ".", "!", "?", "¡", "¿")
# COMMON utilities used by all Riva Runnables
def _import_riva_client() -> "riva.client":
    """Import the riva client and raise an error on failure.

    Returns:
        The imported ``riva.client`` module.

    Raises:
        ImportError: when the optional ``nvidia-riva-client`` package is
            not installed, with install instructions chained to the
            original import error.
    """
    try:
        # pylint: disable-next=import-outside-toplevel # this client library is optional
        import riva.client
    except ImportError as err:
        raise ImportError(
            "Could not import the NVIDIA Riva client library. "
            "Please install it with `pip install nvidia-riva-client`."
        ) from err
    return riva.client
class SentinelT:  # pylint: disable=too-few-public-methods
    """An empty Sentinel type."""


# Sentinel placed on an AudioStream queue to signal that the call ended.
HANGUP = SentinelT()
# Sentinel marking the end of a transform stream.
_TRANSFORM_END = SentinelT()
class RivaAudioEncoding(str, Enum):
    """An enum of the possible choices for Riva audio encoding.

    The list of types exposed by the Riva GRPC Protobuf files can be found
    with the following commands:
    ```python
    import riva.client
    print(riva.client.AudioEncoding.keys())  # noqa: T201
    ```
    """

    ALAW = "ALAW"
    ENCODING_UNSPECIFIED = "ENCODING_UNSPECIFIED"
    FLAC = "FLAC"
    LINEAR_PCM = "LINEAR_PCM"
    MULAW = "MULAW"
    OGGOPUS = "OGGOPUS"

    @classmethod
    def from_wave_format_code(cls, format_code: int) -> "RivaAudioEncoding":
        """Return the audio encoding specified by the format code in the wave file.

        ref: https://mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html
        """
        # Only the uncompressed/telephony wave formats map onto Riva codecs.
        known_codes = {1: cls.LINEAR_PCM, 6: cls.ALAW, 7: cls.MULAW}
        try:
            encoding = known_codes[format_code]
        except KeyError as err:
            raise NotImplementedError(
                "The following wave file format code is "
                f"not supported by Riva: {format_code}"
            ) from err
        return encoding

    @property
    def riva_pb2(self) -> "riva.client.AudioEncoding":
        """Returns the Riva API object for the encoding."""
        riva_client = _import_riva_client()
        return getattr(riva_client.AudioEncoding, self)
class RivaAuthMixin(BaseModel):
    """Configuration for the authentication to a Riva service connection."""

    url: Union[AnyHttpUrl, str] = Field(
        AnyHttpUrl("http://localhost:50051"),
        description="The full URL where the Riva service can be found.",
        examples=["http://localhost:50051", "https://user@pass:riva.example.com"],
    )
    ssl_cert: Optional[str] = Field(
        None,
        description="A full path to the file where Riva's public ssl key can be read.",
    )

    @property
    def auth(self) -> "riva.client.Auth":
        """Return a riva client auth object."""
        riva_client = _import_riva_client()
        url = cast(AnyHttpUrl, self.url)
        # SSL is enabled iff the configured URL scheme is https.
        use_ssl = url.scheme == "https"  # pylint: disable=no-member # false positive
        # Strip the scheme: "http://host:port/..." -> "host:port" (netloc).
        url_no_scheme = str(self.url).split("/")[2]
        return riva_client.Auth(
            ssl_cert=self.ssl_cert, use_ssl=use_ssl, uri=url_no_scheme
        )

    @validator("url", pre=True, allow_reuse=True)
    @classmethod
    def _validate_url(cls, val: Any) -> AnyHttpUrl:
        """Do some initial conversions for the URL before checking."""
        # Coerce plain strings into AnyHttpUrl before pydantic validation.
        if isinstance(val, str):
            return cast(AnyHttpUrl, parse_obj_as(AnyHttpUrl, val))
        return cast(AnyHttpUrl, val)
class RivaCommonConfigMixin(BaseModel):
    """A collection of common Riva settings.

    Shared by the ASR and TTS runnables; each field carries its own
    pydantic ``description``.
    """

    encoding: RivaAudioEncoding = Field(
        default=RivaAudioEncoding.LINEAR_PCM,
        description="The encoding on the audio stream.",
    )
    sample_rate_hertz: int = Field(
        default=8000, description="The sample rate frequency of audio stream."
    )
    language_code: str = Field(
        default="en-US",
        description=(
            "The [BCP-47 language code]"
            "(https://www.rfc-editor.org/rfc/bcp/bcp47.txt) for "
            "the target language."
        ),
    )
class _Event:
"""A combined event that is threadsafe and async safe."""
_event: threading.Event
_aevent: asyncio.Event
def __init__(self) -> None:
"""Initialize the event."""
self._event = threading.Event()
self._aevent = asyncio.Event()
def set(self) -> None:
"""Set the event."""
self._event.set()
self._aevent.set()
def clear(self) -> None:
"""Set the event."""
self._event.clear()
self._aevent.clear()
def is_set(self) -> bool:
"""Indicate if the event is set."""
return self._event.is_set()
def wait(self) -> None:
"""Wait for the event to be set."""
self._event.wait()
async def async_wait(self) -> None:
"""Async wait for the event to be set."""
await self._aevent.wait()
def _mk_wave_file(
output_directory: Optional[str], sample_rate: float
) -> Tuple[Optional[str], Optional[wave.Wave_write]]:
"""Create a new wave file and return the wave write object and filename."""
if output_directory:
with tempfile.NamedTemporaryFile(
mode="bx", suffix=".wav", delete=False, dir=output_directory
) as f:
wav_file_name = f.name
wav_file = wave.open(wav_file_name, "wb")
wav_file.setnchannels(1)
wav_file.setsampwidth(2)
wav_file.setframerate(sample_rate)
return (wav_file_name, wav_file)
return (None, None)
def _coerce_string(val: "TTSInputType") -> str:
    """Best-effort conversion of a TTS input value to plain text.

    PromptValues and LangChain messages are unwrapped to their text
    content; anything else is passed through ``str``.
    """
    if isinstance(val, PromptValue):
        return val.to_string()
    content = val.content if isinstance(val, BaseMessage) else val
    return str(content)
def _process_chunks(inputs: Iterator["TTSInputType"]) -> Generator[str, None, None]:
    """Filter the input chunks and return strings ready for TTS.

    Chunks are coerced to text and buffered until a sentence terminator is
    seen, at which point the completed sentence is yielded.  If the buffer
    grows beyond ``_MAX_TEXT_LENGTH`` without a terminator it is flushed in
    ``_MAX_TEXT_LENGTH``-sized slices.  Any trailing text is yielded when
    the input stream ends.
    """
    buffer = ""
    for chunk in inputs:
        chunk = _coerce_string(chunk)

        # return the buffer if an end of sentence character is detected
        for terminator in _SENTENCE_TERMINATORS:
            while terminator in chunk:
                last_sentence, chunk = chunk.split(terminator, 1)
                yield buffer + last_sentence + terminator
                buffer = ""

        buffer += chunk

        # return the buffer if it is too long
        if len(buffer) > _MAX_TEXT_LENGTH:
            # BUG FIX: the slice previously took only 5 characters per
            # _MAX_TEXT_LENGTH step (buffer[idx : idx + 5]), silently
            # dropping most of the buffered text.
            for idx in range(0, len(buffer), _MAX_TEXT_LENGTH):
                yield buffer[idx : idx + _MAX_TEXT_LENGTH]
            buffer = ""

    # return remaining buffer
    if buffer:
        yield buffer
# Riva AudioStream Type
# Input items are raw audio bytes, plus the HANGUP sentinel; output is text.
StreamInputType = Union[bytes, SentinelT]
StreamOutputType = str


class AudioStream:
    """A message containing streaming audio.

    Incoming audio bytes are buffered on an internal queue that the Riva
    ASR client iterates; transcription results are pushed onto ``output``
    by a background worker registered via :meth:`register`.
    """

    # Serializes put() so the HANGUP flag and the queue stay consistent.
    _put_lock: threading.Lock
    # Buffer of incoming audio chunks (and the HANGUP sentinel).
    _queue: queue.Queue
    # Final transcripts produced by the registered ASR worker.
    output: queue.Queue
    # Set once HANGUP has been put on the stream.
    hangup: _Event
    # Flipped by the worker as interim/final ASR results arrive.
    user_talking: _Event
    user_quiet: _Event
    # Background thread draining ASR responses; None until register().
    _worker: Optional[threading.Thread]

    def __init__(self, maxsize: int = 0) -> None:
        """Initialize the queue."""
        self._put_lock = threading.Lock()
        self._queue = queue.Queue(maxsize=maxsize)
        self.output = queue.Queue()
        self.hangup = _Event()
        self.user_quiet = _Event()
        self.user_talking = _Event()
        self._worker = None

    def __iter__(self) -> Generator[bytes, None, None]:
        """Iterate through all items in the queue until HANGUP."""
        while True:
            # get next item; the timeout+continue loop keeps the consumer
            # responsive instead of blocking forever on an idle queue
            try:
                next_val = self._queue.get(True, _QUEUE_GET_TIMEOUT)
            except queue.Empty:
                continue
            # hangup when requested
            if next_val == HANGUP:
                break
            # yield next item
            yield next_val
            self._queue.task_done()

    async def __aiter__(self) -> AsyncIterator[StreamInputType]:
        """Iterate through all items in the queue until HANGUP."""
        while True:
            # get next item; the blocking get runs in an executor so the
            # event loop is not blocked
            try:
                next_val = await asyncio.get_event_loop().run_in_executor(
                    None, self._queue.get, True, _QUEUE_GET_TIMEOUT
                )
            except queue.Empty:
                continue
            # hangup when requested
            if next_val == HANGUP:
                break
            # yield next item
            yield next_val
            self._queue.task_done()

    @property
    def hungup(self) -> bool:
        """Indicate if the audio stream has hungup."""
        return self.hangup.is_set()

    @property
    def empty(self) -> bool:
        """Indicate if the input stream buffer is empty."""
        return self._queue.empty()

    @property
    def complete(self) -> bool:
        """Indicate if the audio stream has hungup and been processed."""
        input_done = self.hungup and self.empty
        # Output is done only once the worker thread has exited and all of
        # its transcripts have been consumed.
        output_done = (
            self._worker is not None
            and not self._worker.is_alive()
            and self.output.empty()
        )
        return input_done and output_done

    @property
    def running(self) -> bool:
        """Indicate if the ASR stream is running."""
        if self._worker:
            return self._worker.is_alive()
        return False

    def put(self, item: StreamInputType, timeout: Optional[int] = None) -> None:
        """Put a new item into the queue.

        Raises:
            RuntimeError: when called after the stream has been hungup.
        """
        with self._put_lock:
            if self.hungup:
                raise RuntimeError(
                    "The audio stream has already been hungup. Cannot put more data."
                )
            # Mark hangup before enqueueing so no later put can sneak in.
            if item is HANGUP:
                self.hangup.set()
            self._queue.put(item, timeout=timeout)

    async def aput(self, item: StreamInputType, timeout: Optional[int] = None) -> None:
        """Async put a new item into the queue."""
        loop = asyncio.get_event_loop()
        await asyncio.wait_for(loop.run_in_executor(None, self.put, item), timeout)

    def close(self, timeout: Optional[int] = None) -> None:
        """Send the hangup signal."""
        self.put(HANGUP, timeout)

    async def aclose(self, timeout: Optional[int] = None) -> None:
        """Async send the hangup signal."""
        await self.aput(HANGUP, timeout)

    def register(self, responses: Iterator["rasr.StreamingRecognizeResponse"]) -> None:
        """Drain the responses from the provided iterator and put them into a queue.

        Spawns a daemon worker thread that consumes ASR responses, pushing
        final transcripts onto ``output`` and toggling the
        ``user_talking``/``user_quiet`` events on interim results.

        Raises:
            RuntimeError: when a worker is already registered and running.
        """
        if self.running:
            raise RuntimeError("An ASR instance has already been registered.")
        # Barrier ensures the worker has actually started before register()
        # returns (and fails fast after 5s if it never does).
        has_started = threading.Barrier(2, timeout=5)

        def worker() -> None:
            """Consume the ASR Generator."""
            has_started.wait()
            for response in responses:
                if not response.results:
                    continue
                for result in response.results:
                    if not result.alternatives:
                        continue
                    if result.is_final:
                        # A final result means a pause: publish the text and
                        # flip the talking/quiet events.
                        self.user_talking.clear()
                        self.user_quiet.set()
                        transcript = cast(str, result.alternatives[0].transcript)
                        self.output.put(transcript)
                    elif not self.user_talking.is_set():
                        self.user_talking.set()
                        self.user_quiet.clear()

        self._worker = threading.Thread(target=worker)
        self._worker.daemon = True
        self._worker.start()
        has_started.wait()
# RivaASR Runnable
# Input is a live AudioStream; output is the transcribed text.
ASRInputType = AudioStream
ASROutputType = str


class RivaASR(  # type: ignore[override]
    RivaAuthMixin,
    RivaCommonConfigMixin,
    RunnableSerializable[ASRInputType, ASROutputType],
):
    """A runnable that performs Automatic Speech Recognition (ASR) using NVIDIA Riva."""

    name: str = "nvidia_riva_asr"
    description: str = (
        "A Runnable for converting audio bytes to a string."
        "This is useful for feeding an audio stream into a chain and"
        "preprocessing that audio to create an LLM prompt."
    )

    # riva options
    audio_channel_count: int = Field(
        1, description="The number of audio channels in the input audio stream."
    )
    profanity_filter: bool = Field(
        True,
        description=(
            "Controls whether or not Riva should attempt to filter "
            "profanity out of the transcribed text."
        ),
    )
    enable_automatic_punctuation: bool = Field(
        True,
        description=(
            "Controls whether Riva should attempt to correct "
            "senetence puncuation in the transcribed text."
        ),
    )

    @root_validator(pre=True)
    @classmethod
    def _validate_environment(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Validate the Python environment and input arguments."""
        # Fails early (ImportError) if the riva client is not installed.
        _ = _import_riva_client()
        return values

    @property
    def config(self) -> "riva.client.StreamingRecognitionConfig":
        """Create and return the riva config object."""
        riva_client = _import_riva_client()
        # NOTE(review): self.encoding is the RivaAudioEncoding str-enum, not
        # its .riva_pb2 protobuf value — confirm the client accepts it.
        return riva_client.StreamingRecognitionConfig(
            interim_results=True,
            config=riva_client.RecognitionConfig(
                encoding=self.encoding,
                sample_rate_hertz=self.sample_rate_hertz,
                audio_channel_count=self.audio_channel_count,
                max_alternatives=1,
                profanity_filter=self.profanity_filter,
                enable_automatic_punctuation=self.enable_automatic_punctuation,
                language_code=self.language_code,
            ),
        )

    def _get_service(self) -> "riva.client.ASRService":
        """Connect to the riva service and return the client object.

        Raises:
            ValueError: when the connection to the Riva server fails.
        """
        riva_client = _import_riva_client()
        try:
            return riva_client.ASRService(self.auth)
        except Exception as err:
            raise ValueError(
                "Error raised while connecting to the Riva ASR server."
            ) from err

    def invoke(
        self,
        input: ASRInputType,
        config: Optional[RunnableConfig] = None,
        **kwargs: Any,
    ) -> ASROutputType:
        """Transcribe the audio bytes into a string with Riva."""
        # create an output text generator with Riva (only once per stream)
        if not input.running:
            service = self._get_service()
            responses = service.streaming_response_generator(
                audio_chunks=input,
                streaming_config=self.config,
            )
            input.register(responses)
        # return the first valid result
        full_response: List[str] = []
        while not input.complete:
            # Poll the output queue's condition variable with a short
            # timeout so stream completion is still noticed promptly.
            with input.output.not_empty:
                ready = input.output.not_empty.wait(0.1)
            if ready:
                # Drain every transcript currently available, then return
                # them joined as a single utterance.
                while not input.output.empty():
                    try:
                        full_response += [input.output.get_nowait()]
                    except queue.Empty:
                        continue
                    input.output.task_done()
                _LOGGER.debug("Riva ASR returning: %s", repr(full_response))
                return " ".join(full_response).strip()
        return ""
# RivaTTS Runnable
# Type aliases for the TTS runnable: it accepts text-like input (a string,
# a message, or a prompt value) and yields raw audio bytes.
# pylint: disable-next=invalid-name
TTSInputType = Union[str, AnyMessage, PromptValue]
TTSOutputType = bytes
class RivaTTS(  # type: ignore[override]
    RivaAuthMixin,
    RivaCommonConfigMixin,
    RunnableSerializable[TTSInputType, TTSOutputType],
):
    """A runnable that performs Text-to-Speech (TTS) with NVIDIA Riva."""

    name: str = "nvidia_riva_tts"
    # Fixed: the concatenated sentences previously ran together without a
    # separating space ("speech.This is useful...").
    description: str = (
        "A tool for converting text to speech. "
        "This is useful for converting LLM output into audio bytes."
    )

    # riva options
    voice_name: str = Field(
        "English-US.Female-1",
        description=(
            "The voice model in Riva to use for speech. "
            "Pre-trained models are documented in "
            "[the Riva documentation]"
            "(https://docs.nvidia.com/deeplearning/riva/user-guide/docs/tts/tts-overview.html)."
        ),
    )
    output_directory: Optional[str] = Field(
        None,
        description=(
            "The directory where all audio files should be saved. "
            "A null value indicates that wave files should not be saved. "
            "This is useful for debugging purposes."
        ),
    )

    @root_validator(pre=True)
    @classmethod
    def _validate_environment(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Validate the Python environment and input arguments."""
        # Import only to fail fast when the riva client library is missing.
        _ = _import_riva_client()
        return values

    @validator("output_directory")
    @classmethod
    def _output_directory_validator(cls, v: str) -> str:
        # Create the debug-output directory eagerly and normalize the
        # stored value to an absolute path.
        if v:
            dirpath = pathlib.Path(v)
            dirpath.mkdir(parents=True, exist_ok=True)
            return str(dirpath.absolute())
        return v

    def _get_service(self) -> "riva.client.SpeechSynthesisService":
        """Connect to the riva service and return the client object.

        Raises:
            ValueError: if the connection to the Riva TTS server fails.
        """
        riva_client = _import_riva_client()
        try:
            return riva_client.SpeechSynthesisService(self.auth)
        except Exception as err:
            raise ValueError(
                "Error raised while connecting to the Riva TTS server."
            ) from err

    def invoke(
        self,
        input: TTSInputType,
        config: Optional[RunnableConfig] = None,
        **kwargs: Any,
    ) -> TTSOutputType:
        """Perform TTS by taking a string and outputting the entire audio file."""
        # Reuse the streaming path and concatenate all chunks into one blob.
        return b"".join(self.transform(iter([input])))

    def transform(
        self,
        input: Iterator[TTSInputType],
        config: Optional[RunnableConfig] = None,
        **kwargs: Optional[Any],
    ) -> Iterator[TTSOutputType]:
        """Perform TTS by taking a stream of characters and streaming output bytes."""
        service = self._get_service()

        # create an output wave file (no-op handles when output_directory is unset)
        wav_file_name, wav_file = _mk_wave_file(
            self.output_directory, self.sample_rate_hertz
        )

        # split the input text and perform tts
        for chunk in _process_chunks(input):
            _LOGGER.debug("Riva TTS chunk: %s", chunk)

            # start riva tts streaming
            responses = service.synthesize_online(
                text=chunk,
                voice_name=self.voice_name,
                language_code=self.language_code,
                encoding=self.encoding.riva_pb2,
                sample_rate_hz=self.sample_rate_hertz,
            )

            # stream audio bytes out, teeing them into the debug wave file
            for resp in responses:
                audio = cast(bytes, resp.audio)
                if wav_file:
                    wav_file.writeframesraw(audio)
                yield audio

        # close the wave file when we are done
        if wav_file:
            wav_file.close()
            _LOGGER.debug("Riva TTS wrote file: %s", wav_file_name)

    async def atransform(
        self,
        input: AsyncIterator[TTSInputType],
        config: Optional[RunnableConfig] = None,
        **kwargs: Optional[Any],
    ) -> AsyncGenerator[TTSOutputType, None]:
        """Intercept async transforms and route them to the synchronous transform.

        Bridges the async iterator into the synchronous ``transform`` by
        running it in an executor thread, shuttling values through a pair of
        queues, and yielding the produced audio chunks back on the loop.
        """
        loop = asyncio.get_running_loop()
        input_queue: queue.Queue = queue.Queue()
        out_queue: asyncio.Queue = asyncio.Queue()

        async def _producer() -> None:
            """Produce input into the input queue."""
            async for val in input:
                input_queue.put_nowait(val)
            # Sentinel tells the synchronous side that input is exhausted.
            input_queue.put_nowait(_TRANSFORM_END)

        def _input_iterator() -> Iterator[TTSInputType]:
            """Iterate over the input_queue."""
            while True:
                try:
                    val = input_queue.get(timeout=0.5)
                except queue.Empty:
                    continue
                if val == _TRANSFORM_END:
                    break
                yield val

        def _consumer() -> None:
            """Consume the input with transform."""
            for val in self.transform(_input_iterator()):
                out_queue.put_nowait(val)
            out_queue.put_nowait(_TRANSFORM_END)

        async def _consumer_coro() -> None:
            """Coroutine that wraps the consumer."""
            # run_in_executor keeps the blocking TTS work off the event loop.
            await loop.run_in_executor(None, _consumer)

        producer = loop.create_task(_producer())
        consumer = loop.create_task(_consumer_coro())

        while True:
            try:
                val = await asyncio.wait_for(out_queue.get(), 0.5)
            except asyncio.exceptions.TimeoutError:
                continue
            out_queue.task_done()

            if val is _TRANSFORM_END:
                break
            yield val

        await producer
        await consumer
# Backwards compatibility:
# Earlier releases exported these classes under the ``NVIDIARiva*`` names;
# keep the aliases so existing imports continue to work.
NVIDIARivaASR = RivaASR
NVIDIARivaTTS = RivaTTS
NVIDIARivaStream = AudioStream
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/golden_query.py | """Util that calls Golden."""
import json
from typing import Any, Dict, Optional
import requests
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
GOLDEN_BASE_URL = "https://golden.com"
# NOTE(review): ``requests`` interprets ``timeout`` in seconds, so 5000 is
# nearly 1.5 hours — this value looks like it was meant as milliseconds;
# confirm the intended unit before changing it.
GOLDEN_TIMEOUT = 5000
class GoldenQueryAPIWrapper(BaseModel):
    """Wrapper for Golden.

    Docs for using:

    1. Go to https://golden.com and sign up for an account
    2. Get your API Key from https://golden.com/settings/api
    3. Save your API Key into GOLDEN_API_KEY env variable

    """

    golden_api_key: Optional[str] = None

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        values["golden_api_key"] = get_from_dict_or_env(
            values, "golden_api_key", "GOLDEN_API_KEY"
        )
        return values

    def run(self, query: str) -> str:
        """Run query through Golden Query API and return the JSON raw result."""
        request_headers = {"apikey": self.golden_api_key or ""}

        # Submit the natural-language query; Golden replies with 201 Created
        # and the id of the asynchronous query it spawned.
        create_resp = requests.post(
            f"{GOLDEN_BASE_URL}/api/v2/public/queries/",
            json={"prompt": query},
            headers=request_headers,
            timeout=GOLDEN_TIMEOUT,
        )
        if create_resp.status_code != 201:
            # Surface the raw error payload to the caller.
            return create_resp.text

        query_id = json.loads(create_resp.content)["id"]

        # Fetch the first page of results for the newly created query.
        results_resp = requests.get(
            f"{GOLDEN_BASE_URL}/api/v2/public/queries/{query_id}/results/?pageSize=10",
            headers=request_headers,
            timeout=GOLDEN_TIMEOUT,
        )
        return results_resp.text
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/cassandra.py | from __future__ import annotations
import asyncio
from enum import Enum
from typing import TYPE_CHECKING, Any, Callable
if TYPE_CHECKING:
from cassandra.cluster import ResponseFuture, Session
async def wrapped_response_future(
    func: Callable[..., ResponseFuture], *args: Any, **kwargs: Any
) -> Any:
    """Wrap a Cassandra response future in an asyncio future.

    Args:
        func: The Cassandra function to call.
        *args: The arguments to pass to the Cassandra function.
        **kwargs: The keyword arguments to pass to the Cassandra function.

    Returns:
        The result of the Cassandra function.
    """
    loop = asyncio.get_event_loop()
    future = loop.create_future()
    cassandra_future = func(*args, **kwargs)

    # The driver invokes these callbacks on its own thread, so hop back onto
    # the event loop with call_soon_threadsafe before touching the future.
    cassandra_future.add_callbacks(
        lambda _: loop.call_soon_threadsafe(
            future.set_result, cassandra_future.result()
        ),
        lambda exc: loop.call_soon_threadsafe(future.set_exception, exc),
    )
    return await future
async def aexecute_cql(session: Session, query: str, **kwargs: Any) -> Any:
    """Execute a CQL query asynchronously.

    Args:
        session: The Cassandra session to use.
        query: The CQL query to execute.
        kwargs: Additional keyword arguments to pass to the session execute method.

    Returns:
        The result of the query.
    """
    # Delegate to the generic response-future bridge using the session's
    # non-blocking entry point.
    result = await wrapped_response_future(session.execute_async, query, **kwargs)
    return result
class SetupMode(Enum):
    """Setup mode selector: synchronous, asynchronous, or off."""

    SYNC = 1
    ASYNC = 2
    OFF = 3
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/gitlab.py | """Util that calls gitlab."""
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
if TYPE_CHECKING:
from gitlab.v4.objects import Issue
class GitLabAPIWrapper(BaseModel):
    """Wrapper for GitLab API."""

    gitlab: Any = None  #: :meta private:
    gitlab_repo_instance: Any = None  #: :meta private:
    gitlab_repository: Optional[str] = None
    """The name of the GitLab repository, in the form {username}/{repo-name}."""
    gitlab_personal_access_token: Optional[str] = None
    """Personal access token for the GitLab service, used for authentication."""
    gitlab_branch: Optional[str] = None
    """The specific branch in the GitLab repository where the bot will make
    its commits. Defaults to 'main'.
    """
    gitlab_base_branch: Optional[str] = None
    """The base branch in the GitLab repository, used for comparisons.
    Usually 'main' or 'master'. Defaults to 'main'.
    """

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        gitlab_url = get_from_dict_or_env(
            values, "gitlab_url", "GITLAB_URL", default="https://gitlab.com"
        )
        gitlab_repository = get_from_dict_or_env(
            values, "gitlab_repository", "GITLAB_REPOSITORY"
        )

        gitlab_personal_access_token = get_from_dict_or_env(
            values, "gitlab_personal_access_token", "GITLAB_PERSONAL_ACCESS_TOKEN"
        )

        gitlab_branch = get_from_dict_or_env(
            values, "gitlab_branch", "GITLAB_BRANCH", default="main"
        )
        gitlab_base_branch = get_from_dict_or_env(
            values, "gitlab_base_branch", "GITLAB_BASE_BRANCH", default="main"
        )

        try:
            import gitlab

        except ImportError:
            raise ImportError(
                "python-gitlab is not installed. "
                "Please install it with `pip install python-gitlab`"
            )

        g = gitlab.Gitlab(
            url=gitlab_url,
            private_token=gitlab_personal_access_token,
            keep_base_url=True,
        )

        g.auth()

        values["gitlab"] = g
        values["gitlab_repo_instance"] = g.projects.get(gitlab_repository)
        values["gitlab_repository"] = gitlab_repository
        values["gitlab_personal_access_token"] = gitlab_personal_access_token
        values["gitlab_branch"] = gitlab_branch
        values["gitlab_base_branch"] = gitlab_base_branch

        return values

    def parse_issues(self, issues: List[Issue]) -> List[dict]:
        """
        Extracts title and number from each Issue and puts them in a dictionary
        Parameters:
            issues(List[Issue]): A list of gitlab Issue objects
        Returns:
            List[dict]: A dictionary of issue titles and numbers
        """
        parsed = []
        for issue in issues:
            title = issue.title
            # GitLab exposes the project-local issue number as ``iid``.
            number = issue.iid
            parsed.append({"title": title, "number": number})
        return parsed

    def get_issues(self) -> str:
        """
        Fetches all open issues from the repo

        Returns:
            str: A plaintext report containing the number of issues
            and each issue's title and number.
        """
        issues = self.gitlab_repo_instance.issues.list(state="opened")
        if len(issues) > 0:
            parsed_issues = self.parse_issues(issues)
            parsed_issues_str = (
                "Found " + str(len(parsed_issues)) + " issues:\n" + str(parsed_issues)
            )
            return parsed_issues_str
        else:
            return "No open issues available"

    def get_issue(self, issue_number: int) -> Dict[str, Any]:
        """
        Fetches a specific issue and its first 10 comments
        Parameters:
            issue_number(int): The number for the gitlab issue
        Returns:
            dict: A dictionary containing the issue's title,
            body, and comments as a string
        """
        issue = self.gitlab_repo_instance.issues.get(issue_number)
        # NOTE(review): GitLab pagination is 1-indexed; starting at page 0 may
        # duplicate the first page — confirm against the python-gitlab docs.
        page = 0
        comments: List[dict] = []
        while len(comments) <= 10:
            comments_page = issue.notes.list(page=page)
            if len(comments_page) == 0:
                break
            for comment in comments_page:
                # Re-fetch each note to get the full object (body + author).
                comment = issue.notes.get(comment.id)
                comments.append(
                    {"body": comment.body, "user": comment.author["username"]}
                )
            page += 1

        return {
            "title": issue.title,
            "body": issue.description,
            "comments": str(comments),
        }

    def create_pull_request(self, pr_query: str) -> str:
        """
        Makes a pull request from the bot's branch to the base branch
        Parameters:
            pr_query(str): a string which contains the PR title
            and the PR body. The title is the first line
            in the string, and the body are the rest of the string.
            For example, "Updated README\nmade changes to add info"
        Returns:
            str: A success or failure message
        """
        if self.gitlab_base_branch == self.gitlab_branch:
            return """Cannot make a pull request because 
            commits are already in the master branch"""
        else:
            try:
                title = pr_query.split("\n")[0]
                body = pr_query[len(title) + 2 :]

                pr = self.gitlab_repo_instance.mergerequests.create(
                    {
                        "source_branch": self.gitlab_branch,
                        "target_branch": self.gitlab_base_branch,
                        "title": title,
                        "description": body,
                        "labels": ["created-by-agent"],
                    }
                )
                return f"Successfully created PR number {pr.iid}"
            except Exception as e:
                return "Unable to make pull request due to error:\n" + str(e)

    def comment_on_issue(self, comment_query: str) -> str:
        """
        Adds a comment to a gitlab issue
        Parameters:
            comment_query(str): a string which contains the issue number,
            two newlines, and the comment.
            for example: "1\n\nWorking on it now"
            adds the comment "working on it now" to issue 1
        Returns:
            str: A success or failure message
        """
        issue_number = int(comment_query.split("\n\n")[0])
        comment = comment_query[len(str(issue_number)) + 2 :]
        try:
            issue = self.gitlab_repo_instance.issues.get(issue_number)
            issue.notes.create({"body": comment})
            return "Commented on issue " + str(issue_number)
        except Exception as e:
            return "Unable to make comment due to error:\n" + str(e)

    def create_file(self, file_query: str) -> str:
        """
        Creates a new file on the gitlab repo
        Parameters:
            file_query(str): a string which contains the file path
            and the file contents. The file path is the first line
            in the string, and the contents are the rest of the string.
            For example, "hello_world.md\n# Hello World!"
        Returns:
            str: A success or failure message
        """
        if self.gitlab_branch == self.gitlab_base_branch:
            return (
                "You're attempting to commit directly"
                f"to the {self.gitlab_base_branch} branch, which is protected. "
                "Please create a new branch and try again."
            )
        file_path = file_query.split("\n")[0]
        file_contents = file_query[len(file_path) + 2 :]
        try:
            # If the lookup succeeds the file already exists on the branch.
            self.gitlab_repo_instance.files.get(file_path, self.gitlab_branch)
            return f"File already exists at {file_path}. Use update_file instead"
        except Exception:
            data = {
                "branch": self.gitlab_branch,
                "commit_message": "Create " + file_path,
                "file_path": file_path,
                "content": file_contents,
            }

            self.gitlab_repo_instance.files.create(data)

            return "Created file " + file_path

    def read_file(self, file_path: str) -> str:
        """
        Reads a file from the gitlab repo
        Parameters:
            file_path(str): the file path
        Returns:
            str: The file decoded as a string
        """
        file = self.gitlab_repo_instance.files.get(file_path, self.gitlab_branch)
        return file.decode().decode("utf-8")

    def update_file(self, file_query: str) -> str:
        """
        Updates a file with new content.
        Parameters:
            file_query(str): Contains the file path and the file contents.
                The old file contents is wrapped in OLD <<<< and >>>> OLD
                The new file contents is wrapped in NEW <<<< and >>>> NEW
                For example:
                test/hello.txt
                OLD <<<<
                Hello Earth!
                >>>> OLD
                NEW <<<<
                Hello Mars!
                >>>> NEW
        Returns:
            A success or failure message
        """
        if self.gitlab_branch == self.gitlab_base_branch:
            return (
                "You're attempting to commit directly"
                f"to the {self.gitlab_base_branch} branch, which is protected. "
                "Please create a new branch and try again."
            )
        try:
            file_path = file_query.split("\n")[0]
            old_file_contents = (
                file_query.split("OLD <<<<")[1].split(">>>> OLD")[0].strip()
            )
            new_file_contents = (
                file_query.split("NEW <<<<")[1].split(">>>> NEW")[0].strip()
            )

            file_content = self.read_file(file_path)
            updated_file_content = file_content.replace(
                old_file_contents, new_file_contents
            )

            if file_content == updated_file_content:
                return (
                    "File content was not updated because old content was not found."
                    "It may be helpful to use the read_file action to get "
                    "the current file contents."
                )

            commit = {
                "branch": self.gitlab_branch,
                # Fixed: the commit message previously said "Create" for an
                # update action (copy-paste from create_file).
                "commit_message": "Update " + file_path,
                "actions": [
                    {
                        "action": "update",
                        "file_path": file_path,
                        "content": updated_file_content,
                    }
                ],
            }

            self.gitlab_repo_instance.commits.create(commit)
            return "Updated file " + file_path
        except Exception as e:
            return "Unable to update file due to error:\n" + str(e)

    def delete_file(self, file_path: str) -> str:
        """
        Deletes a file from the repo
        Parameters:
            file_path(str): Where the file is
        Returns:
            str: Success or failure message
        """
        if self.gitlab_branch == self.gitlab_base_branch:
            return (
                "You're attempting to commit directly"
                f"to the {self.gitlab_base_branch} branch, which is protected. "
                "Please create a new branch and try again."
            )
        try:
            self.gitlab_repo_instance.files.delete(
                file_path, self.gitlab_branch, "Delete " + file_path
            )
            return "Deleted file " + file_path
        except Exception as e:
            return "Unable to delete file due to error:\n" + str(e)

    def run(self, mode: str, query: str) -> str:
        """Dispatch a tool ``mode`` to the corresponding API method."""
        if mode == "get_issues":
            return self.get_issues()
        elif mode == "get_issue":
            return json.dumps(self.get_issue(int(query)))
        elif mode == "comment_on_issue":
            return self.comment_on_issue(query)
        elif mode == "create_file":
            return self.create_file(query)
        elif mode == "create_pull_request":
            return self.create_pull_request(query)
        elif mode == "read_file":
            return self.read_file(query)
        elif mode == "update_file":
            return self.update_file(query)
        elif mode == "delete_file":
            return self.delete_file(query)
        else:
            # Fixed: the message previously read "Invalid mode<mode>" with no
            # separator between the text and the mode value.
            raise ValueError("Invalid mode: " + mode)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/google_lens.py | """Util that calls Google Lens Search."""
from typing import Any, Dict, Optional, cast
import requests
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, SecretStr, model_validator
class GoogleLensAPIWrapper(BaseModel):
    """Wrapper for SerpApi's Google Lens API

    You can create SerpApi.com key by signing up at: https://serpapi.com/users/sign_up.

    The wrapper uses the SerpApi.com python package:
    https://serpapi.com/integrations/python

    To use, you should have the environment variable ``SERPAPI_API_KEY``
    set with your API key, or pass `serp_api_key` as a named parameter
    to the constructor.

    Example:
        .. code-block:: python

        from langchain_community.utilities import GoogleLensAPIWrapper
        google_lens = GoogleLensAPIWrapper()
        google_lens.run('langchain')
    """

    serp_search_engine: Any = None
    serp_api_key: Optional[SecretStr] = None

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        values["serp_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(values, "serp_api_key", "SERPAPI_API_KEY")
        )

        return values

    def run(self, query: str) -> str:
        """Run query through Google Trends with Serpapi"""
        serpapi_api_key = cast(SecretStr, self.serp_api_key)

        params = {
            "engine": "google_lens",
            "api_key": serpapi_api_key.get_secret_value(),
            "url": query,
        }
        # Let ``requests`` build and percent-encode the query string.
        # Interpolating the raw values into the URL by hand (the previous
        # behavior) broke on any image URL containing reserved characters
        # such as '&' or '#'.
        response = requests.get("https://serpapi.com/search", params=params)

        if response.status_code != 200:
            return "Google Lens search failed"
        responseValue = response.json()

        if responseValue["search_metadata"]["status"] != "Success":
            return "Google Lens search failed"

        xs = ""
        if (
            "knowledge_graph" in responseValue
            and len(responseValue["knowledge_graph"]) > 0
        ):
            subject = responseValue["knowledge_graph"][0]
            xs += f"Subject:{subject['title']}({subject['subtitle']})\n"
            xs += f"Link to subject:{subject['link']}\n\n"
        xs += "Related Images:\n\n"
        for image in responseValue["visual_matches"]:
            xs += f"Title: {image['title']}\n"
            xs += f"Source({image['source']}): {image['link']}\n"
            xs += f"Image: {image['thumbnail']}\n\n"
        if "reverse_image_search" in responseValue:
            xs += (
                "Reverse Image Search"
                + f"Link: {responseValue['reverse_image_search']['link']}\n"
            )
        print(xs)  # noqa: T201

        docs = [xs]
        return "\n\n".join(docs)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/wolfram_alpha.py | """Util that calls WolframAlpha."""
from typing import Any, Dict, Optional
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
class WolframAlphaAPIWrapper(BaseModel):
    """Wrapper for Wolfram Alpha.

    Docs for using:

    1. Go to wolfram alpha and sign up for a developer account
    2. Create an app and get your APP ID
    3. Save your APP ID into WOLFRAM_ALPHA_APPID env variable
    4. pip install wolframalpha

    """

    wolfram_client: Any = None  #: :meta private:
    wolfram_alpha_appid: Optional[str] = None

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        app_id = get_from_dict_or_env(
            values, "wolfram_alpha_appid", "WOLFRAM_ALPHA_APPID"
        )
        values["wolfram_alpha_appid"] = app_id

        try:
            import wolframalpha

        except ImportError:
            raise ImportError(
                "wolframalpha is not installed. "
                "Please install it with `pip install wolframalpha`"
            )
        # Cache a ready-to-use client on the model.
        values["wolfram_client"] = wolframalpha.Client(app_id)

        return values

    def run(self, query: str) -> str:
        """Run query through WolframAlpha and parse result."""
        response = self.wolfram_client.query(query)

        try:
            assumption = next(response.pods).text
            answer = next(response.results).text
        except StopIteration:
            # No pods or no results at all.
            return "Wolfram Alpha wasn't able to answer it"

        if answer is None or answer == "":
            # We don't want to return the assumption alone if answer is empty
            return "No good Wolfram Alpha Result was found"
        return f"Assumption: {assumption} \nAnswer: {answer}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/bibtex.py | """Util that calls bibtexparser."""
import logging
from typing import Any, Dict, List, Mapping
from pydantic import BaseModel, ConfigDict, model_validator
logger = logging.getLogger(__name__)
# Additional BibTeX fields that are only copied into the document metadata
# when ``load_extra=True`` is passed to ``BibtexparserWrapper.get_metadata``.
OPTIONAL_FIELDS = [
    "annotate",
    "booktitle",
    "editor",
    "howpublished",
    "journal",
    "keywords",
    "note",
    "organization",
    "publisher",
    "school",
    "series",
    "type",
    "doi",
    "issn",
    "isbn",
]
class BibtexparserWrapper(BaseModel):
    """Wrapper around bibtexparser.

    To use, you should have the ``bibtexparser`` python package installed.
    https://bibtexparser.readthedocs.io/en/master/

    This wrapper will use bibtexparser to load a collection of references from
    a bibtex file and fetch document summaries.
    """

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that the python package exists in environment."""
        try:
            import bibtexparser  # noqa
        except ImportError:
            raise ImportError(
                "Could not import bibtexparser python package. "
                "Please install it with `pip install bibtexparser`."
            )

        return values

    def load_bibtex_entries(self, path: str) -> List[Dict[str, Any]]:
        """Load bibtex entries from the bibtex file at the given path."""
        import bibtexparser

        with open(path) as bibtex_file:
            return bibtexparser.load(bibtex_file).entries

    def get_metadata(
        self, entry: Mapping[str, Any], load_extra: bool = False
    ) -> Dict[str, Any]:
        """Get metadata for the given entry."""
        # Prefer an explicit URL; otherwise derive one from the DOI.
        if "url" in entry:
            url = entry["url"]
        elif "doi" in entry:
            url = f'https://doi.org/{entry["doi"]}'
        else:
            url = None

        meta: Dict[str, Any] = {
            "id": entry.get("ID"),
            "published_year": entry.get("year"),
            "title": entry.get("title"),
            "publication": entry.get("journal") or entry.get("booktitle"),
            "authors": entry.get("author"),
            "abstract": entry.get("abstract"),
            "url": url,
        }
        if load_extra:
            meta.update({field: entry.get(field) for field in OPTIONAL_FIELDS})

        # Drop any fields the entry did not provide.
        return {key: value for key, value in meta.items() if value is not None}
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/powerbi.py | """Wrapper around a Power BI endpoint."""
from __future__ import annotations
import asyncio
import logging
import os
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Union
import aiohttp
import requests
from aiohttp import ClientTimeout, ServerTimeoutError
from pydantic import (
BaseModel,
ConfigDict,
Field,
model_validator,
)
from requests.exceptions import Timeout
logger = logging.getLogger(__name__)
BASE_URL = os.getenv("POWERBI_BASE_URL", "https://api.powerbi.com/v1.0/myorg")
if TYPE_CHECKING:
from azure.core.credentials import TokenCredential
class PowerBIDataset(BaseModel):
    """Create PowerBI engine from dataset ID and credential or token.

    Use either the credential or a supplied token to authenticate.
    If both are supplied the credential is used to generate a token.
    The impersonated_user_name is the UPN of a user to be impersonated.
    If the model is not RLS enabled, this will be ignored.
    """

    dataset_id: str
    table_names: List[str]
    group_id: Optional[str] = None
    credential: Optional[TokenCredential] = None
    token: Optional[str] = None
    impersonated_user_name: Optional[str] = None
    # How many sample rows are pulled per table when building schema info.
    sample_rows_in_table_info: int = Field(default=1, gt=0, le=10)
    # Cache of table name -> markdown schema, filled lazily by _get_schema.
    schemas: Dict[str, str] = Field(default_factory=dict)
    aiosession: Optional[aiohttp.ClientSession] = None

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )

    @model_validator(mode="before")
    @classmethod
    def validate_params(cls, values: Dict[str, Any]) -> Any:
        """Validate that at least one of token and credentials is present."""
        # Normalize table names up front so later membership checks run
        # against the fixed (quoted-if-needed) form.
        table_names = values.get("table_names", [])
        values["table_names"] = [fix_table_name(table) for table in table_names]
        if "token" in values or "credential" in values:
            return values
        raise ValueError("Please provide either a credential or a token.")

    @property
    def request_url(self) -> str:
        """Get the request url."""
        if self.group_id:
            return f"{BASE_URL}/groups/{self.group_id}/datasets/{self.dataset_id}/executeQueries"  # noqa: E501 # pylint: disable=C0301
        return f"{BASE_URL}/datasets/{self.dataset_id}/executeQueries"  # pylint: disable=C0301

    @property
    def headers(self) -> Dict[str, str]:
        """Get the token."""
        # An explicit token takes precedence over the credential.
        if self.token:
            return {
                "Content-Type": "application/json",
                "Authorization": "Bearer " + self.token,
            }
        from azure.core.exceptions import (
            ClientAuthenticationError,  # pylint: disable=import-outside-toplevel
        )

        if self.credential:
            try:
                # Request a fresh Power BI scoped token from the credential.
                token = self.credential.get_token(
                    "https://analysis.windows.net/powerbi/api/.default"
                ).token
                return {
                    "Content-Type": "application/json",
                    "Authorization": "Bearer " + token,
                }
            except Exception as exc:  # pylint: disable=broad-exception-caught
                raise ClientAuthenticationError(
                    "Could not get a token from the supplied credentials."
                ) from exc
        raise ClientAuthenticationError("No credential or token supplied.")

    def get_table_names(self) -> Iterable[str]:
        """Get names of tables available."""
        return self.table_names

    def get_schemas(self) -> str:
        """Get the available schema's."""
        if self.schemas:
            return ", ".join([f"{key}: {value}" for key, value in self.schemas.items()])
        return "No known schema's yet. Use the schema_powerbi tool first."

    @property
    def table_info(self) -> str:
        """Information about all tables in the database."""
        return self.get_table_info()

    def _get_tables_to_query(
        self, table_names: Optional[Union[List[str], str]] = None
    ) -> Optional[List[str]]:
        """Get the tables names that need to be queried, after checking they exist.

        Accepts a list of names, a single name, or None (meaning all known
        tables). Unknown names are logged and dropped; returns None when no
        valid table remains.
        """
        if table_names is not None:
            if (
                isinstance(table_names, list)
                and len(table_names) > 0
                and table_names[0] != ""
            ):
                fixed_tables = [fix_table_name(table) for table in table_names]
                non_existing_tables = [
                    table for table in fixed_tables if table not in self.table_names
                ]
                if non_existing_tables:
                    logger.warning(
                        "Table(s) %s not found in dataset.",
                        ", ".join(non_existing_tables),
                    )
                tables = [
                    table for table in fixed_tables if table not in non_existing_tables
                ]
                return tables if tables else None
            if isinstance(table_names, str) and table_names != "":
                if table_names not in self.table_names:
                    logger.warning("Table %s not found in dataset.", table_names)
                    return None
                return [fix_table_name(table_names)]
        return self.table_names

    def _get_tables_todo(self, tables_todo: List[str]) -> List[str]:
        """Get the tables that still need to be queried."""
        # A table is "done" once its schema is present in the cache.
        return [table for table in tables_todo if table not in self.schemas]

    def _get_schema_for_tables(self, table_names: List[str]) -> str:
        """Create a string of the table schemas for the supplied tables."""
        schemas = [
            schema for table, schema in self.schemas.items() if table in table_names
        ]
        return ", ".join(schemas)

    def get_table_info(
        self, table_names: Optional[Union[List[str], str]] = None
    ) -> str:
        """Get information about specified tables."""
        tables_requested = self._get_tables_to_query(table_names)
        if tables_requested is None:
            return "No (valid) tables requested."
        tables_todo = self._get_tables_todo(tables_requested)
        for table in tables_todo:
            self._get_schema(table)
        return self._get_schema_for_tables(tables_requested)

    async def aget_table_info(
        self, table_names: Optional[Union[List[str], str]] = None
    ) -> str:
        """Get information about specified tables."""
        tables_requested = self._get_tables_to_query(table_names)
        if tables_requested is None:
            return "No (valid) tables requested."
        tables_todo = self._get_tables_todo(tables_requested)
        # Fetch all missing schemas concurrently.
        await asyncio.gather(*[self._aget_schema(table) for table in tables_todo])
        return self._get_schema_for_tables(tables_requested)

    def _get_schema(self, table: str) -> None:
        """Get the schema for a table.

        Runs a TOPN DAX sample query and caches the markdown rendering of
        the rows; on any failure the schema is cached as "unknown" so the
        table is not retried on every call.
        """
        try:
            result = self.run(
                f"EVALUATE TOPN({self.sample_rows_in_table_info}, {table})"
            )
            self.schemas[table] = json_to_md(result["results"][0]["tables"][0]["rows"])
        except Timeout:
            logger.warning("Timeout while getting table info for %s", table)
            self.schemas[table] = "unknown"
        except Exception as exc:  # pylint: disable=broad-exception-caught
            logger.warning("Error while getting table info for %s: %s", table, exc)
            self.schemas[table] = "unknown"

    async def _aget_schema(self, table: str) -> None:
        """Get the schema for a table (async variant of _get_schema)."""
        try:
            result = await self.arun(
                f"EVALUATE TOPN({self.sample_rows_in_table_info}, {table})"
            )
            self.schemas[table] = json_to_md(result["results"][0]["tables"][0]["rows"])
        except ServerTimeoutError:
            logger.warning("Timeout while getting table info for %s", table)
            self.schemas[table] = "unknown"
        except Exception as exc:  # pylint: disable=broad-exception-caught
            logger.warning("Error while getting table info for %s: %s", table, exc)
            self.schemas[table] = "unknown"

    def _create_json_content(self, command: str) -> dict[str, Any]:
        """Create the json content for the request."""
        return {
            "queries": [{"query": rf"{command}"}],
            "impersonatedUserName": self.impersonated_user_name,
            "serializerSettings": {"includeNulls": True},
        }

    def run(self, command: str) -> Any:
        """Execute a DAX command and return a json representing the results."""
        logger.debug("Running command: %s", command)
        response = requests.post(
            self.request_url,
            json=self._create_json_content(command),
            headers=self.headers,
            timeout=10,
        )
        if response.status_code == 403:
            # 403 here signals an auth problem, reported as text not raised.
            return (
                "TokenError: Could not login to PowerBI, please check your credentials."
            )
        return response.json()

    async def arun(self, command: str) -> Any:
        """Execute a DAX command and return the result asynchronously."""
        logger.debug("Running command: %s", command)
        # Reuse the caller-supplied session when available; otherwise open a
        # short-lived session for this single request.
        if self.aiosession:
            async with self.aiosession.post(
                self.request_url,
                headers=self.headers,
                json=self._create_json_content(command),
                timeout=ClientTimeout(total=10),
            ) as response:
                if response.status == 403:
                    return "TokenError: Could not login to PowerBI, please check your credentials."  # noqa: E501
                response_json = await response.json(content_type=response.content_type)
                return response_json
        async with aiohttp.ClientSession() as session:
            async with session.post(
                self.request_url,
                headers=self.headers,
                json=self._create_json_content(command),
                timeout=ClientTimeout(total=10),
            ) as response:
                if response.status == 403:
                    return "TokenError: Could not login to PowerBI, please check your credentials."  # noqa: E501
                response_json = await response.json(content_type=response.content_type)
                return response_json
def json_to_md(
    json_contents: List[Dict[str, Union[str, int, float]]],
    table_name: Optional[str] = None,
) -> str:
    """Convert a JSON object to a markdown table.

    Args:
        json_contents: Rows as a list of dicts; the header row is taken from
            the keys of the first dict.
        table_name: When given, the ``"<table_name>."`` prefix is stripped
            from each column header.

    Returns:
        A markdown table: one header row followed by one row per entry.
        Empty string when ``json_contents`` is empty.
    """
    if len(json_contents) == 0:
        return ""
    output_md = ""
    headers = json_contents[0].keys()
    for header in headers:
        # Bug fix: str.replace returns a new string; the previous code
        # discarded the result, so DAX-style "table[column]" headers were
        # emitted unchanged.
        header = header.replace("[", ".").replace("]", "")
        if table_name:
            header = header.replace(f"{table_name}.", "")
        output_md += f"| {header} "
    output_md += "|\n"
    for row in json_contents:
        for value in row.values():
            output_md += f"| {value} "
        output_md += "|\n"
    return output_md
def fix_table_name(table: str) -> str:
    """Add single quotes around table names that contain spaces."""
    needs_quoting = (
        " " in table
        and not table.startswith("'")
        and not table.endswith("'")
    )
    return f"'{table}'" if needs_quoting else table
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/searx_search.py | """Utility for using SearxNG meta search API.
SearxNG is a privacy-friendly free metasearch engine that aggregates results from
`multiple search engines
<https://docs.searxng.org/admin/engines/configured_engines.html>`_ and databases and
supports the `OpenSearch
<https://github.com/dewitt/opensearch/blob/master/opensearch-1-1-draft-6.md>`_
specification.
More details on the installation instructions `here. <../../integrations/searx.html>`_
For the search API refer to https://docs.searxng.org/dev/search_api.html
Quick Start
-----------
In order to use this utility you need to provide the searx host. This can be done
by passing the named parameter :attr:`searx_host <SearxSearchWrapper.searx_host>`
or exporting the environment variable SEARX_HOST.
Note: this is the only required parameter.
Then create a searx search instance like this:
.. code-block:: python
from langchain_community.utilities import SearxSearchWrapper
# when the host starts with `http` SSL is disabled and the connection
# is assumed to be on a private network
searx_host='http://self.hosted'
search = SearxSearchWrapper(searx_host=searx_host)
You can now use the ``search`` instance to query the searx API.
Searching
---------
Use the :meth:`run() <SearxSearchWrapper.run>` and
:meth:`results() <SearxSearchWrapper.results>` methods to query the searx API.
Other methods are available for convenience.
:class:`SearxResults` is a convenience wrapper around the raw json result.
Example usage of the ``run`` method to make a search:
.. code-block:: python
s.run(query="what is the best search engine?")
Engine Parameters
-----------------
You can pass any `accepted searx search API
<https://docs.searxng.org/dev/search_api.html>`_ parameters to the
:py:class:`SearxSearchWrapper` instance.
In the following example we are using the
:attr:`engines <SearxSearchWrapper.engines>` and the ``language`` parameters:
.. code-block:: python
# assuming the searx host is set as above or exported as an env variable
s = SearxSearchWrapper(engines=['google', 'bing'],
language='es')
Search Tips
-----------
Searx offers a special
`search syntax <https://docs.searxng.org/user/index.html#search-syntax>`_
that can also be used instead of passing engine parameters.
For example the following query:
.. code-block:: python
s = SearxSearchWrapper("langchain library", engines=['github'])
# can also be written as:
s = SearxSearchWrapper("langchain library !github")
# or even:
s = SearxSearchWrapper("langchain library !gh")
In some situations you might want to pass an extra string to the search query.
For example when the `run()` method is called by an agent. The search suffix can
also be used as a way to pass extra parameters to searx or the underlying search
engines.
.. code-block:: python
# select the github engine and pass the search suffix
s = SearchWrapper("langchain library", query_suffix="!gh")
s = SearchWrapper("langchain library")
    # or select results from github using the conventional google search syntax
s.run("large language models", query_suffix="site:github.com")
*NOTE*: A search suffix can be defined on both the instance and the method level.
The resulting query will be the concatenation of the two with the former taking
precedence.
See `SearxNG Configured Engines
<https://docs.searxng.org/admin/engines/configured_engines.html>`_ and
`SearxNG Search Syntax <https://docs.searxng.org/user/index.html#id1>`_
for more details.
Notes
-----
This wrapper is based on the SearxNG fork https://github.com/searxng/searxng which is
better maintained than the original Searx project and offers more features.
Public searxNG instances often use a rate limiter for API usage, so you might want to
use a self hosted instance and disable the rate limiter.
If you are self-hosting an instance you can customize the rate limiter for your
own network as described
`here <https://docs.searxng.org/src/searx.botdetection.html#limiter-src>`_.
For a list of public SearxNG instances see https://searx.space/
"""
import json
from typing import Any, Dict, List, Optional
import aiohttp
import requests
from langchain_core.utils import get_from_dict_or_env
from pydantic import (
BaseModel,
ConfigDict,
Field,
PrivateAttr,
model_validator,
)
def _get_default_params() -> dict:
return {"language": "en", "format": "json"}
class SearxResults(dict):
    """Dict like wrapper around search api results."""

    # Class-level default. NOTE(review): __init__ never assigns self._data,
    # so __str__ always returns this empty string instead of the raw payload
    # — looks like a latent bug; confirm before relying on __str__.
    _data: str = ""

    def __init__(self, data: str):
        """Take a raw result from Searx and make it into a dict like object."""
        json_data = json.loads(data)
        super().__init__(json_data)
        # Alias the attribute namespace to the dict itself so entries can be
        # read both as keys (self["results"]) and as attributes (self.results).
        self.__dict__ = self

    def __str__(self) -> str:
        """Text representation of searx result."""
        return self._data

    @property
    def results(self) -> Any:
        """Silence mypy for accessing this field.

        :meta private:
        """
        return self.get("results")

    @property
    def answers(self) -> Any:
        """Helper accessor on the json result."""
        return self.get("answers")
class SearxSearchWrapper(BaseModel):
    """Wrapper for Searx API.

    To use you need to provide the searx host by passing the named parameter
    ``searx_host`` or exporting the environment variable ``SEARX_HOST``.

    In some situations you might want to disable SSL verification, for example
    if you are running searx locally. You can do this by passing the named
    parameter ``unsecure``. You can also pass the host url scheme as ``http``
    to disable SSL.

    Example:
        .. code-block:: python

            from langchain_community.utilities import SearxSearchWrapper
            searx = SearxSearchWrapper(searx_host="http://localhost:8888")

    Example with SSL disabled:
        .. code-block:: python

            from langchain_community.utilities import SearxSearchWrapper
            # note the unsecure parameter is not needed if you pass the url
            # scheme as http
            searx = SearxSearchWrapper(searx_host="http://localhost:8888",
                                       unsecure=True)
    """

    # Raw result of the last query; set by the sync/async query helpers.
    _result: SearxResults = PrivateAttr()

    searx_host: str = ""  # base URL of the SearxNG instance
    unsecure: bool = False  # True disables SSL verification
    params: dict = Field(default_factory=_get_default_params)
    headers: Optional[dict] = None
    engines: Optional[List[str]] = []
    categories: Optional[List[str]] = []
    query_suffix: Optional[str] = ""  # appended to every query string
    k: int = 10  # number of result snippets joined by ``run``
    aiosession: Optional[Any] = None  # optional caller-owned aiohttp session

    @model_validator(mode="before")
    @classmethod
    def validate_params(cls, values: Dict) -> Any:
        """Validate that custom searx params are merged with default ones."""
        # User-supplied params override the defaults key by key.
        user_params = values.get("params", {})
        default = _get_default_params()
        values["params"] = {**default, **user_params}

        # engines/categories lists are flattened into comma-separated params.
        engines = values.get("engines")
        if engines:
            values["params"]["engines"] = ",".join(engines)

        categories = values.get("categories")
        if categories:
            values["params"]["categories"] = ",".join(categories)

        searx_host = get_from_dict_or_env(values, "searx_host", "SEARX_HOST")
        if not searx_host.startswith("http"):
            print(  # noqa: T201
                f"Warning: missing the url scheme on host \
! assuming secure https://{searx_host} "
            )
            searx_host = "https://" + searx_host
        elif searx_host.startswith("http://"):
            # Plain-http hosts are treated as private/dev: skip SSL checks.
            values["unsecure"] = True
        values["searx_host"] = searx_host

        return values

    model_config = ConfigDict(
        extra="forbid",
    )

    def _searx_api_query(self, params: dict) -> SearxResults:
        """Actual request to searx API."""
        raw_result = requests.get(
            self.searx_host,
            headers=self.headers,
            params=params,
            verify=not self.unsecure,
        )
        # test if http result is ok
        if not raw_result.ok:
            raise ValueError("Searx API returned an error: ", raw_result.text)
        res = SearxResults(raw_result.text)
        self._result = res
        return res

    async def _asearx_api_query(self, params: dict) -> SearxResults:
        """Async request to the searx API using aiohttp."""
        if not self.aiosession:
            # No caller-supplied session: use a throwaway one.
            async with aiohttp.ClientSession() as session:
                kwargs: Dict = {
                    "headers": self.headers,
                    "params": params,
                }
                if self.unsecure:
                    kwargs["ssl"] = False
                async with session.get(self.searx_host, **kwargs) as response:
                    if not response.ok:
                        raise ValueError("Searx API returned an error: ", response.text)
                    result = SearxResults(await response.text())
                    self._result = result
        else:
            # NOTE(review): aiohttp requests do not accept a requests-style
            # ``verify`` keyword (the branch above uses ``ssl=False``) —
            # confirm this branch works with an insecure host.
            async with self.aiosession.get(
                self.searx_host,
                headers=self.headers,
                params=params,
                verify=not self.unsecure,
            ) as response:
                if not response.ok:
                    raise ValueError("Searx API returned an error: ", response.text)
                result = SearxResults(await response.text())
                self._result = result

        return result

    def run(
        self,
        query: str,
        engines: Optional[List[str]] = None,
        categories: Optional[List[str]] = None,
        query_suffix: Optional[str] = "",
        **kwargs: Any,
    ) -> str:
        """Run query through Searx API and parse results.

        You can pass any other params to the searx query API.

        Args:
            query: The query to search for.
            query_suffix: Extra suffix appended to the query.
            engines: List of engines to use for the query.
            categories: List of categories to use for the query.
            **kwargs: extra parameters to pass to the searx API.

        Returns:
            str: The result of the query.

        Raises:
            ValueError: If an error occurred with the query.

        Example:
            This will make a query to the qwant engine:

            .. code-block:: python

                from langchain_community.utilities import SearxSearchWrapper
                searx = SearxSearchWrapper(searx_host="http://my.searx.host")
                searx.run("what is the weather in France ?", engine="qwant")

                # the same result can be achieved using the `!` syntax of searx
                # to select the engine using `query_suffix`
                searx.run("what is the weather in France ?", query_suffix="!qwant")
        """
        _params = {
            "q": query,
        }
        params = {**self.params, **_params, **kwargs}

        # Instance-level suffix first, then the per-call suffix.
        if self.query_suffix and len(self.query_suffix) > 0:
            params["q"] += " " + self.query_suffix

        if isinstance(query_suffix, str) and len(query_suffix) > 0:
            params["q"] += " " + query_suffix

        if isinstance(engines, list) and len(engines) > 0:
            params["engines"] = ",".join(engines)

        if isinstance(categories, list) and len(categories) > 0:
            params["categories"] = ",".join(categories)

        res = self._searx_api_query(params)

        if len(res.answers) > 0:
            toret = res.answers[0]
        # only return the content of the results list
        elif len(res.results) > 0:
            toret = "\n\n".join([r.get("content", "") for r in res.results[: self.k]])
        else:
            toret = "No good search result found"

        return toret

    async def arun(
        self,
        query: str,
        engines: Optional[List[str]] = None,
        query_suffix: Optional[str] = "",
        **kwargs: Any,
    ) -> str:
        """Asynchronous version of `run`."""
        _params = {
            "q": query,
        }
        params = {**self.params, **_params, **kwargs}

        # Instance-level suffix first, then the per-call suffix.
        if self.query_suffix and len(self.query_suffix) > 0:
            params["q"] += " " + self.query_suffix

        if isinstance(query_suffix, str) and len(query_suffix) > 0:
            params["q"] += " " + query_suffix

        if isinstance(engines, list) and len(engines) > 0:
            params["engines"] = ",".join(engines)

        res = await self._asearx_api_query(params)

        if len(res.answers) > 0:
            toret = res.answers[0]
        # only return the content of the results list
        elif len(res.results) > 0:
            toret = "\n\n".join([r.get("content", "") for r in res.results[: self.k]])
        else:
            toret = "No good search result found"

        return toret

    def results(
        self,
        query: str,
        num_results: int,
        engines: Optional[List[str]] = None,
        categories: Optional[List[str]] = None,
        query_suffix: Optional[str] = "",
        **kwargs: Any,
    ) -> List[Dict]:
        """Run query through Searx API and returns the results with metadata.

        Args:
            query: The query to search for.
            query_suffix: Extra suffix appended to the query.
            num_results: Limit the number of results to return.
            engines: List of engines to use for the query.
            categories: List of categories to use for the query.
            **kwargs: extra parameters to pass to the searx API.

        Returns:
            Dict with the following keys:

            .. code-block:: none

                {
                    snippet:  The description of the result.
                    title:  The title of the result.
                    link: The link to the result.
                    engines: The engines used for the result.
                    category: Searx category of the result.
                }
        """
        _params = {
            "q": query,
        }
        params = {**self.params, **_params, **kwargs}
        if self.query_suffix and len(self.query_suffix) > 0:
            params["q"] += " " + self.query_suffix
        if isinstance(query_suffix, str) and len(query_suffix) > 0:
            params["q"] += " " + query_suffix
        if isinstance(engines, list) and len(engines) > 0:
            params["engines"] = ",".join(engines)
        if isinstance(categories, list) and len(categories) > 0:
            params["categories"] = ",".join(categories)
        results = self._searx_api_query(params).results[:num_results]
        if len(results) == 0:
            return [{"Result": "No good Search Result was found"}]

        return [
            {
                "snippet": result.get("content", ""),
                "title": result["title"],
                "link": result["url"],
                "engines": result["engines"],
                "category": result["category"],
            }
            for result in results
        ]

    async def aresults(
        self,
        query: str,
        num_results: int,
        engines: Optional[List[str]] = None,
        query_suffix: Optional[str] = "",
        **kwargs: Any,
    ) -> List[Dict]:
        """Asynchronously query with json results.

        Uses aiohttp. See `results` for more info.
        """
        _params = {
            "q": query,
        }
        params = {**self.params, **_params, **kwargs}

        if self.query_suffix and len(self.query_suffix) > 0:
            params["q"] += " " + self.query_suffix
        if isinstance(query_suffix, str) and len(query_suffix) > 0:
            params["q"] += " " + query_suffix
        if isinstance(engines, list) and len(engines) > 0:
            params["engines"] = ",".join(engines)
        results = (await self._asearx_api_query(params)).results[:num_results]
        if len(results) == 0:
            return [{"Result": "No good Search Result was found"}]

        return [
            {
                "snippet": result.get("content", ""),
                "title": result["title"],
                "link": result["url"],
                "engines": result["engines"],
                "category": result["category"],
            }
            for result in results
        ]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/arxiv.py | """Util that calls Arxiv."""
import logging
import os
import re
from typing import Any, Dict, Iterator, List, Optional
from langchain_core.documents import Document
from pydantic import BaseModel, model_validator
logger = logging.getLogger(__name__)
class ArxivAPIWrapper(BaseModel):
    """Wrapper around ArxivAPI.

    To use, you should have the ``arxiv`` python package installed.
    https://lukasschwab.me/arxiv.py/index.html
    This wrapper will use the Arxiv API to conduct searches and
    fetch document summaries. By default, it will return the document summaries
    of the top-k results.
    If the query is in the form of arxiv identifier
    (see https://info.arxiv.org/help/find/index.html), it will return the paper
    corresponding to the arxiv identifier.
    It limits the Document content by doc_content_chars_max.
    Set doc_content_chars_max=None if you don't want to limit the content size.

    Attributes:
        top_k_results: number of the top-scored document used for the arxiv tool
        ARXIV_MAX_QUERY_LENGTH: the cut limit on the query used for the arxiv tool.
        continue_on_failure (bool): If True, continue loading other URLs on failure.
        load_max_docs: a limit to the number of loaded documents
        load_all_available_meta:
            if True: the `metadata` of the loaded Documents contains all available
            meta info (see https://lukasschwab.me/arxiv.py/index.html#Result),
            if False: the `metadata` contains only the published date, title,
            authors and summary.
        doc_content_chars_max: an optional cut limit for the length of a document's
            content

    Example:
        .. code-block:: python

            from langchain_community.utilities.arxiv import ArxivAPIWrapper
            arxiv = ArxivAPIWrapper(
                top_k_results = 3,
                ARXIV_MAX_QUERY_LENGTH = 300,
                load_max_docs = 3,
                load_all_available_meta = False,
                doc_content_chars_max = 40000
            )
            arxiv.run("tree of thought llm")
    """

    arxiv_search: Any  #: :meta private: arxiv.Search, injected by the validator
    arxiv_exceptions: Any  # :meta private: tuple of arxiv error classes to catch
    top_k_results: int = 3
    ARXIV_MAX_QUERY_LENGTH: int = 300
    continue_on_failure: bool = False
    load_max_docs: int = 100
    load_all_available_meta: bool = False
    doc_content_chars_max: Optional[int] = 4000

    def is_arxiv_identifier(self, query: str) -> bool:
        """Check if a query is an arxiv identifier.

        Every whitespace-separated token must fully match either the new-style
        id (two-digit year + month, dot, 4-5 digits, optional version) or an
        old-style 7-digit id, per the regex below.
        """
        arxiv_identifier_pattern = r"\d{2}(0[1-9]|1[0-2])\.\d{4,5}(v\d+|)|\d{7}.*"
        for query_item in query[: self.ARXIV_MAX_QUERY_LENGTH].split():
            match_result = re.match(arxiv_identifier_pattern, query_item)
            if not match_result:
                return False
            assert match_result is not None
            # re.match anchors only at the start; require a full-token match.
            if not match_result.group(0) == query_item:
                return False
        return True

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that the python package exists in environment."""
        try:
            import arxiv

            values["arxiv_search"] = arxiv.Search
            values["arxiv_exceptions"] = (
                arxiv.ArxivError,
                arxiv.UnexpectedEmptyPageError,
                arxiv.HTTPError,
            )
            values["arxiv_result"] = arxiv.Result
        except ImportError:
            raise ImportError(
                "Could not import arxiv python package. "
                "Please install it with `pip install arxiv`."
            )
        return values

    def _fetch_results(self, query: str) -> Any:
        """Helper function to fetch arxiv results based on query.

        Identifier queries go through ``id_list``; everything else is a
        free-text search truncated to ARXIV_MAX_QUERY_LENGTH.
        """
        if self.is_arxiv_identifier(query):
            return self.arxiv_search(
                id_list=query.split(), max_results=self.top_k_results
            ).results()
        return self.arxiv_search(
            query[: self.ARXIV_MAX_QUERY_LENGTH], max_results=self.top_k_results
        ).results()

    def get_summaries_as_docs(self, query: str) -> List[Document]:
        """
        Performs an arxiv search and returns list of
        documents, with summaries as the content.

        If an error occurs or no documents found, error text
        is returned instead. Wrapper for
        https://lukasschwab.me/arxiv.py/index.html#Search

        Args:
            query: a plaintext search query
        """
        try:
            results = self._fetch_results(
                query
            )  # Using helper function to fetch results
        except self.arxiv_exceptions as ex:
            logger.error(f"Arxiv exception: {ex}")  # Added error logging
            return [Document(page_content=f"Arxiv exception: {ex}")]
        docs = [
            Document(
                page_content=result.summary,
                metadata={
                    "Entry ID": result.entry_id,
                    "Published": result.updated.date(),
                    "Title": result.title,
                    "Authors": ", ".join(a.name for a in result.authors),
                },
            )
            for result in results
        ]
        return docs

    def run(self, query: str) -> str:
        """
        Performs an arxiv search and returns a single string
        with the publish date, title, authors, and summary
        for each article separated by two newlines.

        If an error occurs or no documents found, error text
        is returned instead. Wrapper for
        https://lukasschwab.me/arxiv.py/index.html#Search

        Args:
            query: a plaintext search query
        """
        try:
            results = self._fetch_results(
                query
            )  # Using helper function to fetch results
        except self.arxiv_exceptions as ex:
            logger.error(f"Arxiv exception: {ex}")  # Added error logging
            return f"Arxiv exception: {ex}"
        docs = [
            f"Published: {result.updated.date()}\n"
            f"Title: {result.title}\n"
            f"Authors: {', '.join(a.name for a in result.authors)}\n"
            f"Summary: {result.summary}"
            for result in results
        ]
        if docs:
            # Truncate the combined report, not each entry individually.
            return "\n\n".join(docs)[: self.doc_content_chars_max]
        else:
            return "No good Arxiv Result was found"

    def load(self, query: str) -> List[Document]:
        """
        Run Arxiv search and get the article texts plus the article meta information.
        See https://lukasschwab.me/arxiv.py/index.html#Search

        Returns: a list of documents with the document.page_content in text format

        Performs an arxiv search, downloads the top k results as PDFs, loads
        them as Documents, and returns them in a List.

        Args:
            query: a plaintext search query
        """
        return list(self.lazy_load(query))

    def lazy_load(self, query: str) -> Iterator[Document]:
        """
        Run Arxiv search and get the article texts plus the article meta information.
        See https://lukasschwab.me/arxiv.py/index.html#Search

        Returns: documents with the document.page_content in text format

        Performs an arxiv search, downloads the top k results as PDFs, loads
        them as Documents, and returns them.

        Args:
            query: a plaintext search query
        """
        try:
            import fitz
        except ImportError:
            raise ImportError(
                "PyMuPDF package not found, please install it with "
                "`pip install pymupdf`"
            )
        try:
            # Remove the ":" and "-" from the query, as they can cause search problems
            query = query.replace(":", "").replace("-", "")
            results = self._fetch_results(
                query
            )  # Using helper function to fetch results
        except self.arxiv_exceptions as ex:
            logger.debug("Error on arxiv: %s", ex)
            return
        for result in results:
            try:
                doc_file_name: str = result.download_pdf()
                with fitz.open(doc_file_name) as doc_file:
                    text: str = "".join(page.get_text() for page in doc_file)
            except (FileNotFoundError, fitz.fitz.FileDataError) as f_ex:
                # Unreadable/corrupt download: skip this result quietly.
                logger.debug(f_ex)
                continue
            except Exception as e:
                if self.continue_on_failure:
                    logger.error(e)
                    continue
                else:
                    raise e
            if self.load_all_available_meta:
                extra_metadata = {
                    "entry_id": result.entry_id,
                    "published_first_time": str(result.published.date()),
                    "comment": result.comment,
                    "journal_ref": result.journal_ref,
                    "doi": result.doi,
                    "primary_category": result.primary_category,
                    "categories": result.categories,
                    "links": [link.href for link in result.links],
                }
            else:
                extra_metadata = {}
            metadata = {
                "Published": str(result.updated.date()),
                "Title": result.title,
                "Authors": ", ".join(a.name for a in result.authors),
                "Summary": result.summary,
                **extra_metadata,
            }
            yield Document(
                page_content=text[: self.doc_content_chars_max], metadata=metadata
            )
            # Generator semantics: this cleanup only runs once the consumer
            # advances past the yielded Document.
            os.remove(doc_file_name)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/google_places_api.py | """Chain that calls Google Places API."""
import logging
from typing import Any, Dict, Optional
from langchain_core._api.deprecation import deprecated
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
@deprecated(
    since="0.0.33",
    removal="1.0",
    alternative_import="langchain_google_community.GooglePlacesAPIWrapper",
)
class GooglePlacesAPIWrapper(BaseModel):
    """Wrapper around Google Places API.

    To use, you should have the ``googlemaps`` python package installed,
    **an API key for the google maps platform**,
    and the environment variable ''GPLACES_API_KEY''
    set with your API key , or pass 'gplaces_api_key'
    as a named parameter to the constructor.

    By default, this will return the all the results on the input query.
    You can use the top_k_results argument to limit the number of results.

    Example:
        .. code-block:: python

            from langchain_community.utilities import GooglePlacesAPIWrapper
            gplaceapi = GooglePlacesAPIWrapper()
    """

    gplaces_api_key: Optional[str] = None
    google_map_client: Any = None  #: :meta private: googlemaps.Client instance
    top_k_results: Optional[int] = None  # None means return every result

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key is in your environment variable."""
        gplaces_api_key = get_from_dict_or_env(
            values, "gplaces_api_key", "GPLACES_API_KEY"
        )
        values["gplaces_api_key"] = gplaces_api_key
        try:
            import googlemaps

            values["google_map_client"] = googlemaps.Client(gplaces_api_key)
        except ImportError:
            raise ImportError(
                "Could not import googlemaps python package. "
                "Please install it with `pip install googlemaps`."
            )
        return values

    def run(self, query: str) -> str:
        """Run Places search and get k number of places that exists that match."""
        search_results = self.google_map_client.places(query)["results"]
        num_to_return = len(search_results)

        places = []

        if num_to_return == 0:
            return "Google Places did not find any places that match the description"

        # Cap at top_k_results when configured.
        num_to_return = (
            num_to_return
            if self.top_k_results is None
            else min(num_to_return, self.top_k_results)
        )

        for i in range(num_to_return):
            result = search_results[i]
            details = self.fetch_place_details(result["place_id"])

            if details is not None:
                places.append(details)

        return "\n".join([f"{i+1}. {item}" for i, item in enumerate(places)])

    def fetch_place_details(self, place_id: str) -> Optional[str]:
        """Fetch and format details for one place id; None on any failure."""
        try:
            place_details = self.google_map_client.place(place_id)
            place_details["place_id"] = place_id
            formatted_details = self.format_place_details(place_details)
            return formatted_details
        except Exception as e:
            logging.error(f"An Error occurred while fetching place details: {e}")
            return None

    def format_place_details(self, place_details: Dict[str, Any]) -> Optional[str]:
        """Render a place-details payload as a short text block; None on failure."""
        try:
            name = place_details.get("result", {}).get("name", "Unknown")
            address = place_details.get("result", {}).get(
                "formatted_address", "Unknown"
            )
            phone_number = place_details.get("result", {}).get(
                "formatted_phone_number", "Unknown"
            )
            website = place_details.get("result", {}).get("website", "Unknown")
            place_id = place_details.get("result", {}).get("place_id", "Unknown")

            formatted_details = (
                f"{name}\nAddress: {address}\n"
                f"Google place ID: {place_id}\n"
                f"Phone: {phone_number}\nWebsite: {website}\n\n"
            )
            return formatted_details
        except Exception as e:
            logging.error(f"An error occurred while formatting place details: {e}")
            return None
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/github.py | """Util that calls GitHub."""
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, Dict, List, Optional
import requests
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
if TYPE_CHECKING:
from github.Issue import Issue
from github.PullRequest import PullRequest
def _import_tiktoken() -> Any:
"""Import tiktoken."""
try:
import tiktoken
except ImportError:
raise ImportError(
"tiktoken is not installed. "
"Please install it with `pip install tiktoken`"
)
return tiktoken
class GitHubAPIWrapper(BaseModel):
"""Wrapper for GitHub API."""
github: Any = None #: :meta private:
github_repo_instance: Any = None #: :meta private:
github_repository: Optional[str] = None
github_app_id: Optional[str] = None
github_app_private_key: Optional[str] = None
active_branch: Optional[str] = None
github_base_branch: Optional[str] = None
model_config = ConfigDict(
extra="forbid",
)
    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        github_repository = get_from_dict_or_env(
            values, "github_repository", "GITHUB_REPOSITORY"
        )

        github_app_id = get_from_dict_or_env(values, "github_app_id", "GITHUB_APP_ID")

        github_app_private_key = get_from_dict_or_env(
            values, "github_app_private_key", "GITHUB_APP_PRIVATE_KEY"
        )

        try:
            from github import Auth, GithubIntegration

        except ImportError:
            raise ImportError(
                "PyGithub is not installed. "
                "Please install it with `pip install PyGithub`"
            )

        try:
            # interpret the key as a file path
            # fallback to interpreting as the key itself
            with open(github_app_private_key, "r") as f:
                private_key = f.read()
        except Exception:
            private_key = github_app_private_key

        auth = Auth.AppAuth(
            github_app_id,
            private_key,
        )
        gi = GithubIntegration(auth=auth)
        installation = gi.get_installations()
        if not installation:
            raise ValueError(
                f"Please make sure to install the created github app with id "
                f"{github_app_id} on the repo: {github_repository}"
                "More instructions can be found at "
                "https://docs.github.com/en/apps/using-"
                "github-apps/installing-your-own-github-app"
            )
        try:
            # Use the first installation the app has access to.
            installation = installation[0]
        except ValueError as e:
            raise ValueError(
                "Please make sure to give correct github parameters "
                f"Error message: {e}"
            )
        # create a GitHub instance:
        g = installation.get_github_for_installation()
        repo = g.get_repo(github_repository)

        # Both branches default to the repository's default branch.
        github_base_branch = get_from_dict_or_env(
            values,
            "github_base_branch",
            "GITHUB_BASE_BRANCH",
            default=repo.default_branch,
        )

        active_branch = get_from_dict_or_env(
            values,
            "active_branch",
            "ACTIVE_BRANCH",
            default=repo.default_branch,
        )

        values["github"] = g
        values["github_repo_instance"] = repo
        values["github_repository"] = github_repository
        values["github_app_id"] = github_app_id
        values["github_app_private_key"] = github_app_private_key
        values["active_branch"] = active_branch
        values["github_base_branch"] = github_base_branch

        return values
def parse_issues(self, issues: List[Issue]) -> List[dict]:
"""
Extracts title and number from each Issue and puts them in a dictionary
Parameters:
issues(List[Issue]): A list of Github Issue objects
Returns:
List[dict]: A dictionary of issue titles and numbers
"""
parsed = []
for issue in issues:
title = issue.title
number = issue.number
opened_by = issue.user.login if issue.user else None
issue_dict = {"title": title, "number": number}
if opened_by is not None:
issue_dict["opened_by"] = opened_by
parsed.append(issue_dict)
return parsed
def parse_pull_requests(self, pull_requests: List[PullRequest]) -> List[dict]:
"""
Extracts title and number from each Issue and puts them in a dictionary
Parameters:
issues(List[Issue]): A list of Github Issue objects
Returns:
List[dict]: A dictionary of issue titles and numbers
"""
parsed = []
for pr in pull_requests:
parsed.append(
{
"title": pr.title,
"number": pr.number,
"commits": str(pr.commits),
"comments": str(pr.comments),
}
)
return parsed
def get_issues(self) -> str:
"""
Fetches all open issues from the repo excluding pull requests
Returns:
str: A plaintext report containing the number of issues
and each issue's title and number.
"""
issues = self.github_repo_instance.get_issues(state="open")
# Filter out pull requests (part of GH issues object)
issues = [issue for issue in issues if not issue.pull_request]
if issues:
parsed_issues = self.parse_issues(issues)
parsed_issues_str = (
"Found " + str(len(parsed_issues)) + " issues:\n" + str(parsed_issues)
)
return parsed_issues_str
else:
return "No open issues available"
def list_open_pull_requests(self) -> str:
"""
Fetches all open PRs from the repo
Returns:
str: A plaintext report containing the number of PRs
and each PR's title and number.
"""
# issues = self.github_repo_instance.get_issues(state="open")
pull_requests = self.github_repo_instance.get_pulls(state="open")
if pull_requests.totalCount > 0:
parsed_prs = self.parse_pull_requests(pull_requests)
parsed_prs_str = (
"Found " + str(len(parsed_prs)) + " pull requests:\n" + str(parsed_prs)
)
return parsed_prs_str
else:
return "No open pull requests available"
def list_files_in_main_branch(self) -> str:
    """
    Fetches all files in the main branch of the repo.

    Returns:
        str: A plaintext report containing the paths and names of the files.
    """
    collected: List[str] = []
    try:
        root_entries = self.github_repo_instance.get_contents(
            "", ref=self.github_base_branch
        )
        for entry in root_entries:
            if entry.type == "dir":
                # NOTE(review): get_files_from_directory returns a *str*, so
                # extend() appends individual characters — behavior preserved
                # as-is; confirm this is intended.
                collected.extend(self.get_files_from_directory(entry.path))
            else:
                collected.append(entry.path)
        if not collected:
            return "No files found in the main branch"
        listing = "\n".join(collected)
        return f"Found {len(collected)} files in the main branch:\n{listing}"
    except Exception as e:
        return str(e)
def set_active_branch(self, branch_name: str) -> str:
    """Equivalent to `git checkout branch_name` for this Agent.
    Clones formatting from Github.

    Returns an Error (as a string) if branch doesn't exist.
    """
    curr_branches = [
        branch.name for branch in self.github_repo_instance.get_branches()
    ]
    if branch_name in curr_branches:
        self.active_branch = branch_name
        return f"Switched to branch `{branch_name}`"
    # Implicit f-string concatenation previously produced "exist,in repo";
    # a trailing space makes the error message read correctly.
    return (
        f"Error {branch_name} does not exist, "
        f"in repo with current branches: {str(curr_branches)}"
    )
def list_branches_in_repo(self) -> str:
    """
    Fetches a list of all branches in the repository.

    Returns:
        str: A plaintext report containing the names of the branches.
    """
    try:
        names = [b.name for b in self.github_repo_instance.get_branches()]
        if not names:
            return "No branches found in the repository"
        listing = "\n".join(names)
        return (
            f"Found {len(names)} branches in the repository:"
            f"\n{listing}"
        )
    except Exception as e:
        return str(e)
def create_branch(self, proposed_branch_name: str) -> str:
    """
    Create a new branch, and set it as the active bot branch.
    Equivalent to `git switch -c proposed_branch_name`
    If the proposed branch already exists, we append _v1 then _v2...
    until a unique name is found.

    Returns:
        str: A plaintext success message.
    """
    from github import GithubException

    new_branch_name = proposed_branch_name
    base_branch = self.github_repo_instance.get_branch(
        self.github_repo_instance.default_branch
    )
    for attempt in range(1000):
        try:
            self.github_repo_instance.create_git_ref(
                ref=f"refs/heads/{new_branch_name}", sha=base_branch.commit.sha
            )
            self.active_branch = new_branch_name
            return (
                f"Branch '{new_branch_name}' "
                "created successfully, and set as current active branch."
            )
        except GithubException as e:
            if e.status == 422 and "Reference already exists" in e.data["message"]:
                # Same suffix sequence as before (_v1, _v2, ...) but derived
                # directly from the loop counter; the old code incremented a
                # copy of the loop variable that the next iteration clobbered.
                new_branch_name = f"{proposed_branch_name}_v{attempt + 1}"
            else:
                # Handle any other exceptions
                print(f"Failed to create branch. Error: {e}")  # noqa: T201
                raise Exception(
                    "Unable to create branch name from proposed_branch_name: "
                    f"{proposed_branch_name}"
                )
    return (
        "Unable to create branch. "
        "At least 1000 branches exist with names derived from "
        f"proposed_branch_name: `{proposed_branch_name}`"
    )
def list_files_in_bot_branch(self) -> str:
    """
    Fetches all files in the active branch of the repo,
    the branch the bot uses to make changes.

    Returns:
        str: A plaintext list containing the filepaths in the branch.
    """
    found: List[str] = []
    try:
        for entry in self.github_repo_instance.get_contents(
            "", ref=self.active_branch
        ):
            if entry.type == "dir":
                found.extend(self.get_files_from_directory(entry.path))
            else:
                found.append(entry.path)
        if not found:
            return f"No files found in branch: `{self.active_branch}`"
        listing = "\n".join(found)
        return (
            f"Found {len(found)} files in branch `{self.active_branch}`:\n"
            f"{listing}"
        )
    except Exception as e:
        return f"Error: {e}"
def get_files_from_directory(self, directory_path: str) -> str:
    """
    Recursively fetches files from a directory in the repo.

    Parameters:
        directory_path (str): Path to the directory

    Returns:
        str: List of file paths, or an error message.
    """
    from github import GithubException

    files: List[str] = []
    try:
        contents = self.github_repo_instance.get_contents(
            directory_path, ref=self.active_branch
        )
    except GithubException as e:
        # NOTE(review): newer PyGithub versions expose the error text via
        # e.data rather than e.message — confirm against the pinned version.
        return f"Error: status code {e.status}, {e.message}"
    for content in contents:
        if content.type == "dir":
            # The recursive call returns a *string*, so extend() appends
            # individual characters rather than paths. Preserved as-is since
            # callers consume the stringified result — TODO confirm intended.
            files.extend(self.get_files_from_directory(content.path))
        else:
            files.append(content.path)
    # Rendered via str() rather than returned as a list (see return type).
    return str(files)
def get_issue(self, issue_number: int) -> Dict[str, Any]:
    """
    Fetches a specific issue and its first 10 comments

    Parameters:
        issue_number(int): The number for the github issue

    Returns:
        dict: A dictionary containing the issue's title,
        body, comments as a string, and the username of the user
        who opened the issue
    """
    issue = self.github_repo_instance.get_issue(number=issue_number)
    gathered: List[dict] = []
    page_index = 0
    # Page through comments until a page comes back empty or more than ten
    # comments have been collected.
    while len(gathered) <= 10:
        batch = issue.get_comments().get_page(page_index)
        if len(batch) == 0:
            break
        gathered.extend(
            {"body": c.body, "user": c.user.login} for c in batch
        )
        page_index += 1
    opened_by = issue.user.login if issue.user and issue.user.login else None
    return {
        "number": issue_number,
        "title": issue.title,
        "body": issue.body,
        "comments": str(gathered),
        "opened_by": str(opened_by),
    }
def list_pull_request_files(self, pr_number: int) -> List[Dict[str, Any]]:
    """Fetches the full text of all files in a PR. Truncates after first 3k tokens.
    # TODO: Enhancement to summarize files with ctags if they're getting long.

    Args:
        pr_number(int): The number of the pull request on Github

    Returns:
        dict: A dictionary containing the issue's title,
        body, and comments as a string
    """
    tiktoken = _import_tiktoken()
    # Budget for the combined token count of all returned file contents.
    MAX_TOKENS_FOR_FILES = 3_000
    pr_files = []
    pr = self.github_repo_instance.get_pull(number=int(pr_number))
    total_tokens = 0
    page = 0
    while True:  # or while (total_tokens + tiktoken()) < MAX_TOKENS_FOR_FILES:
        files_page = pr.get_files().get_page(page)
        if len(files_page) == 0:
            break
        for file in files_page:
            try:
                # Two-step fetch: the contents_url response is metadata that
                # carries a raw "download_url" for the actual file body.
                file_metadata_response = requests.get(file.contents_url)
                if file_metadata_response.status_code == 200:
                    download_url = json.loads(file_metadata_response.text)[
                        "download_url"
                    ]
                else:
                    print(f"Failed to download file: {file.contents_url}, skipping")  # noqa: T201
                    continue
                file_content_response = requests.get(download_url)
                if file_content_response.status_code == 200:
                    # Save the content as a UTF-8 string
                    file_content = file_content_response.text
                else:
                    print(  # noqa: T201
                        "Failed downloading file content "
                        f"(Error {file_content_response.status_code}). Skipping"
                    )
                    continue
                # Token cost of including this file; the literal mirrors the
                # framing text added when the result is rendered downstream.
                file_tokens = len(
                    tiktoken.get_encoding("cl100k_base").encode(
                        file_content + file.filename + "file_name file_contents"
                    )
                )
                # Files that would exceed the budget are skipped entirely
                # (not truncated); later, smaller files may still fit.
                if (total_tokens + file_tokens) < MAX_TOKENS_FOR_FILES:
                    pr_files.append(
                        {
                            "filename": file.filename,
                            "contents": file_content,
                            "additions": file.additions,
                            "deletions": file.deletions,
                        }
                    )
                    total_tokens += file_tokens
            except Exception as e:
                # Best-effort: a failure on one file must not abort the rest.
                print(f"Error when reading files from a PR on github. {e}")  # noqa: T201
        page += 1
    return pr_files
def get_pull_request(self, pr_number: int) -> Dict[str, Any]:
    """
    Fetches a specific pull request and its first 10 comments,
    limited by max_tokens.

    Parameters:
        pr_number(int): The number for the Github pull
        max_tokens(int): The maximum number of tokens in the response

    Returns:
        dict: A dictionary containing the pull's title, body,
        and comments as a string
    """
    # Hard cap on the total tokens across all fields of the response.
    max_tokens = 2_000
    pull = self.github_repo_instance.get_pull(number=pr_number)
    total_tokens = 0

    def get_tokens(text: str) -> int:
        # Token count under the cl100k_base encoding (tiktoken).
        tiktoken = _import_tiktoken()
        return len(tiktoken.get_encoding("cl100k_base").encode(text))

    def add_to_dict(data_dict: Dict[str, Any], key: str, value: str) -> None:
        # Store key/value only if it fits in the remaining token budget;
        # oversize values are silently dropped.
        nonlocal total_tokens  # Declare total_tokens as nonlocal
        tokens = get_tokens(value)
        if total_tokens + tokens <= max_tokens:
            data_dict[key] = value
            total_tokens += tokens  # Now this will modify the outer variable

    response_dict: Dict[str, str] = {}
    add_to_dict(response_dict, "title", pull.title)
    add_to_dict(response_dict, "number", str(pr_number))
    add_to_dict(response_dict, "body", pull.body if pull.body else "")
    # Gather issue comments page by page until a page is empty, more than ten
    # comments are collected, or the token budget is exhausted.
    comments: List[str] = []
    page = 0
    while len(comments) <= 10:
        comments_page = pull.get_issue_comments().get_page(page)
        if len(comments_page) == 0:
            break
        for comment in comments_page:
            comment_str = str({"body": comment.body, "user": comment.user.login})
            if total_tokens + get_tokens(comment_str) > max_tokens:
                break
            comments.append(comment_str)
            total_tokens += get_tokens(comment_str)
        page += 1
    add_to_dict(response_dict, "comments", str(comments))
    # Same pagination/budget scheme for commit messages.
    commits: List[str] = []
    page = 0
    while len(commits) <= 10:
        commits_page = pull.get_commits().get_page(page)
        if len(commits_page) == 0:
            break
        for commit in commits_page:
            commit_str = str({"message": commit.commit.message})
            if total_tokens + get_tokens(commit_str) > max_tokens:
                break
            commits.append(commit_str)
            total_tokens += get_tokens(commit_str)
        page += 1
    add_to_dict(response_dict, "commits", str(commits))
    return response_dict
def create_pull_request(self, pr_query: str) -> str:
    """
    Makes a pull request from the bot's branch to the base branch

    Parameters:
        pr_query(str): a string which contains the PR title
        and the PR body. The title is the first line
        in the string, and the body are the rest of the string.
        For example, "Updated README\nmade changes to add info"

    Returns:
        str: A success or failure message
    """
    if self.github_base_branch == self.active_branch:
        return """Cannot make a pull request because
    commits are already in the main or master branch."""
    try:
        title = pr_query.split("\n")[0]
        # NOTE(review): the +2 offset skips one character past the newline —
        # looks like an off-by-one for single-"\n" queries; preserved as-is.
        body = pr_query[len(title) + 2 :]
        created = self.github_repo_instance.create_pull(
            title=title,
            body=body,
            head=self.active_branch,
            base=self.github_base_branch,
        )
        return f"Successfully created PR number {created.number}"
    except Exception as e:
        return "Unable to make pull request due to error:\n" + str(e)
def comment_on_issue(self, comment_query: str) -> str:
    """
    Adds a comment to a github issue

    Parameters:
        comment_query(str): a string which contains the issue number,
        two newlines, and the comment.
        for example: "1\n\nWorking on it now"
        adds the comment "working on it now" to issue 1

    Returns:
        str: A success or failure message
    """
    raw_number, _, _ = comment_query.partition("\n\n")
    issue_number = int(raw_number)
    comment = comment_query[len(str(issue_number)) + 2 :]
    try:
        target = self.github_repo_instance.get_issue(number=issue_number)
        target.create_comment(comment)
        return "Commented on issue " + str(issue_number)
    except Exception as e:
        return "Unable to make comment due to error:\n" + str(e)
def create_file(self, file_query: str) -> str:
    """
    Creates a new file on the Github repo

    Parameters:
        file_query(str): a string which contains the file path
        and the file contents. The file path is the first line
        in the string, and the contents are the rest of the string.
        For example, "hello_world.md\n# Hello World!"

    Returns:
        str: A success or failure message
    """
    if self.active_branch == self.github_base_branch:
        return (
            "You're attempting to commit to the directly to the"
            f"{self.github_base_branch} branch, which is protected. "
            "Please create a new branch and try again."
        )
    file_path = file_query.split("\n")[0]
    file_contents = file_query[len(file_path) + 2 :]
    try:
        try:
            # Probe for an existing file; a hit means the caller should be
            # using update_file instead.
            existing = self.github_repo_instance.get_contents(
                file_path, ref=self.active_branch
            )
            if existing:
                return (
                    f"File already exists at `{file_path}` "
                    f"on branch `{self.active_branch}`. You must use "
                    "`update_file` to modify it."
                )
        except Exception:
            # expected behavior, file shouldn't exist yet
            pass
        self.github_repo_instance.create_file(
            path=file_path,
            message="Create " + file_path,
            content=file_contents,
            branch=self.active_branch,
        )
        return "Created file " + file_path
    except Exception as e:
        return "Unable to make file due to error:\n" + str(e)
def read_file(self, file_path: str) -> str:
    """
    Read a file from this agent's branch, defined by self.active_branch,
    which supports PR branches.

    Parameters:
        file_path(str): the file path

    Returns:
        str: The file decoded as a string, or an error message if not found
    """
    try:
        blob = self.github_repo_instance.get_contents(
            file_path, ref=self.active_branch
        )
        return blob.decoded_content.decode("utf-8")
    except Exception as e:
        return (
            f"File not found `{file_path}` on branch"
            f"`{self.active_branch}`. Error: {str(e)}"
        )
def update_file(self, file_query: str) -> str:
    """
    Updates a file with new content.

    Parameters:
        file_query(str): Contains the file path and the file contents.
            The old file contents is wrapped in OLD <<<< and >>>> OLD
            The new file contents is wrapped in NEW <<<< and >>>> NEW
            For example:
            /test/hello.txt
            OLD <<<<
            Hello Earth!
            >>>> OLD
            NEW <<<<
            Hello Mars!
            >>>> NEW

    Returns:
        A success or failure message
    """
    if self.active_branch == self.github_base_branch:
        return (
            "You're attempting to commit to the directly"
            f"to the {self.github_base_branch} branch, which is protected. "
            "Please create a new branch and try again."
        )
    try:
        file_path: str = file_query.split("\n")[0]
        old_snippet = file_query.split("OLD <<<<")[1].split(">>>> OLD")[0].strip()
        new_snippet = file_query.split("NEW <<<<")[1].split(">>>> NEW")[0].strip()
        current = self.read_file(file_path)
        updated = current.replace(old_snippet, new_snippet)
        # An unchanged string means the OLD snippet never matched.
        if current == updated:
            return (
                "File content was not updated because old content was not found."
                "It may be helpful to use the read_file action to get "
                "the current file contents."
            )
        self.github_repo_instance.update_file(
            path=file_path,
            message="Update " + str(file_path),
            content=updated,
            branch=self.active_branch,
            sha=self.github_repo_instance.get_contents(
                file_path, ref=self.active_branch
            ).sha,
        )
        return "Updated file " + str(file_path)
    except Exception as e:
        return "Unable to update file due to error:\n" + str(e)
def delete_file(self, file_path: str) -> str:
    """
    Deletes a file from the repo

    Parameters:
        file_path(str): Where the file is

    Returns:
        str: Success or failure message
    """
    if self.active_branch == self.github_base_branch:
        return (
            "You're attempting to commit to the directly"
            f"to the {self.github_base_branch} branch, which is protected. "
            "Please create a new branch and try again."
        )
    try:
        # Deleting requires the current blob SHA of the file.
        current_sha = self.github_repo_instance.get_contents(
            file_path, ref=self.active_branch
        ).sha
        self.github_repo_instance.delete_file(
            path=file_path,
            message="Delete " + file_path,
            branch=self.active_branch,
            sha=current_sha,
        )
        return "Deleted file " + file_path
    except Exception as e:
        return "Unable to delete file due to error:\n" + str(e)
def search_issues_and_prs(self, query: str) -> str:
    """
    Searches issues and pull requests in the repository.

    Parameters:
        query(str): The search query

    Returns:
        str: A string containing the first 5 issues and pull requests
    """
    hits = self.github.search_issues(query, repo=self.github_repository)
    shown = min(5, hits.totalCount)
    lines = [f"Top {shown} results:"]
    lines.extend(
        f"Title: {item.title}, Number: {item.number}, State: {item.state}"
        for item in hits[:shown]
    )
    return "\n".join(lines)
def search_code(self, query: str) -> str:
    """
    Searches code in the repository.
    # Todo: limit total tokens returned...

    Parameters:
        query(str): The search query

    Returns:
        str: A string containing, at most, the top 5 search results
    """
    hits = self.github.search_code(query=query, repo=self.github_repository)
    if hits.totalCount == 0:
        return "0 results found."
    shown = min(5, hits.totalCount)
    lines = [f"Showing top {shown} of {hits.totalCount} results:"]
    for position, code in enumerate(hits):
        if position >= shown:
            break
        # Fetch the full file content for each hit via PyGithub.
        text = self.github_repo_instance.get_contents(
            code.path, ref=self.active_branch
        ).decoded_content.decode()
        lines.append(
            f"Filepath: `{code.path}`\nFile contents: "
            f"{text}\n<END OF FILE>"
        )
    return "\n".join(lines)
def create_review_request(self, reviewer_username: str) -> str:
    """
    Creates a review request on *THE* open pull request
    that matches the current active_branch.

    Parameters:
        reviewer_username(str): The username of the person who is being requested

    Returns:
        str: A message confirming the creation of the review request
    """
    open_prs = self.github_repo_instance.get_pulls(state="open", sort="created")
    # Locate the first open PR whose head ref is the bot's active branch.
    matching = None
    for candidate in open_prs:
        if candidate.head.ref == self.active_branch:
            matching = candidate
            break
    if matching is None:
        return (
            "No open pull request found for the "
            f"current branch `{self.active_branch}`"
        )
    try:
        matching.create_review_request(reviewers=[reviewer_username])
        return (
            f"Review request created for user {reviewer_username} "
            f"on PR #{matching.number}"
        )
    except Exception as e:
        return f"Failed to create a review request with error {e}"
def run(self, mode: str, query: str) -> str:
    """Dispatch a tool ``mode`` to the matching wrapper method.

    Modes fall into three groups: issue/PR lookups that return JSON,
    string-query operations, and no-argument listings.
    """
    # Lookups keyed by an integer id; results are JSON-serialized.
    json_modes = {
        "get_issue": self.get_issue,
        "get_pull_request": self.get_pull_request,
        "list_pull_request_files": self.list_pull_request_files,
    }
    if mode in json_modes:
        return json.dumps(json_modes[mode](int(query)))
    # Operations that consume the raw query string.
    query_modes = {
        "comment_on_issue": self.comment_on_issue,
        "create_file": self.create_file,
        "create_pull_request": self.create_pull_request,
        "read_file": self.read_file,
        "update_file": self.update_file,
        "delete_file": self.delete_file,
        "set_active_branch": self.set_active_branch,
        "create_branch": self.create_branch,
        "get_files_from_directory": self.get_files_from_directory,
        "search_issues_and_prs": self.search_issues_and_prs,
        "search_code": self.search_code,
        "create_review_request": self.create_review_request,
    }
    if mode in query_modes:
        return query_modes[mode](query)
    # Listings that take no argument at all.
    no_arg_modes = {
        "get_issues": self.get_issues,
        "list_open_pull_requests": self.list_open_pull_requests,
        "list_files_in_main_branch": self.list_files_in_main_branch,
        "list_files_in_bot_branch": self.list_files_in_bot_branch,
        "list_branches_in_repo": self.list_branches_in_repo,
    }
    if mode in no_arg_modes:
        return no_arg_modes[mode]()
    raise ValueError("Invalid mode" + mode)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/stackexchange.py | import html
from typing import Any, Dict, Literal
from pydantic import BaseModel, Field, model_validator
class StackExchangeAPIWrapper(BaseModel):
    """Wrapper for Stack Exchange API."""

    client: Any = None  #: :meta private:
    max_results: int = 3
    """Max number of results to include in output."""
    query_type: Literal["all", "title", "body"] = "all"
    """Which part of StackOverflows items to match against. One of 'all', 'title',
    'body'. Defaults to 'all'.
    """
    fetch_params: Dict[str, Any] = Field(default_factory=dict)
    """Additional params to pass to StackApi.fetch."""
    result_separator: str = "\n\n"
    """Separator between question,answer pairs."""

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that the required Python package exists."""
        try:
            from stackapi import StackAPI
        except ImportError:
            raise ImportError(
                "The 'stackapi' Python package is not installed. "
                "Please install it with `pip install stackapi`."
            )
        values["client"] = StackAPI("stackoverflow")
        return values

    def run(self, query: str) -> str:
        """Run query through StackExchange API and parse results."""
        # "all" maps to the generic free-text key "q"; otherwise match only
        # the requested item part (title/body).
        search_key = "q" if self.query_type == "all" else self.query_type
        output = self.client.fetch(
            "search/excerpts", **{search_key: query}, **self.fetch_params
        )
        items = output["items"]
        if len(items) < 1:
            return f"No relevant results found for '{query}' on Stack Overflow."
        questions = [it for it in items if it["item_type"] == "question"]
        questions = questions[: self.max_results]
        answers = [it for it in items if it["item_type"] == "answer"]
        snippets = []
        for question in questions:
            snippet = f"Question: {question['title']}\n{question['excerpt']}"
            matching = [
                ans
                for ans in answers
                if ans["question_id"] == question["question_id"]
            ]
            if matching:
                # Prefer the accepted answer; otherwise take the first match.
                accepted = [ans for ans in matching if ans["is_accepted"]]
                best = accepted[0] if accepted else matching[0]
                snippet += f"\nAnswer: {html.unescape(best['excerpt'])}"
            snippets.append(snippet)
        return self.result_separator.join(snippets)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/twilio.py | """Util that calls Twilio."""
from typing import Any, Dict, Optional
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
class TwilioAPIWrapper(BaseModel):
    """Messaging Client using Twilio.

    To use, you should have the ``twilio`` python package installed,
    and the environment variables ``TWILIO_ACCOUNT_SID``, ``TWILIO_AUTH_TOKEN``, and
    ``TWILIO_FROM_NUMBER``, or pass `account_sid`, `auth_token`, and `from_number` as
    named parameters to the constructor.

    Example:
        .. code-block:: python

            from langchain_community.utilities.twilio import TwilioAPIWrapper
            twilio = TwilioAPIWrapper(
                account_sid="ACxxx",
                auth_token="xxx",
                from_number="+10123456789"
            )
            twilio.run('test', '+12484345508')
    """

    client: Any = None  #: :meta private:
    account_sid: Optional[str] = None
    """Twilio account string identifier."""
    auth_token: Optional[str] = None
    """Twilio auth token."""
    from_number: Optional[str] = None
    """A Twilio phone number in [E.164](https://www.twilio.com/docs/glossary/what-e164)
        format, an
        [alphanumeric sender ID](https://www.twilio.com/docs/sms/send-messages#use-an-alphanumeric-sender-id),
        or a [Channel Endpoint address](https://www.twilio.com/docs/sms/channels#channel-addresses)
        that is enabled for the type of message you want to send. Phone numbers or
        [short codes](https://www.twilio.com/docs/sms/api/short-code) purchased from
        Twilio also work here. You cannot, for example, spoof messages from a private
        cell phone number. If you are using `messaging_service_sid`, this parameter
        must be empty.
    """

    model_config = ConfigDict(
        arbitrary_types_allowed=False,
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        try:
            from twilio.rest import Client
        except ImportError:
            raise ImportError(
                "Could not import twilio python package. "
                "Please install it with `pip install twilio`."
            )
        # Credentials may come from the constructor kwargs or from the
        # TWILIO_* environment variables; kwargs win.
        account_sid = get_from_dict_or_env(values, "account_sid", "TWILIO_ACCOUNT_SID")
        auth_token = get_from_dict_or_env(values, "auth_token", "TWILIO_AUTH_TOKEN")
        values["from_number"] = get_from_dict_or_env(
            values, "from_number", "TWILIO_FROM_NUMBER"
        )
        # The authenticated REST client is cached on the model instance.
        values["client"] = Client(account_sid, auth_token)
        return values

    def run(self, body: str, to: str) -> str:
        """Run body through Twilio and respond with message sid.

        Args:
            body: The text of the message you want to send. Can be up to 1,600
                characters in length.
            to: The destination phone number in
                [E.164](https://www.twilio.com/docs/glossary/what-e164) format for
                SMS/MMS or
                [Channel user address](https://www.twilio.com/docs/sms/channels#channel-addresses)
                for other 3rd-party channels.
        """
        message = self.client.messages.create(to, from_=self.from_number, body=body)
        return message.sid
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/clickup.py | """Util that calls clickup."""
import json
import warnings
from dataclasses import asdict, dataclass, fields
from typing import Any, Dict, List, Mapping, Optional, Tuple, Type, Union
import requests
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
DEFAULT_URL = "https://api.clickup.com/api/v2"
@dataclass
class Component:
    """Base class for all components."""

    @classmethod
    def from_data(cls, data: Dict[str, Any]) -> "Component":
        """Build an instance from a raw API payload; subclasses must override."""
        raise NotImplementedError()
@dataclass
class Task(Component):
    """Class for a task."""

    id: int
    name: str
    text_content: str
    description: str
    status: str
    creator_id: int
    creator_username: str
    creator_email: str
    assignees: List[Dict[str, Any]]
    watchers: List[Dict[str, Any]]
    priority: Optional[str]
    due_date: Optional[str]
    start_date: Optional[str]
    points: int
    team_id: int
    project_id: int

    @classmethod
    def from_data(cls, data: Dict[str, Any]) -> "Task":
        """Map a raw ClickUp task payload onto the flat Task schema."""
        creator = data["creator"]
        raw_priority = data["priority"]
        return cls(
            id=data["id"],
            name=data["name"],
            text_content=data["text_content"],
            description=data["description"],
            status=data["status"]["status"],
            creator_id=creator["id"],
            creator_username=creator["username"],
            creator_email=creator["email"],
            assignees=data["assignees"],
            watchers=data["watchers"],
            # The API nests the priority label one level down when set.
            priority=None if raw_priority is None else raw_priority["priority"],
            due_date=data["due_date"],
            start_date=data["start_date"],
            points=data["points"],
            team_id=data["team_id"],
            project_id=data["project"]["id"],
        )
@dataclass
class CUList(Component):
    """Component class for a list."""

    folder_id: float
    name: str
    content: Optional[str] = None
    due_date: Optional[int] = None
    due_date_time: Optional[bool] = None
    priority: Optional[int] = None
    assignee: Optional[int] = None
    status: Optional[str] = None

    @classmethod
    def from_data(cls, data: dict) -> "CUList":
        """Build a CUList; optional keys absent from the payload become None."""
        optional_keys = (
            "content",
            "due_date",
            "due_date_time",
            "priority",
            "assignee",
            "status",
        )
        optionals = {key: data.get(key) for key in optional_keys}
        return cls(folder_id=data["folder_id"], name=data["name"], **optionals)
@dataclass
class Member(Component):
    """Component class for a member."""

    id: int
    username: str
    email: str
    initials: str

    @classmethod
    def from_data(cls, data: Dict) -> "Member":
        """Flatten the nested ``user`` record into a Member."""
        user = data["user"]
        return cls(
            id=user["id"],
            username=user["username"],
            email=user["email"],
            initials=user["initials"],
        )
@dataclass
class Team(Component):
    """Component class for a team."""

    id: int
    name: str
    members: List[Member]

    @classmethod
    def from_data(cls, data: Dict) -> "Team":
        """Build a Team, parsing each raw member record via Member.from_data."""
        roster = [Member.from_data(raw) for raw in data["members"]]
        return cls(id=data["id"], name=data["name"], members=roster)
@dataclass
class Space(Component):
    """Component class for a space."""

    id: int
    name: str
    private: bool
    enabled_features: Dict[str, Any]

    @classmethod
    def from_data(cls, data: Dict[str, Any]) -> "Space":
        """Build a Space from the first entry of a raw ``spaces`` payload."""
        # Only the first space of the payload is considered.
        record = data["spaces"][0]
        active = {
            name: feature
            for name, feature in record["features"].items()
            if feature["enabled"]
        }
        return cls(
            id=record["id"],
            name=record["name"],
            private=record["private"],
            enabled_features=active,
        )
def parse_dict_through_component(
    data: dict, component: Type[Component], fault_tolerant: bool = False
) -> Dict:
    """Parse a dictionary by creating
    a component and then turning it back into a dictionary.

    This helps with two things
    1. Extract and format data from a dictionary according to schema
    2. Provide a central place to do this in a fault-tolerant way
    """
    try:
        return asdict(component.from_data(data))
    except Exception as e:
        if not fault_tolerant:
            raise e
        # Best-effort mode: warn and hand back the raw input unchanged.
        warnings.warn(
            f"""Error encountered while trying to parse
        {str(data)}: {str(e)}\n Falling back to returning input data."""
        )
        return data
def extract_dict_elements_from_component_fields(
    data: dict, component: Type[Component]
) -> dict:
    """Extract elements from a dictionary.

    Args:
        data: The dictionary to extract elements from.
        component: The component to extract elements from.

    Returns:
        A dictionary containing the elements from the input dictionary that are also
        in the component.
    """
    # Keep only the keys declared as dataclass fields on the component,
    # preserving the component's field order.
    return {
        attribute.name: data[attribute.name]
        for attribute in fields(component)
        if attribute.name in data
    }
def load_query(
    query: str, fault_tolerant: bool = False
) -> Tuple[Optional[Dict], Optional[str]]:
    """Parse a JSON string and return the parsed object.

    If parsing fails, returns an error message.

    :param query: The JSON string to parse.
    :return: A tuple containing the parsed object or None and an error message or None.

    Exceptions:
        json.JSONDecodeError: If the input is not a valid JSON string.
    """
    try:
        return json.loads(query), None
    except json.JSONDecodeError as e:
        if not fault_tolerant:
            raise e
        # Best-effort mode: report the parse failure instead of raising.
        message = f"""Input must be a valid JSON. Got the following error: {str(e)}.
        "Please reformat and try again."""
        return None, message
def fetch_first_id(data: dict, key: str) -> Optional[int]:
    """Return the id of the first entry under ``key``, or None when absent/empty."""
    entries = data.get(key) or []
    if not entries:
        return None
    if len(entries) > 1:
        # Multiple candidates is unexpected; surface it but proceed.
        warnings.warn(f"Found multiple {key}: {data[key]}. Defaulting to first.")
    return entries[0]["id"]
def fetch_data(url: str, access_token: str, query: Optional[dict] = None) -> dict:
    """GET ``url`` with the ClickUp token header and return the decoded JSON body."""
    response = requests.get(
        url, headers={"Authorization": access_token}, params=query
    )
    # Surface HTTP errors immediately rather than returning error payloads.
    response.raise_for_status()
    return response.json()
def fetch_team_id(access_token: str) -> Optional[int]:
    """Fetch the team id."""
    payload = fetch_data(f"{DEFAULT_URL}/team", access_token)
    return fetch_first_id(payload, "teams")
def fetch_space_id(team_id: int, access_token: str) -> Optional[int]:
    """Fetch the space id."""
    payload = fetch_data(
        f"{DEFAULT_URL}/team/{team_id}/space",
        access_token,
        query={"archived": "false"},
    )
    return fetch_first_id(payload, "spaces")
def fetch_folder_id(space_id: int, access_token: str) -> Optional[int]:
    """Fetch the folder id."""
    payload = fetch_data(
        f"{DEFAULT_URL}/space/{space_id}/folder",
        access_token,
        query={"archived": "false"},
    )
    return fetch_first_id(payload, "folders")
def fetch_list_id(space_id: int, folder_id: int, access_token: str) -> Optional[int]:
    """Fetch the list id."""
    # Lists live inside a folder when one exists; folderless lists hang
    # directly off the space.
    endpoint = (
        f"{DEFAULT_URL}/folder/{folder_id}/list"
        if folder_id
        else f"{DEFAULT_URL}/space/{space_id}/list"
    )
    payload = fetch_data(endpoint, access_token, query={"archived": "false"})
    # The structure to fetch list id differs based if its folderless
    if folder_id and "id" in payload:
        return payload["id"]
    return fetch_first_id(payload, "lists")
class ClickupAPIWrapper(BaseModel):
"""Wrapper for Clickup API."""
access_token: Optional[str] = None
team_id: Optional[str] = None
space_id: Optional[str] = None
folder_id: Optional[str] = None
list_id: Optional[str] = None
model_config = ConfigDict(
extra="forbid",
)
@classmethod
def get_access_code_url(
    cls, oauth_client_id: str, redirect_uri: str = "https://google.com"
) -> str:
    """Get the URL to get an access code."""
    base = f"https://app.clickup.com/api?client_id={oauth_client_id}"
    return f"{base}&redirect_uri={redirect_uri}"
@classmethod
def get_access_token(
    cls, oauth_client_id: str, oauth_client_secret: str, code: str
) -> Optional[str]:
    """Get the access token.

    Exchanges a one-time OAuth ``code`` for a ClickUp access token.
    Prints guidance and returns None when the exchange fails.
    """
    url = f"{DEFAULT_URL}/oauth/token"
    params = {
        "client_id": oauth_client_id,
        "client_secret": oauth_client_secret,
        "code": code,
    }
    response = requests.post(url, params=params)
    data = response.json()
    if "access_token" not in data:
        print(f"Error: {data}")  # noqa: T201
        # ECODE OAUTH_014 means this one-time code was already redeemed.
        if "ECODE" in data and data["ECODE"] == "OAUTH_014":
            url = ClickupAPIWrapper.get_access_code_url(oauth_client_id)
            print(  # noqa: T201
                "You already used this code once. Generate a new one.",
                f"Our best guess for the url to get a new code is:\n{url}",
            )
        return None
    return data["access_token"]
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
    """Validate that api key and python package exists in environment."""
    token = get_from_dict_or_env(values, "access_token", "CLICKUP_ACCESS_TOKEN")
    values["access_token"] = token
    # Resolve the default workspace hierarchy up front
    # (team -> space -> folder -> list) so later calls can use cached ids.
    values["team_id"] = fetch_team_id(token)
    values["space_id"] = fetch_space_id(values["team_id"], token)
    values["folder_id"] = fetch_folder_id(values["space_id"], token)
    values["list_id"] = fetch_list_id(
        values["space_id"], values["folder_id"], token
    )
    return values
def attempt_parse_teams(self, input_dict: dict) -> Dict[str, List[dict]]:
"""Parse appropriate content from the list of teams."""
parsed_teams: Dict[str, List[dict]] = {"teams": []}
for team in input_dict["teams"]:
try:
team = parse_dict_through_component(team, Team, fault_tolerant=False)
parsed_teams["teams"].append(team)
except Exception as e:
warnings.warn(f"Error parsing a team {e}")
return parsed_teams
def get_headers(
self,
) -> Mapping[str, Union[str, bytes]]:
"""Get the headers for the request."""
if not isinstance(self.access_token, str):
raise TypeError(f"Access Token: {self.access_token}, must be str.")
headers = {
"Authorization": str(self.access_token),
"Content-Type": "application/json",
}
return headers
def get_default_params(self) -> Dict:
return {"archived": "false"}
def get_authorized_teams(self) -> Dict[Any, Any]:
"""Get all teams for the user."""
url = f"{DEFAULT_URL}/team"
response = requests.get(url, headers=self.get_headers())
data = response.json()
parsed_teams = self.attempt_parse_teams(data)
return parsed_teams
def get_folders(self) -> Dict:
"""
Get all the folders for the team.
"""
url = f"{DEFAULT_URL}/team/" + str(self.team_id) + "/space"
params = self.get_default_params()
response = requests.get(url, headers=self.get_headers(), params=params)
return {"response": response}
def get_task(self, query: str, fault_tolerant: bool = True) -> Dict:
"""
Retrieve a specific task.
"""
params, error = load_query(query, fault_tolerant=True)
if params is None:
return {"Error": error}
url = f"{DEFAULT_URL}/task/{params['task_id']}"
params = {
"custom_task_ids": "true",
"team_id": self.team_id,
"include_subtasks": "true",
}
response = requests.get(url, headers=self.get_headers(), params=params)
data = response.json()
parsed_task = parse_dict_through_component(
data, Task, fault_tolerant=fault_tolerant
)
return parsed_task
def get_lists(self) -> Dict:
"""
Get all available lists.
"""
url = f"{DEFAULT_URL}/folder/{self.folder_id}/list"
params = self.get_default_params()
response = requests.get(url, headers=self.get_headers(), params=params)
return {"response": response}
def query_tasks(self, query: str) -> Dict:
"""
Query tasks that match certain fields
"""
params, error = load_query(query, fault_tolerant=True)
if params is None:
return {"Error": error}
url = f"{DEFAULT_URL}/list/{params['list_id']}/task"
params = self.get_default_params()
response = requests.get(url, headers=self.get_headers(), params=params)
return {"response": response}
def get_spaces(self) -> Dict:
"""
Get all spaces for the team.
"""
url = f"{DEFAULT_URL}/team/{self.team_id}/space"
response = requests.get(
url, headers=self.get_headers(), params=self.get_default_params()
)
data = response.json()
parsed_spaces = parse_dict_through_component(data, Space, fault_tolerant=True)
return parsed_spaces
def get_task_attribute(self, query: str) -> Dict:
"""
Update an attribute of a specified task.
"""
task = self.get_task(query, fault_tolerant=True)
params, error = load_query(query, fault_tolerant=True)
if not isinstance(params, dict):
return {"Error": error}
if params["attribute_name"] not in task:
return {
"Error": f"""attribute_name = {params['attribute_name']} was not
found in task keys {task.keys()}. Please call again with one of the key names."""
}
return {params["attribute_name"]: task[params["attribute_name"]]}
def update_task(self, query: str) -> Dict:
"""
Update an attribute of a specified task.
"""
query_dict, error = load_query(query, fault_tolerant=True)
if query_dict is None:
return {"Error": error}
url = f"{DEFAULT_URL}/task/{query_dict['task_id']}"
params = {
"custom_task_ids": "true",
"team_id": self.team_id,
"include_subtasks": "true",
}
headers = self.get_headers()
payload = {query_dict["attribute_name"]: query_dict["value"]}
response = requests.put(url, headers=headers, params=params, json=payload)
return {"response": response}
def update_task_assignees(self, query: str) -> Dict:
"""
Add or remove assignees of a specified task.
"""
query_dict, error = load_query(query, fault_tolerant=True)
if query_dict is None:
return {"Error": error}
for user in query_dict["users"]:
if not isinstance(user, int):
return {
"Error": f"""All users must be integers, not strings!
"Got user {user} if type {type(user)}"""
}
url = f"{DEFAULT_URL}/task/{query_dict['task_id']}"
headers = self.get_headers()
if query_dict["operation"] == "add":
assigne_payload = {"add": query_dict["users"], "rem": []}
elif query_dict["operation"] == "rem":
assigne_payload = {"add": [], "rem": query_dict["users"]}
else:
raise ValueError(
f"Invalid operation ({query_dict['operation']}). ",
"Valid options ['add', 'rem'].",
)
params = {
"custom_task_ids": "true",
"team_id": self.team_id,
"include_subtasks": "true",
}
payload = {"assignees": assigne_payload}
response = requests.put(url, headers=headers, params=params, json=payload)
return {"response": response}
def create_task(self, query: str) -> Dict:
"""
Creates a new task.
"""
query_dict, error = load_query(query, fault_tolerant=True)
if query_dict is None:
return {"Error": error}
list_id = self.list_id
url = f"{DEFAULT_URL}/list/{list_id}/task"
params = {"custom_task_ids": "true", "team_id": self.team_id}
payload = extract_dict_elements_from_component_fields(query_dict, Task)
headers = self.get_headers()
response = requests.post(url, json=payload, headers=headers, params=params)
data: Dict = response.json()
return parse_dict_through_component(data, Task, fault_tolerant=True)
def create_list(self, query: str) -> Dict:
"""
Creates a new list.
"""
query_dict, error = load_query(query, fault_tolerant=True)
if query_dict is None:
return {"Error": error}
# Default to using folder as location if it exists.
# If not, fall back to using the space.
location = self.folder_id if self.folder_id else self.space_id
url = f"{DEFAULT_URL}/folder/{location}/list"
payload = extract_dict_elements_from_component_fields(query_dict, Task)
headers = self.get_headers()
response = requests.post(url, json=payload, headers=headers)
data = response.json()
parsed_list = parse_dict_through_component(data, CUList, fault_tolerant=True)
# set list id to new list
if "id" in parsed_list:
self.list_id = parsed_list["id"]
return parsed_list
def create_folder(self, query: str) -> Dict:
"""
Creates a new folder.
"""
query_dict, error = load_query(query, fault_tolerant=True)
if query_dict is None:
return {"Error": error}
space_id = self.space_id
url = f"{DEFAULT_URL}/space/{space_id}/folder"
payload = {
"name": query_dict["name"],
}
headers = self.get_headers()
response = requests.post(url, json=payload, headers=headers)
data = response.json()
if "id" in data:
self.list_id = data["id"]
return data
def run(self, mode: str, query: str) -> str:
"""Run the API."""
if mode == "get_task":
output = self.get_task(query)
elif mode == "get_task_attribute":
output = self.get_task_attribute(query)
elif mode == "get_teams":
output = self.get_authorized_teams()
elif mode == "create_task":
output = self.create_task(query)
elif mode == "create_list":
output = self.create_list(query)
elif mode == "create_folder":
output = self.create_folder(query)
elif mode == "get_lists":
output = self.get_lists()
elif mode == "get_folders":
output = self.get_folders()
elif mode == "get_spaces":
output = self.get_spaces()
elif mode == "update_task":
output = self.update_task(query)
elif mode == "update_task_assignees":
output = self.update_task_assignees(query)
else:
output = {"ModeError": f"Got unexpected mode {mode}."}
try:
return json.dumps(output)
except Exception:
return str(output)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/sql_database.py | """SQLAlchemy wrapper around a database."""
from __future__ import annotations
from typing import Any, Dict, Iterable, List, Literal, Optional, Sequence, Union
import sqlalchemy
from langchain_core._api import deprecated
from langchain_core.utils import get_from_env
from sqlalchemy import (
MetaData,
Table,
create_engine,
inspect,
select,
text,
)
from sqlalchemy.engine import URL, Engine, Result
from sqlalchemy.exc import ProgrammingError, SQLAlchemyError
from sqlalchemy.schema import CreateTable
from sqlalchemy.sql.expression import Executable
from sqlalchemy.types import NullType
def _format_index(index: sqlalchemy.engine.interfaces.ReflectedIndex) -> str:
return (
f'Name: {index["name"]}, Unique: {index["unique"]},'
f' Columns: {str(index["column_names"])}'
)
def truncate_word(content: Any, *, length: int, suffix: str = "...") -> str:
    """Clip ``content`` to at most ``length`` characters at a word boundary.

    Non-string values and non-positive limits are passed through untouched,
    as is any string already within the limit.  Otherwise the string is cut
    (leaving room for ``suffix``), trimmed back to the last complete word,
    and the suffix is appended.
    """
    if not isinstance(content, str):
        return content
    if length <= 0 or len(content) <= length:
        return content
    clipped = content[: length - len(suffix)]
    return clipped.rsplit(" ", 1)[0] + suffix
class SQLDatabase:
    """SQLAlchemy wrapper around a database.

    Reflects the schema via a SQLAlchemy ``Engine`` and exposes helpers for
    listing tables, rendering table DDL (optionally with indexes and sample
    rows) and running SQL with per-dialect schema handling.
    """

    def __init__(
        self,
        engine: Engine,
        schema: Optional[str] = None,
        metadata: Optional[MetaData] = None,
        ignore_tables: Optional[List[str]] = None,
        include_tables: Optional[List[str]] = None,
        sample_rows_in_table_info: int = 3,
        indexes_in_table_info: bool = False,
        custom_table_info: Optional[dict] = None,
        view_support: bool = False,
        max_string_length: int = 300,
        lazy_table_reflection: bool = False,
    ):
        """Create engine from database URI."""
        self._engine = engine
        self._schema = schema
        # The two table filters are mutually exclusive.
        if include_tables and ignore_tables:
            raise ValueError("Cannot specify both include_tables and ignore_tables")
        self._inspector = inspect(self._engine)
        # including view support by adding the views as well as tables to the all
        # tables list if view_support is True
        self._all_tables = set(
            self._inspector.get_table_names(schema=schema)
            + (self._inspector.get_view_names(schema=schema) if view_support else [])
        )
        self._include_tables = set(include_tables) if include_tables else set()
        if self._include_tables:
            missing_tables = self._include_tables - self._all_tables
            if missing_tables:
                raise ValueError(
                    f"include_tables {missing_tables} not found in database"
                )
        self._ignore_tables = set(ignore_tables) if ignore_tables else set()
        if self._ignore_tables:
            missing_tables = self._ignore_tables - self._all_tables
            if missing_tables:
                raise ValueError(
                    f"ignore_tables {missing_tables} not found in database"
                )
        usable_tables = self.get_usable_table_names()
        self._usable_tables = set(usable_tables) if usable_tables else self._all_tables
        if not isinstance(sample_rows_in_table_info, int):
            raise TypeError("sample_rows_in_table_info must be an integer")
        self._sample_rows_in_table_info = sample_rows_in_table_info
        self._indexes_in_table_info = indexes_in_table_info
        self._custom_table_info = custom_table_info
        if self._custom_table_info:
            if not isinstance(self._custom_table_info, dict):
                raise TypeError(
                    "table_info must be a dictionary with table names as keys and the "
                    "desired table info as values"
                )
            # only keep the tables that are also present in the database
            intersection = set(self._custom_table_info).intersection(self._all_tables)
            self._custom_table_info = dict(
                (table, self._custom_table_info[table])
                for table in self._custom_table_info
                if table in intersection
            )
        self._max_string_length = max_string_length
        self._view_support = view_support
        self._metadata = metadata or MetaData()
        # With lazy reflection, tables are reflected on demand inside
        # get_table_info() instead of here.
        if not lazy_table_reflection:
            # including view support if view_support = true
            self._metadata.reflect(
                views=view_support,
                bind=self._engine,
                only=list(self._usable_tables),
                schema=self._schema,
            )

    @classmethod
    def from_uri(
        cls,
        database_uri: Union[str, URL],
        engine_args: Optional[dict] = None,
        **kwargs: Any,
    ) -> SQLDatabase:
        """Construct a SQLAlchemy engine from URI."""
        _engine_args = engine_args or {}
        return cls(create_engine(database_uri, **_engine_args), **kwargs)

    @classmethod
    def from_databricks(
        cls,
        catalog: str,
        schema: str,
        host: Optional[str] = None,
        api_token: Optional[str] = None,
        warehouse_id: Optional[str] = None,
        cluster_id: Optional[str] = None,
        engine_args: Optional[dict] = None,
        **kwargs: Any,
    ) -> SQLDatabase:
        """
        Class method to create an SQLDatabase instance from a Databricks connection.
        This method requires the 'databricks-sql-connector' package. If not installed,
        it can be added using `pip install databricks-sql-connector`.

        Args:
            catalog (str): The catalog name in the Databricks database.
            schema (str): The schema name in the catalog.
            host (Optional[str]): The Databricks workspace hostname, excluding
                'https://' part. If not provided, it attempts to fetch from the
                environment variable 'DATABRICKS_HOST'. If still unavailable and if
                running in a Databricks notebook, it defaults to the current workspace
                hostname. Defaults to None.
            api_token (Optional[str]): The Databricks personal access token for
                accessing the Databricks SQL warehouse or the cluster. If not provided,
                it attempts to fetch from 'DATABRICKS_TOKEN'. If still unavailable
                and running in a Databricks notebook, a temporary token for the current
                user is generated. Defaults to None.
            warehouse_id (Optional[str]): The warehouse ID in the Databricks SQL. If
                provided, the method configures the connection to use this warehouse.
                Cannot be used with 'cluster_id'. Defaults to None.
            cluster_id (Optional[str]): The cluster ID in the Databricks Runtime. If
                provided, the method configures the connection to use this cluster.
                Cannot be used with 'warehouse_id'. If running in a Databricks notebook
                and both 'warehouse_id' and 'cluster_id' are None, it uses the ID of the
                cluster the notebook is attached to. Defaults to None.
            engine_args (Optional[dict]): The arguments to be used when connecting
                Databricks. Defaults to None.
            **kwargs (Any): Additional keyword arguments for the `from_uri` method.

        Returns:
            SQLDatabase: An instance of SQLDatabase configured with the provided
                Databricks connection details.

        Raises:
            ValueError: If 'databricks-sql-connector' is not found, or if both
                'warehouse_id' and 'cluster_id' are provided, or if neither
                'warehouse_id' nor 'cluster_id' are provided and it's not executing
                inside a Databricks notebook.
        """
        try:
            from databricks import sql  # noqa: F401
        except ImportError:
            raise ImportError(
                "databricks-sql-connector package not found, please install with"
                " `pip install databricks-sql-connector`"
            )
        # When running inside a Databricks notebook, the REPL context supplies
        # defaults for host, token and cluster id.
        context = None
        try:
            from dbruntime.databricks_repl_context import get_context

            context = get_context()
            default_host = context.browserHostName
        except (ImportError, AttributeError):
            default_host = None
        if host is None:
            host = get_from_env("host", "DATABRICKS_HOST", default_host)
        default_api_token = context.apiToken if context else None
        if api_token is None:
            api_token = get_from_env("api_token", "DATABRICKS_TOKEN", default_api_token)
        if warehouse_id is None and cluster_id is None:
            if context:
                cluster_id = context.clusterId
            else:
                raise ValueError(
                    "Need to provide either 'warehouse_id' or 'cluster_id'."
                )
        if warehouse_id and cluster_id:
            raise ValueError("Can't have both 'warehouse_id' or 'cluster_id'.")
        if warehouse_id:
            http_path = f"/sql/1.0/warehouses/{warehouse_id}"
        else:
            http_path = f"/sql/protocolv1/o/0/{cluster_id}"
        uri = (
            f"databricks://token:{api_token}@{host}?"
            f"http_path={http_path}&catalog={catalog}&schema={schema}"
        )
        return cls.from_uri(database_uri=uri, engine_args=engine_args, **kwargs)

    @classmethod
    def from_cnosdb(
        cls,
        url: str = "127.0.0.1:8902",
        user: str = "root",
        password: str = "",
        tenant: str = "cnosdb",
        database: str = "public",
    ) -> SQLDatabase:
        """
        Class method to create an SQLDatabase instance from a CnosDB connection.
        This method requires the 'cnos-connector' package. If not installed, it
        can be added using `pip install cnos-connector`.

        Args:
            url (str): The HTTP connection host name and port number of the CnosDB
                service, excluding "http://" or "https://", with a default value
                of "127.0.0.1:8902".
            user (str): The username used to connect to the CnosDB service, with a
                default value of "root".
            password (str): The password of the user connecting to the CnosDB service,
                with a default value of "".
            tenant (str): The name of the tenant used to connect to the CnosDB service,
                with a default value of "cnosdb".
            database (str): The name of the database in the CnosDB tenant.

        Returns:
            SQLDatabase: An instance of SQLDatabase configured with the provided
                CnosDB connection details.
        """
        try:
            from cnosdb_connector import make_cnosdb_langchain_uri

            uri = make_cnosdb_langchain_uri(url, user, password, tenant, database)
            return cls.from_uri(database_uri=uri)
        except ImportError:
            raise ImportError(
                "cnos-connector package not found, please install with"
                " `pip install cnos-connector`"
            )

    @property
    def dialect(self) -> str:
        """Return string representation of dialect to use."""
        return self._engine.dialect.name

    def get_usable_table_names(self) -> Iterable[str]:
        """Get names of tables available."""
        if self._include_tables:
            return sorted(self._include_tables)
        return sorted(self._all_tables - self._ignore_tables)

    @deprecated("0.0.1", alternative="get_usable_table_names", removal="1.0")
    def get_table_names(self) -> Iterable[str]:
        """Get names of tables available."""
        return self.get_usable_table_names()

    @property
    def table_info(self) -> str:
        """Information about all tables in the database."""
        return self.get_table_info()

    def get_table_info(self, table_names: Optional[List[str]] = None) -> str:
        """Get information about specified tables.

        Follows best practices as specified in: Rajkumar et al, 2022
        (https://arxiv.org/abs/2204.00498)

        If `sample_rows_in_table_info`, the specified number of sample rows will be
        appended to each table description. This can increase performance as
        demonstrated in the paper.
        """
        all_table_names = self.get_usable_table_names()
        if table_names is not None:
            missing_tables = set(table_names).difference(all_table_names)
            if missing_tables:
                raise ValueError(f"table_names {missing_tables} not found in database")
            all_table_names = table_names
        # Reflect on demand any requested tables not yet present in metadata
        # (relevant when lazy_table_reflection=True was used).
        metadata_table_names = [tbl.name for tbl in self._metadata.sorted_tables]
        to_reflect = set(all_table_names) - set(metadata_table_names)
        if to_reflect:
            self._metadata.reflect(
                views=self._view_support,
                bind=self._engine,
                only=list(to_reflect),
                schema=self._schema,
            )
        # SQLite's internal "sqlite_*" tables are excluded from the output.
        meta_tables = [
            tbl
            for tbl in self._metadata.sorted_tables
            if tbl.name in set(all_table_names)
            and not (self.dialect == "sqlite" and tbl.name.startswith("sqlite_"))
        ]
        tables = []
        for table in meta_tables:
            # Caller-provided descriptions take precedence over generated DDL.
            if self._custom_table_info and table.name in self._custom_table_info:
                tables.append(self._custom_table_info[table.name])
                continue
            # Ignore JSON datatyped columns
            for k, v in table.columns.items():  # AttributeError: items in sqlalchemy v1
                if type(v.type) is NullType:
                    table._columns.remove(v)
            # add create table command
            create_table = str(CreateTable(table).compile(self._engine))
            table_info = f"{create_table.rstrip()}"
            has_extra_info = (
                self._indexes_in_table_info or self._sample_rows_in_table_info
            )
            # Indexes and sample rows are emitted inside a SQL comment block.
            if has_extra_info:
                table_info += "\n\n/*"
            if self._indexes_in_table_info:
                table_info += f"\n{self._get_table_indexes(table)}\n"
            if self._sample_rows_in_table_info:
                table_info += f"\n{self._get_sample_rows(table)}\n"
            if has_extra_info:
                table_info += "*/"
            tables.append(table_info)
        tables.sort()
        final_str = "\n\n".join(tables)
        return final_str

    def _get_table_indexes(self, table: Table) -> str:
        """Return a formatted description of the indexes on ``table``."""
        indexes = self._inspector.get_indexes(table.name)
        indexes_formatted = "\n".join(map(_format_index, indexes))
        return f"Table Indexes:\n{indexes_formatted}"

    def _get_sample_rows(self, table: Table) -> str:
        """Return up to ``sample_rows_in_table_info`` rows, tab-separated."""
        # build the select command
        command = select(table).limit(self._sample_rows_in_table_info)
        # save the columns in string format
        columns_str = "\t".join([col.name for col in table.columns])
        try:
            # get the sample rows
            with self._engine.connect() as connection:
                sample_rows_result = connection.execute(command)  # type: ignore
                # shorten values in the sample rows
                sample_rows = list(
                    map(lambda ls: [str(i)[:100] for i in ls], sample_rows_result)
                )
            # save the sample rows in string format
            sample_rows_str = "\n".join(["\t".join(row) for row in sample_rows])
        # in some dialects when there are no rows in the table a
        # 'ProgrammingError' is returned
        except ProgrammingError:
            sample_rows_str = ""
        return (
            f"{self._sample_rows_in_table_info} rows from {table.name} table:\n"
            f"{columns_str}\n"
            f"{sample_rows_str}"
        )

    def _execute(
        self,
        command: Union[str, Executable],
        fetch: Literal["all", "one", "cursor"] = "all",
        *,
        parameters: Optional[Dict[str, Any]] = None,
        execution_options: Optional[Dict[str, Any]] = None,
    ) -> Union[Sequence[Dict[str, Any]], Result]:
        """
        Executes SQL command through underlying engine.
        If the statement returns no rows, an empty list is returned.
        """
        parameters = parameters or {}
        execution_options = execution_options or {}
        with self._engine.begin() as connection:  # type: Connection  # type: ignore[name-defined]
            # Each dialect has its own statement (and parameter syntax) for
            # selecting the default schema of the session; mssql and sqlany
            # need no session-level schema setting here.
            if self._schema is not None:
                if self.dialect == "snowflake":
                    connection.exec_driver_sql(
                        "ALTER SESSION SET search_path = %s",
                        (self._schema,),
                        execution_options=execution_options,
                    )
                elif self.dialect == "bigquery":
                    connection.exec_driver_sql(
                        "SET @@dataset_id=?",
                        (self._schema,),
                        execution_options=execution_options,
                    )
                elif self.dialect == "mssql":
                    pass
                elif self.dialect == "trino":
                    connection.exec_driver_sql(
                        "USE ?",
                        (self._schema,),
                        execution_options=execution_options,
                    )
                elif self.dialect == "duckdb":
                    # Unclear which parameterized argument syntax duckdb supports.
                    # The docs for the duckdb client say they support multiple,
                    # but `duckdb_engine` seemed to struggle with all of them:
                    # https://github.com/Mause/duckdb_engine/issues/796
                    connection.exec_driver_sql(
                        f"SET search_path TO {self._schema}",
                        execution_options=execution_options,
                    )
                elif self.dialect == "oracle":
                    connection.exec_driver_sql(
                        f"ALTER SESSION SET CURRENT_SCHEMA = {self._schema}",
                        execution_options=execution_options,
                    )
                elif self.dialect == "sqlany":
                    # If anybody using Sybase SQL anywhere database then it should not
                    # go to else condition. It should be same as mssql.
                    pass
                elif self.dialect == "postgresql":  # postgresql
                    connection.exec_driver_sql(
                        "SET search_path TO %s",
                        (self._schema,),
                        execution_options=execution_options,
                    )
            if isinstance(command, str):
                command = text(command)
            elif isinstance(command, Executable):
                pass
            else:
                raise TypeError(f"Query expression has unknown type: {type(command)}")
            cursor = connection.execute(
                command,
                parameters,
                execution_options=execution_options,
            )
            if cursor.returns_rows:
                if fetch == "all":
                    result = [x._asdict() for x in cursor.fetchall()]
                elif fetch == "one":
                    first_result = cursor.fetchone()
                    result = [] if first_result is None else [first_result._asdict()]
                elif fetch == "cursor":
                    return cursor
                else:
                    raise ValueError(
                        "Fetch parameter must be either 'one', 'all', or 'cursor'"
                    )
                return result
        return []

    def run(
        self,
        command: Union[str, Executable],
        fetch: Literal["all", "one", "cursor"] = "all",
        include_columns: bool = False,
        *,
        parameters: Optional[Dict[str, Any]] = None,
        execution_options: Optional[Dict[str, Any]] = None,
    ) -> Union[str, Sequence[Dict[str, Any]], Result[Any]]:
        """Execute a SQL command and return a string representing the results.

        If the statement returns rows, a string of the results is returned.
        If the statement returns no rows, an empty string is returned.
        """
        result = self._execute(
            command, fetch, parameters=parameters, execution_options=execution_options
        )
        if fetch == "cursor":
            return result
        # Each value is truncated to _max_string_length before rendering.
        res = [
            {
                column: truncate_word(value, length=self._max_string_length)
                for column, value in r.items()
            }
            for r in result
        ]
        if not include_columns:
            res = [tuple(row.values()) for row in res]  # type: ignore[misc]
        if not res:
            return ""
        else:
            return str(res)

    def get_table_info_no_throw(self, table_names: Optional[List[str]] = None) -> str:
        """Get information about specified tables.

        Follows best practices as specified in: Rajkumar et al, 2022
        (https://arxiv.org/abs/2204.00498)

        If `sample_rows_in_table_info`, the specified number of sample rows will be
        appended to each table description. This can increase performance as
        demonstrated in the paper.
        """
        try:
            return self.get_table_info(table_names)
        except ValueError as e:
            """Format the error message"""
            return f"Error: {e}"

    def run_no_throw(
        self,
        command: str,
        fetch: Literal["all", "one"] = "all",
        include_columns: bool = False,
        *,
        parameters: Optional[Dict[str, Any]] = None,
        execution_options: Optional[Dict[str, Any]] = None,
    ) -> Union[str, Sequence[Dict[str, Any]], Result[Any]]:
        """Execute a SQL command and return a string representing the results.

        If the statement returns rows, a string of the results is returned.
        If the statement returns no rows, an empty string is returned.

        If the statement throws an error, the error message is returned.
        """
        try:
            return self.run(
                command,
                fetch,
                parameters=parameters,
                execution_options=execution_options,
                include_columns=include_columns,
            )
        except SQLAlchemyError as e:
            """Format the error message"""
            return f"Error: {e}"

    def get_context(self) -> Dict[str, Any]:
        """Return db context that you may want in agent prompt."""
        table_names = list(self.get_usable_table_names())
        table_info = self.get_table_info_no_throw()
        return {"table_info": table_info, "table_names": ", ".join(table_names)}
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/alpha_vantage.py | """Util that calls AlphaVantage for Currency Exchange Rate."""
from typing import Any, Dict, List, Optional
import requests
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
class AlphaVantageAPIWrapper(BaseModel):
    """Wrapper for AlphaVantage API for Currency Exchange Rate.

    Docs for using:

    1. Go to AlphaVantage and sign up for an API key
    2. Save your API KEY into ALPHAVANTAGE_API_KEY env variable
    """

    # API key; resolved from the ALPHAVANTAGE_API_KEY env var if not supplied.
    alphavantage_api_key: Optional[str] = None

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key exists in environment."""
        values["alphavantage_api_key"] = get_from_dict_or_env(
            values, "alphavantage_api_key", "ALPHAVANTAGE_API_KEY"
        )
        return values

    def _call_api(self, function: str, **params: Any) -> Dict[str, Any]:
        """Issue a GET request against the AlphaVantage query endpoint.

        All helpers on this wrapper funnel through here so the endpoint URL,
        API-key handling and error checking live in a single place.

        Args:
            function: Name of the AlphaVantage API "function" to invoke.
            **params: Additional query parameters required by that function.

        Returns:
            The decoded JSON response payload.

        Raises:
            requests.HTTPError: If the HTTP request fails.
            ValueError: If the payload carries an "Error Message" field.
        """
        response = requests.get(
            "https://www.alphavantage.co/query/",
            params={
                "function": function,
                **params,
                "apikey": self.alphavantage_api_key,
            },
        )
        response.raise_for_status()
        data = response.json()
        if "Error Message" in data:
            raise ValueError(f"API Error: {data['Error Message']}")
        return data

    def search_symbols(self, keywords: str) -> Dict[str, Any]:
        """Make a request to the AlphaVantage API to search for symbols."""
        return self._call_api("SYMBOL_SEARCH", keywords=keywords)

    def _get_market_news_sentiment(self, symbol: str) -> Dict[str, Any]:
        """Make a request to the AlphaVantage API to get market news sentiment for a
        given symbol."""
        return self._call_api("NEWS_SENTIMENT", symbol=symbol)

    def _get_time_series_daily(self, symbol: str) -> Dict[str, Any]:
        """Make a request to the AlphaVantage API to get the daily time series."""
        return self._call_api("TIME_SERIES_DAILY", symbol=symbol)

    def _get_quote_endpoint(self, symbol: str) -> Dict[str, Any]:
        """Make a request to the AlphaVantage API to get the
        latest price and volume information."""
        return self._call_api("GLOBAL_QUOTE", symbol=symbol)

    def _get_time_series_weekly(self, symbol: str) -> Dict[str, Any]:
        """Make a request to the AlphaVantage API
        to get the Weekly Time Series."""
        return self._call_api("TIME_SERIES_WEEKLY", symbol=symbol)

    def _get_top_gainers_losers(self) -> Dict[str, Any]:
        """Make a request to the AlphaVantage API to get the top gainers, losers,
        and most actively traded tickers in the US market."""
        return self._call_api("TOP_GAINERS_LOSERS")

    def _get_exchange_rate(
        self, from_currency: str, to_currency: str
    ) -> Dict[str, Any]:
        """Make a request to the AlphaVantage API to get the exchange rate."""
        return self._call_api(
            "CURRENCY_EXCHANGE_RATE",
            from_currency=from_currency,
            to_currency=to_currency,
        )

    @property
    def standard_currencies(self) -> List[str]:
        """Major currency codes used to orient an exchange-rate query."""
        return ["USD", "EUR", "GBP", "JPY", "CHF", "CAD", "AUD", "NZD"]

    def run(self, from_currency: str, to_currency: str) -> str:
        """Get the current exchange rate for a specified currency pair."""
        # If the target is not one of the standard currencies, swap the pair
        # so the standard currency (if any) sits on the "to" side.
        if to_currency not in self.standard_currencies:
            from_currency, to_currency = to_currency, from_currency
        data = self._get_exchange_rate(from_currency, to_currency)
        # NOTE(review): the annotation says ``str`` but the API payload under
        # this key is a mapping — confirm and align the annotation upstream.
        return data["Realtime Currency Exchange Rate"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/asknews.py | """Util that calls AskNews api."""
from __future__ import annotations
from datetime import datetime, timedelta
from typing import Any, Dict, Optional
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
class AskNewsAPIWrapper(BaseModel):
    """Wrapper for AskNews API."""

    asknews_sync: Any = None  #: :meta private:
    asknews_async: Any = None  #: :meta private:
    asknews_client_id: Optional[str] = None
    """Client ID for the AskNews API."""
    asknews_client_secret: Optional[str] = None
    """Client Secret for the AskNews API."""

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api credentials and python package exists in environment."""
        asknews_client_id = get_from_dict_or_env(
            values, "asknews_client_id", "ASKNEWS_CLIENT_ID"
        )
        asknews_client_secret = get_from_dict_or_env(
            values, "asknews_client_secret", "ASKNEWS_CLIENT_SECRET"
        )
        try:
            import asknews_sdk
        except ImportError:
            raise ImportError(
                "AskNews python package not found. "
                "Please install it with `pip install asknews`."
            )
        # Build both sync and async clients up front so either search path
        # can be used without further setup.
        an_sync = asknews_sdk.AskNewsSDK(
            client_id=asknews_client_id,
            client_secret=asknews_client_secret,
            scopes=["news"],
        )
        an_async = asknews_sdk.AsyncAskNewsSDK(
            client_id=asknews_client_id,
            client_secret=asknews_client_secret,
            scopes=["news"],
        )
        values["asknews_sync"] = an_sync
        values["asknews_async"] = an_async
        values["asknews_client_id"] = asknews_client_id
        values["asknews_client_secret"] = asknews_client_secret
        return values

    @staticmethod
    def _search_window(hours_back: int) -> Dict[str, Any]:
        """Derive endpoint parameters from the requested look-back window.

        Look-backs beyond 48 hours require the keyword-based historical
        endpoint with an explicit timestamp range; shorter ones use the
        natural-language endpoint with no range.  Shared by the sync and
        async search paths so the policy cannot drift between them.
        """
        if hours_back > 48:
            return {
                "method": "kw",
                "historical": True,
                "start_timestamp": int(
                    (datetime.now() - timedelta(hours=hours_back)).timestamp()
                ),
                "end_timestamp": int(datetime.now().timestamp()),
            }
        return {
            "method": "nl",
            "historical": False,
            "start_timestamp": None,
            "end_timestamp": None,
        }

    def search_news(
        self, query: str, max_results: int = 10, hours_back: int = 0
    ) -> str:
        """Search news in AskNews API synchronously."""
        response = self.asknews_sync.news.search_news(
            query=query,
            n_articles=max_results,
            return_type="string",
            **self._search_window(hours_back),
        )
        return response.as_string

    async def asearch_news(
        self, query: str, max_results: int = 10, hours_back: int = 0
    ) -> str:
        """Search news in AskNews API asynchronously."""
        response = await self.asknews_async.news.search_news(
            query=query,
            n_articles=max_results,
            return_type="string",
            **self._search_window(hours_back),
        )
        return response.as_string
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/dria_index.py | import logging
from typing import Any, Dict, List, Optional, Union
logger = logging.getLogger(__name__)
class DriaAPIWrapper:
    """Wrapper around Dria API.

    This wrapper facilitates interactions with Dria's vector search
    and retrieval services, including creating knowledge bases, inserting data,
    and fetching search results.

    Attributes:
        api_key: Your API key for accessing Dria.
        contract_id: The contract ID of the knowledge base to interact with.
        top_n: Number of top results to fetch for a search.

    Raises:
        ImportError: If the ``dria`` python package is not installed.
    """

    def __init__(
        self, api_key: str, contract_id: Optional[str] = None, top_n: int = 10
    ):
        try:
            from dria import Dria, Models
        except ImportError as e:
            # Fail fast: the original logged and returned, leaving a
            # half-constructed object whose method calls later raised
            # confusing AttributeErrors. Every sibling wrapper raises.
            raise ImportError(
                "Dria is not installed. Please install Dria to use this wrapper. "
                "You can install Dria using the following command: pip install dria"
            ) from e

        self.api_key = api_key
        self.models = Models
        self.contract_id = contract_id
        self.top_n = top_n
        self.dria_client = Dria(api_key=self.api_key)
        if self.contract_id:
            self.dria_client.set_contract(self.contract_id)

    def create_knowledge_base(
        self,
        name: str,
        description: str,
        category: str,
        embedding: str,
    ) -> str:
        """Create a new knowledge base and make it the active contract."""
        contract_id = self.dria_client.create(
            name=name, embedding=embedding, category=category, description=description
        )
        logger.info(f"Knowledge base created with ID: {contract_id}")
        self.contract_id = contract_id
        return contract_id

    def insert_data(self, data: List[Dict[str, Any]]) -> str:
        """Insert data into the knowledge base."""
        response = self.dria_client.insert_text(data)
        logger.info(f"Data inserted: {response}")
        return response

    def search(self, query: str) -> List[Dict[str, Any]]:
        """Perform a text-based search."""
        results = self.dria_client.search(query, top_n=self.top_n)
        logger.info(f"Search results: {results}")
        return results

    def query_with_vector(self, vector: List[float]) -> List[Dict[str, Any]]:
        """Perform a vector-based query."""
        vector_query_results = self.dria_client.query(vector, top_n=self.top_n)
        logger.info(f"Vector query results: {vector_query_results}")
        return vector_query_results

    def run(self, query: Union[str, List[float]]) -> Optional[List[Dict[str, Any]]]:
        """Method to handle both text-based searches and vector-based queries.

        Args:
            query: A string for text-based search or a list of floats for
                vector-based query.

        Returns:
            The search or query results from Dria, or None when the query
            type is not recognized.
        """
        if isinstance(query, str):
            return self.search(query)
        elif isinstance(query, list) and all(isinstance(item, float) for item in query):
            return self.query_with_vector(query)
        else:
            logger.error(
                """Invalid query type. Please provide a string for text search or a
                list of floats for vector query."""
            )
            return None
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/zapier.py | """Util that can interact with Zapier NLA.
Full docs here: https://nla.zapier.com/start/
Note: this wrapper currently only implemented the `api_key` auth method for testing
and server-side production use cases (using the developer's connected accounts on
Zapier.com)
For use-cases where LangChain + Zapier NLA is powering a user-facing application, and
LangChain needs access to the end-user's connected accounts on Zapier.com, you'll need
to use oauth. Review the full docs above and reach out to nla@zapier.com for
developer support.
"""
import json
from typing import Any, Dict, List, Optional
import aiohttp
import requests
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
from requests import Request, Session
class ZapierNLAWrapper(BaseModel):
    """Wrapper for Zapier NLA.

    Full docs here: https://nla.zapier.com/start/

    This wrapper supports both API Key and OAuth Credential auth methods. API Key
    is the fastest way to get started using this wrapper.

    Call this wrapper with either `zapier_nla_api_key` or
    `zapier_nla_oauth_access_token` arguments, or set the `ZAPIER_NLA_API_KEY`
    environment variable. If both arguments are set, the Access Token will take
    precedence.

    For use-cases where LangChain + Zapier NLA is powering a user-facing application,
    and LangChain needs access to the end-user's connected accounts on Zapier.com,
    you'll need to use OAuth. Review the full docs above to learn how to create
    your own provider and generate credentials.
    """

    zapier_nla_api_key: str
    zapier_nla_oauth_access_token: str
    zapier_nla_api_base: str = "https://nla.zapier.com/api/v1/"

    model_config = ConfigDict(
        extra="forbid",
    )

    def _format_headers(self) -> Dict[str, str]:
        """Format headers for requests.

        The OAuth access token takes precedence over the API key.
        """
        headers = {
            "Accept": "application/json",
            "Content-Type": "application/json",
        }
        if self.zapier_nla_oauth_access_token:
            headers.update(
                {"Authorization": f"Bearer {self.zapier_nla_oauth_access_token}"}
            )
        else:
            headers.update({"X-API-Key": self.zapier_nla_api_key})
        return headers

    def _get_session(self) -> Session:
        """Build a requests session pre-loaded with the auth headers."""
        session = requests.Session()
        session.headers.update(self._format_headers())
        return session

    async def _arequest(self, method: str, url: str, **kwargs: Any) -> Dict[str, Any]:
        """Make an async request and return the parsed JSON body."""
        async with aiohttp.ClientSession(headers=self._format_headers()) as session:
            async with session.request(method, url, **kwargs) as response:
                response.raise_for_status()
                return await response.json()

    def _create_action_payload(  # type: ignore[no-untyped-def]
        self, instructions: str, params: Optional[Dict] = None, preview_only=False
    ) -> Dict:
        """Create a payload for an action.

        Copies `params` so the caller's dict is never mutated (the original
        updated the caller's dict in place, leaking `instructions` and
        `preview_only` keys back to the caller).
        """
        data = dict(params) if params else {}
        data.update(
            {
                "instructions": instructions,
            }
        )
        if preview_only:
            data.update({"preview_only": True})
        return data

    def _create_action_url(self, action_id: str) -> str:
        """Create a url for an action."""
        return self.zapier_nla_api_base + f"exposed/{action_id}/execute/"

    def _create_action_request(  # type: ignore[no-untyped-def]
        self,
        action_id: str,
        instructions: str,
        params: Optional[Dict] = None,
        preview_only=False,
    ) -> Request:
        """Build an (unprepared) POST request for executing/previewing an action."""
        data = self._create_action_payload(instructions, params, preview_only)
        return Request(
            "POST",
            self._create_action_url(action_id),
            json=data,
        )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key exists in environment."""
        zapier_nla_api_key_default = None

        # If there is a oauth_access_key passed in the values
        # we don't need a nla_api_key it can be blank
        if "zapier_nla_oauth_access_token" in values:
            zapier_nla_api_key_default = ""
        else:
            values["zapier_nla_oauth_access_token"] = ""

        # we require at least one API Key
        zapier_nla_api_key = get_from_dict_or_env(
            values,
            "zapier_nla_api_key",
            "ZAPIER_NLA_API_KEY",
            zapier_nla_api_key_default,
        )
        values["zapier_nla_api_key"] = zapier_nla_api_key

        return values

    async def alist(self) -> List[Dict]:
        """Returns a list of all exposed (enabled) actions associated with
        current user (associated with the set api_key). Change your exposed
        actions here: https://nla.zapier.com/demo/start/

        The return list can be empty if no actions exposed. Else will contain
        a list of action objects:

        [{
            "id": str,
            "description": str,
            "params": Dict[str, str]
        }]

        `params` will always contain an `instructions` key, the only required
        param. All others optional and if provided will override any AI guesses
        (see "understanding the AI guessing flow" here:
        https://nla.zapier.com/api/v1/docs)
        """
        response = await self._arequest("GET", self.zapier_nla_api_base + "exposed/")
        return response["results"]

    def list(self) -> List[Dict]:
        """Returns a list of all exposed (enabled) actions associated with
        current user (associated with the set api_key). Change your exposed
        actions here: https://nla.zapier.com/demo/start/

        The return list can be empty if no actions exposed. Else will contain
        a list of action objects:

        [{
            "id": str,
            "description": str,
            "params": Dict[str, str]
        }]

        `params` will always contain an `instructions` key, the only required
        param. All others optional and if provided will override any AI guesses
        (see "understanding the AI guessing flow" here:
        https://nla.zapier.com/docs/using-the-api#ai-guessing)
        """
        session = self._get_session()
        try:
            response = session.get(self.zapier_nla_api_base + "exposed/")
            response.raise_for_status()
        except requests.HTTPError as http_err:
            # Give an auth-specific hint for 401s; re-raise everything else.
            if response.status_code == 401:
                if self.zapier_nla_oauth_access_token:
                    raise requests.HTTPError(
                        f"An unauthorized response occurred. Check that your "
                        f"access token is correct and doesn't need to be "
                        f"refreshed. Err: {http_err}",
                        response=response,
                    )
                raise requests.HTTPError(
                    f"An unauthorized response occurred. Check that your api "
                    f"key is correct. Err: {http_err}",
                    response=response,
                )
            raise http_err
        return response.json()["results"]

    def run(
        self, action_id: str, instructions: str, params: Optional[Dict] = None
    ) -> Dict:
        """Executes an action that is identified by action_id, must be exposed
        (enabled) by the current user (associated with the set api_key). Change
        your exposed actions here: https://nla.zapier.com/demo/start/

        The return JSON is guaranteed to be less than ~500 words (350
        tokens) making it safe to inject into the prompt of another LLM
        call.
        """
        session = self._get_session()
        request = self._create_action_request(action_id, instructions, params)
        response = session.send(session.prepare_request(request))
        response.raise_for_status()
        return response.json()["result"]

    async def arun(
        self, action_id: str, instructions: str, params: Optional[Dict] = None
    ) -> Dict:
        """Executes an action that is identified by action_id, must be exposed
        (enabled) by the current user (associated with the set api_key). Change
        your exposed actions here: https://nla.zapier.com/demo/start/

        The return JSON is guaranteed to be less than ~500 words (350
        tokens) making it safe to inject into the prompt of another LLM
        call.
        """
        response = await self._arequest(
            "POST",
            self._create_action_url(action_id),
            json=self._create_action_payload(instructions, params),
        )
        return response["result"]

    def preview(
        self, action_id: str, instructions: str, params: Optional[Dict] = None
    ) -> Dict:
        """Same as run, but instead of actually executing the action, will
        instead return a preview of params that have been guessed by the AI in
        case you need to explicitly review before executing."""
        session = self._get_session()
        # preview_only is set by the request builder; the original also
        # mutated the caller's params dict here, which is no longer needed.
        request = self._create_action_request(action_id, instructions, params, True)
        response = session.send(session.prepare_request(request))
        response.raise_for_status()
        return response.json()["input_params"]

    async def apreview(
        self, action_id: str, instructions: str, params: Optional[Dict] = None
    ) -> Dict:
        """Same as run, but instead of actually executing the action, will
        instead return a preview of params that have been guessed by the AI in
        case you need to explicitly review before executing."""
        response = await self._arequest(
            "POST",
            self._create_action_url(action_id),
            json=self._create_action_payload(instructions, params, preview_only=True),
        )
        # Mirror the sync `preview`: surface the AI-guessed input params
        # (the original returned response["result"], inconsistent with
        # the sync variant which returns "input_params").
        return response["input_params"]

    def run_as_str(self, *args, **kwargs) -> str:  # type: ignore[no-untyped-def]
        """Same as run, but returns a stringified version of the JSON for
        inserting back into an LLM."""
        data = self.run(*args, **kwargs)
        return json.dumps(data)

    async def arun_as_str(self, *args, **kwargs) -> str:  # type: ignore[no-untyped-def]
        """Same as run, but returns a stringified version of the JSON for
        inserting back into an LLM."""
        data = await self.arun(*args, **kwargs)
        return json.dumps(data)

    def preview_as_str(self, *args, **kwargs) -> str:  # type: ignore[no-untyped-def]
        """Same as preview, but returns a stringified version of the JSON for
        inserting back into an LLM."""
        data = self.preview(*args, **kwargs)
        return json.dumps(data)

    async def apreview_as_str(  # type: ignore[no-untyped-def]
        self, *args, **kwargs
    ) -> str:
        """Same as preview, but returns a stringified version of the JSON for
        inserting back into an LLM."""
        data = await self.apreview(*args, **kwargs)
        return json.dumps(data)

    def list_as_str(self) -> str:  # type: ignore[no-untyped-def]
        """Same as list, but returns a stringified version of the JSON for
        inserting back into an LLM."""
        actions = self.list()
        return json.dumps(actions)

    async def alist_as_str(self) -> str:  # type: ignore[no-untyped-def]
        """Same as list, but returns a stringified version of the JSON for
        inserting back into an LLM."""
        actions = await self.alist()
        return json.dumps(actions)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/python.py | import logging
from typing import Any
logger = logging.getLogger(__name__)
def __getattr__(name: str) -> Any:
    """Module-level attribute hook (PEP 562) that blocks ``PythonREPL``.

    ``PythonREPL`` was removed from langchain_community after being flagged
    by security scanners; accessing it raises an explicit error pointing at
    the langchain_experimental replacement. Any other missing attribute
    raises the standard AttributeError.
    """
    # Bug fix: the original used `name in "PythonREPL"` — a substring test —
    # so unrelated names like "Python", "REPL", or even "P" were trapped too.
    if name == "PythonREPL":
        raise AssertionError(
            "PythonREPL has been deprecated from langchain_community due to being "
            "flagged by security scanners. See: "
            "https://github.com/langchain-ai/langchain/issues/14345 "
            "If you need to use it, please use the version "
            "from langchain_experimental. "
            "from langchain_experimental.utilities.python import PythonREPL."
        )
    raise AttributeError(f"module {__name__} has no attribute {name}")
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/apify.py | from typing import TYPE_CHECKING, Any, Callable, Dict, Optional
from langchain_core.documents import Document
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, model_validator
if TYPE_CHECKING:
from langchain_community.document_loaders import ApifyDatasetLoader
class ApifyWrapper(BaseModel):
    """Wrapper around Apify.

    To use, you should have the ``apify-client`` python package installed,
    and the environment variable ``APIFY_API_TOKEN`` set with your API key, or pass
    `apify_api_token` as a named parameter to the constructor.
    """

    apify_client: Any
    apify_client_async: Any
    apify_api_token: Optional[str] = None

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate environment.

        Validate that an Apify API token is set and the apify-client
        Python package exists in the current environment.
        """
        apify_api_token = get_from_dict_or_env(
            values, "apify_api_token", "APIFY_API_TOKEN"
        )

        # Keep the try narrow: only the import can raise ImportError; the
        # original wrapped the client construction in the same handler
        # even though it never raises ImportError.
        try:
            from apify_client import ApifyClient, ApifyClientAsync
        except ImportError:
            raise ImportError(
                "Could not import apify-client Python package. "
                "Please install it with `pip install apify-client`."
            )

        client = ApifyClient(apify_api_token)
        # Tag requests so Apify can attribute traffic to langchain. Use a
        # default of None so client versions without this attribute are
        # tolerated instead of raising AttributeError.
        if httpx_client := getattr(client.http_client, "httpx_client", None):
            httpx_client.headers["user-agent"] += "; Origin/langchain"

        async_client = ApifyClientAsync(apify_api_token)
        if httpx_async_client := getattr(
            async_client.http_client, "httpx_async_client", None
        ):
            httpx_async_client.headers["user-agent"] += "; Origin/langchain"

        values["apify_client"] = client
        values["apify_client_async"] = async_client

        return values

    def call_actor(
        self,
        actor_id: str,
        run_input: Dict,
        dataset_mapping_function: Callable[[Dict], Document],
        *,
        build: Optional[str] = None,
        memory_mbytes: Optional[int] = None,
        timeout_secs: Optional[int] = None,
    ) -> "ApifyDatasetLoader":
        """Run an Actor on the Apify platform and wait for results to be ready.

        Args:
            actor_id (str): The ID or name of the Actor on the Apify platform.
            run_input (Dict): The input object of the Actor that you're trying to run.
            dataset_mapping_function (Callable): A function that takes a single
                dictionary (an Apify dataset item) and converts it to an
                instance of the Document class.
            build (str, optional): Optionally specifies the actor build to run.
                It can be either a build tag or build number.
            memory_mbytes (int, optional): Optional memory limit for the run,
                in megabytes.
            timeout_secs (int, optional): Optional timeout for the run, in seconds.

        Returns:
            ApifyDatasetLoader: A loader that will fetch the records from the
                Actor run's default dataset.
        """
        from langchain_community.document_loaders import ApifyDatasetLoader

        actor_call = self.apify_client.actor(actor_id).call(
            run_input=run_input,
            build=build,
            memory_mbytes=memory_mbytes,
            timeout_secs=timeout_secs,
        )

        return ApifyDatasetLoader(
            dataset_id=actor_call["defaultDatasetId"],
            dataset_mapping_function=dataset_mapping_function,
        )

    async def acall_actor(
        self,
        actor_id: str,
        run_input: Dict,
        dataset_mapping_function: Callable[[Dict], Document],
        *,
        build: Optional[str] = None,
        memory_mbytes: Optional[int] = None,
        timeout_secs: Optional[int] = None,
    ) -> "ApifyDatasetLoader":
        """Run an Actor on the Apify platform and wait for results to be ready.

        Args:
            actor_id (str): The ID or name of the Actor on the Apify platform.
            run_input (Dict): The input object of the Actor that you're trying to run.
            dataset_mapping_function (Callable): A function that takes a single
                dictionary (an Apify dataset item) and converts it to
                an instance of the Document class.
            build (str, optional): Optionally specifies the actor build to run.
                It can be either a build tag or build number.
            memory_mbytes (int, optional): Optional memory limit for the run,
                in megabytes.
            timeout_secs (int, optional): Optional timeout for the run, in seconds.

        Returns:
            ApifyDatasetLoader: A loader that will fetch the records from the
                Actor run's default dataset.
        """
        from langchain_community.document_loaders import ApifyDatasetLoader

        actor_call = await self.apify_client_async.actor(actor_id).call(
            run_input=run_input,
            build=build,
            memory_mbytes=memory_mbytes,
            timeout_secs=timeout_secs,
        )

        return ApifyDatasetLoader(
            dataset_id=actor_call["defaultDatasetId"],
            dataset_mapping_function=dataset_mapping_function,
        )

    def call_actor_task(
        self,
        task_id: str,
        task_input: Dict,
        dataset_mapping_function: Callable[[Dict], Document],
        *,
        build: Optional[str] = None,
        memory_mbytes: Optional[int] = None,
        timeout_secs: Optional[int] = None,
    ) -> "ApifyDatasetLoader":
        """Run a saved Actor task on Apify and wait for results to be ready.

        Args:
            task_id (str): The ID or name of the task on the Apify platform.
            task_input (Dict): The input object of the task that you're trying to run.
                Overrides the task's saved input.
            dataset_mapping_function (Callable): A function that takes a single
                dictionary (an Apify dataset item) and converts it to an
                instance of the Document class.
            build (str, optional): Optionally specifies the actor build to run.
                It can be either a build tag or build number.
            memory_mbytes (int, optional): Optional memory limit for the run,
                in megabytes.
            timeout_secs (int, optional): Optional timeout for the run, in seconds.

        Returns:
            ApifyDatasetLoader: A loader that will fetch the records from the
                task run's default dataset.
        """
        from langchain_community.document_loaders import ApifyDatasetLoader

        task_call = self.apify_client.task(task_id).call(
            task_input=task_input,
            build=build,
            memory_mbytes=memory_mbytes,
            timeout_secs=timeout_secs,
        )

        return ApifyDatasetLoader(
            dataset_id=task_call["defaultDatasetId"],
            dataset_mapping_function=dataset_mapping_function,
        )

    async def acall_actor_task(
        self,
        task_id: str,
        task_input: Dict,
        dataset_mapping_function: Callable[[Dict], Document],
        *,
        build: Optional[str] = None,
        memory_mbytes: Optional[int] = None,
        timeout_secs: Optional[int] = None,
    ) -> "ApifyDatasetLoader":
        """Run a saved Actor task on Apify and wait for results to be ready.

        Args:
            task_id (str): The ID or name of the task on the Apify platform.
            task_input (Dict): The input object of the task that you're trying to run.
                Overrides the task's saved input.
            dataset_mapping_function (Callable): A function that takes a single
                dictionary (an Apify dataset item) and converts it to an
                instance of the Document class.
            build (str, optional): Optionally specifies the actor build to run.
                It can be either a build tag or build number.
            memory_mbytes (int, optional): Optional memory limit for the run,
                in megabytes.
            timeout_secs (int, optional): Optional timeout for the run, in seconds.

        Returns:
            ApifyDatasetLoader: A loader that will fetch the records from the
                task run's default dataset.
        """
        from langchain_community.document_loaders import ApifyDatasetLoader

        task_call = await self.apify_client_async.task(task_id).call(
            task_input=task_input,
            build=build,
            memory_mbytes=memory_mbytes,
            timeout_secs=timeout_secs,
        )

        return ApifyDatasetLoader(
            dataset_id=task_call["defaultDatasetId"],
            dataset_mapping_function=dataset_mapping_function,
        )
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/semanticscholar.py | """Utils for interacting with the Semantic Scholar API."""
import logging
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, model_validator
logger = logging.getLogger(__name__)
class SemanticScholarAPIWrapper(BaseModel):
    """Wrapper around semanticscholar.org API.
    https://github.com/danielnsilva/semanticscholar

    You should have this library installed.

    `pip install semanticscholar`

    Semantic Scholar API can conduct searches and fetch document metadata
    like title, abstract, authors, etc.

    Attributes:
    top_k_results: number of the top-scored document used for the Semantic Scholar tool
    load_max_docs: a limit to the number of loaded documents

    Example:
    .. code-block:: python

    from langchain_community.utilities.semanticscholar import SemanticScholarAPIWrapper
    ss = SemanticScholarAPIWrapper(
        top_k_results = 3,
        load_max_docs = 3
    )
    ss.run("biases in large language models")
    """

    semanticscholar_search: Any  #: :meta private:
    top_k_results: int = 5
    # Maximum query length accepted by the Semantic Scholar search endpoint.
    S2_MAX_QUERY_LENGTH: int = 300
    load_max_docs: int = 100
    doc_content_chars_max: Optional[int] = 4000
    returned_fields: List[str] = [
        "title",
        "abstract",
        "venue",
        "year",
        "paperId",
        "citationCount",
        "openAccessPdf",
        "authors",
        "externalIds",
    ]

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that the python package exists in environment."""
        try:
            from semanticscholar import SemanticScholar

            sch = SemanticScholar()
            values["semanticscholar_search"] = sch.search_paper
        except ImportError:
            raise ImportError(
                "Could not import Semanticscholar python package. "
                "Please install it with `pip install semanticscholar`."
            )
        return values

    def run(self, query: str) -> str:
        """Run the Semantic Scholar API.

        Returns a formatted summary of the top results, truncated to
        ``doc_content_chars_max`` characters.
        """
        # Enforce the declared cap: S2_MAX_QUERY_LENGTH was defined but
        # never applied (the sibling PubMed wrapper truncates similarly).
        results = self.semanticscholar_search(
            query[: self.S2_MAX_QUERY_LENGTH],
            limit=self.load_max_docs,
            fields=self.returned_fields,
        )
        documents = []
        for item in results[: self.top_k_results]:
            authors = ", ".join(
                author["name"] for author in getattr(item, "authors", [])
            )
            documents.append(
                f"Published year: {getattr(item, 'year', None)}\n"
                f"Title: {getattr(item, 'title', None)}\n"
                f"Authors: {authors}\n"
                f"Abstract: {getattr(item, 'abstract', None)}\n"
            )

        if documents:
            return "\n\n".join(documents)[: self.doc_content_chars_max]
        else:
            return "No results found."
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/google_jobs.py | """Util that calls Google Scholar Search."""
from typing import Any, Dict, Optional, cast
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, SecretStr, model_validator
class GoogleJobsAPIWrapper(BaseModel):
    """Wrapper for SerpApi's Google Jobs API

    You can create SerpApi.com key by signing up at: https://serpapi.com/users/sign_up.
    The wrapper uses the SerpApi.com python package:
    https://serpapi.com/integrations/python
    To use, you should have the environment variable ``SERPAPI_API_KEY``
    set with your API key, or pass `serp_api_key` as a named parameter
    to the constructor.

     Example:
        .. code-block:: python

        from langchain_community.utilities import GoogleJobsAPIWrapper
        google_Jobs = GoogleJobsAPIWrapper()
        google_Jobs.run('langchain')
    """

    serp_search_engine: Any = None
    serp_api_key: Optional[SecretStr] = None

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        values["serp_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(values, "serp_api_key", "SERPAPI_API_KEY")
        )

        try:
            from serpapi import SerpApiClient
        except ImportError:
            raise ImportError(
                "google-search-results is not installed. "
                "Please install it with `pip install google-search-results"
                ">=2.4.2`"
            )

        values["serp_search_engine"] = SerpApiClient

        return values

    def run(self, query: str) -> str:
        """Run query through Google Jobs with SerpApi.

        Returns a formatted summary of the top job listing, or a
        "no result" message when the search comes back empty (the original
        raised KeyError/IndexError in that case).
        """
        # set up query
        serpapi_api_key = cast(SecretStr, self.serp_api_key)
        params = {
            "engine": "google_jobs",
            "api_key": serpapi_api_key.get_secret_value(),
            "q": query,
        }

        client = self.serp_search_engine(params)
        total_results = client.get_dict().get("jobs_results", [])
        if not total_results:
            return "No good Google Jobs result was found\n"

        # extract the top job's info (the original looped over range(1)).
        job = total_results[0]
        res_str = (
            "\n_______________________________________________"
            + f"\nJob Title: {job['title']}\n"
            + f"Company Name: {job['company_name']}\n"
            + f"Location: {job['location']}\n"
            + f"Description: {job['description']}"
            + "\n_______________________________________________\n"
        )

        return res_str + "\n"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/pubmed.py | import json
import logging
import time
import urllib.error
import urllib.parse
import urllib.request
from typing import Any, Dict, Iterator, List
from langchain_core.documents import Document
from pydantic import BaseModel, model_validator
logger = logging.getLogger(__name__)
class PubMedAPIWrapper(BaseModel):
    """
    Wrapper around PubMed API.

    This wrapper will use the PubMed API to conduct searches and fetch
    document summaries. By default, it will return the document summaries
    of the top-k results of an input search.

    Parameters:
        top_k_results: number of the top-scored document used for the PubMed tool
        MAX_QUERY_LENGTH: maximum length of the query.
            Default is 300 characters.
        doc_content_chars_max: maximum length of the document content.
            Content will be truncated if it exceeds this length.
            Default is 2000 characters.
        max_retry: maximum number of retries for a request. Default is 5.
        sleep_time: time to wait between retries.
            Default is 0.2 seconds.
        email: email address to be used for the PubMed API.
        api_key: API key to be used for the PubMed API.
    """

    parse: Any  #: :meta private:

    base_url_esearch: str = (
        "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?"
    )
    base_url_efetch: str = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?"
    max_retry: int = 5
    sleep_time: float = 0.2

    # Default values for the parameters
    top_k_results: int = 3
    MAX_QUERY_LENGTH: int = 300
    doc_content_chars_max: int = 2000
    email: str = "your_email@example.com"
    api_key: str = ""

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that the python package exists in environment."""
        try:
            import xmltodict

            values["parse"] = xmltodict.parse
        except ImportError:
            raise ImportError(
                "Could not import xmltodict python package. "
                "Please install it with `pip install xmltodict`."
            )
        return values

    def run(self, query: str) -> str:
        """
        Run PubMed search and get the article meta information.
        See https://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESearch
        It uses only the most informative fields of article meta information.
        """
        try:
            # Retrieve the top-k results for the query
            docs = [
                f"Published: {result['Published']}\n"
                f"Title: {result['Title']}\n"
                f"Copyright Information: {result['Copyright Information']}\n"
                f"Summary::\n{result['Summary']}"
                for result in self.load(query[: self.MAX_QUERY_LENGTH])
            ]

            # Join the results and limit the character count
            return (
                "\n\n".join(docs)[: self.doc_content_chars_max]
                if docs
                else "No good PubMed Result was found"
            )
        except Exception as ex:
            return f"PubMed exception: {ex}"

    def lazy_load(self, query: str) -> Iterator[dict]:
        """
        Search PubMed for documents matching the query.
        Return an iterator of dictionaries containing the document metadata.
        """
        url = (
            self.base_url_esearch
            + "db=pubmed&term="
            # Bug fix: the query was previously wrapped in a set literal
            # (str({...})), so the URL contained "{'...'}" instead of the
            # URL-quoted query string.
            + urllib.parse.quote(query)
            + f"&retmode=json&retmax={self.top_k_results}&usehistory=y"
        )
        if self.api_key != "":
            url += f"&api_key={self.api_key}"
        result = urllib.request.urlopen(url)
        text = result.read().decode("utf-8")
        json_text = json.loads(text)

        webenv = json_text["esearchresult"]["webenv"]
        for uid in json_text["esearchresult"]["idlist"]:
            yield self.retrieve_article(uid, webenv)

    def load(self, query: str) -> List[dict]:
        """
        Search PubMed for documents matching the query.
        Return a list of dictionaries containing the document metadata.
        """
        return list(self.lazy_load(query))

    def _dict2document(self, doc: dict) -> Document:
        """Convert a parsed article dict into a Document (summary as content)."""
        summary = doc.pop("Summary")
        return Document(page_content=summary, metadata=doc)

    def lazy_load_docs(self, query: str) -> Iterator[Document]:
        """Yield search results converted to Document objects."""
        for d in self.lazy_load(query=query):
            yield self._dict2document(d)

    def load_docs(self, query: str) -> List[Document]:
        """Return search results converted to a list of Document objects."""
        return list(self.lazy_load_docs(query=query))

    def retrieve_article(self, uid: str, webenv: str) -> dict:
        """Fetch one article by uid via EFetch, retrying on rate limits."""
        url = (
            self.base_url_efetch
            + "db=pubmed&retmode=xml&id="
            + uid
            + "&webenv="
            + webenv
        )
        if self.api_key != "":
            url += f"&api_key={self.api_key}"

        # Use a local backoff variable: the original doubled
        # self.sleep_time in place, permanently inflating the delay for
        # every subsequent request on this wrapper instance.
        sleep_time = self.sleep_time
        retry = 0
        while True:
            try:
                result = urllib.request.urlopen(url)
                break
            except urllib.error.HTTPError as e:
                if e.code == 429 and retry < self.max_retry:
                    # Too Many Requests errors
                    # wait for an exponentially increasing amount of time
                    print(  # noqa: T201
                        f"Too Many Requests, "
                        f"waiting for {sleep_time:.2f} seconds..."
                    )
                    time.sleep(sleep_time)
                    sleep_time *= 2
                    retry += 1
                else:
                    raise e

        xml_text = result.read().decode("utf-8")
        text_dict = self.parse(xml_text)
        return self._parse_article(uid, text_dict)

    def _parse_article(self, uid: str, text_dict: dict) -> dict:
        """Extract title, date, copyright and abstract from a parsed EFetch dict."""
        try:
            ar = text_dict["PubmedArticleSet"]["PubmedArticle"]["MedlineCitation"][
                "Article"
            ]
        except KeyError:
            # Book entries use a different envelope than journal articles.
            ar = text_dict["PubmedArticleSet"]["PubmedBookArticle"]["BookDocument"]
        abstract_text = ar.get("Abstract", {}).get("AbstractText", [])
        summaries = [
            f"{txt['@Label']}: {txt['#text']}"
            for txt in abstract_text
            if "#text" in txt and "@Label" in txt
        ]
        summary = (
            "\n".join(summaries)
            if summaries
            else (
                abstract_text
                if isinstance(abstract_text, str)
                else (
                    "\n".join(str(value) for value in abstract_text.values())
                    if isinstance(abstract_text, dict)
                    else "No abstract available"
                )
            )
        )
        a_d = ar.get("ArticleDate", {})
        pub_date = "-".join(
            [a_d.get("Year", ""), a_d.get("Month", ""), a_d.get("Day", "")]
        )

        return {
            "uid": uid,
            "Title": ar.get("ArticleTitle", ""),
            "Published": pub_date,
            "Copyright Information": ar.get("Abstract", {}).get(
                "CopyrightInformation", ""
            ),
            "Summary": summary,
        }
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/wikidata.py | """Util that calls Wikidata."""
import logging
from typing import Any, Dict, List, Optional
from langchain_core.documents import Document
from pydantic import BaseModel, model_validator
logger = logging.getLogger(__name__)
WIKIDATA_MAX_QUERY_LENGTH = 300
# Common properties you probably want to see filtered from https://www.wikidata.org/wiki/Wikidata:Database_reports/List_of_properties/all
DEFAULT_PROPERTIES = [
"P31",
"P279",
"P27",
"P361",
"P527",
"P495",
"P17",
"P585",
"P131",
"P106",
"P21",
"P569",
"P570",
"P577",
"P50",
"P571",
"P641",
"P625",
"P19",
"P69",
"P108",
"P136",
"P39",
"P161",
"P20",
"P101",
"P179",
"P175",
"P7937",
"P57",
"P607",
"P509",
"P800",
"P449",
"P580",
"P582",
"P276",
"P69",
"P112",
"P740",
"P159",
"P452",
"P102",
"P1142",
"P1387",
"P1576",
"P140",
"P178",
"P287",
"P25",
"P22",
"P40",
"P185",
"P802",
"P1416",
]
DEFAULT_LANG_CODE = "en"
WIKIDATA_USER_AGENT = "langchain-wikidata"
WIKIDATA_API_URL = "https://www.wikidata.org/w/api.php"
WIKIDATA_REST_API_URL = "https://www.wikidata.org/w/rest.php/wikibase/v0/"
class WikidataAPIWrapper(BaseModel):
    """Wrapper around the Wikidata API.

    To use, you should have the ``wikibase-rest-api-client`` and
    ``mediawikiapi`` python packages installed.
    This wrapper will use the Wikibase APIs to conduct searches and
    fetch item content. By default, it will return the item content
    of the top-k results.
    It limits the Document content by doc_content_chars_max.
    """

    wikidata_mw: Any  #: :meta private:
    wikidata_rest: Any  # : :meta private:
    top_k_results: int = 2
    load_all_available_meta: bool = False
    doc_content_chars_max: int = 4000
    # Only statements for these property IDs are rendered into the text.
    wikidata_props: List[str] = DEFAULT_PROPERTIES
    lang: str = DEFAULT_LANG_CODE

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that the python package exists in environment."""
        try:
            from mediawikiapi import MediaWikiAPI
            from mediawikiapi.config import Config

            # MediaWiki client used for full-text search over Wikidata.
            values["wikidata_mw"] = MediaWikiAPI(
                Config(user_agent=WIKIDATA_USER_AGENT, mediawiki_url=WIKIDATA_API_URL)
            )
        except ImportError:
            raise ImportError(
                "Could not import mediawikiapi python package. "
                "Please install it with `pip install mediawikiapi`."
            )

        try:
            from wikibase_rest_api_client import Client

            # REST client used to fetch the content of individual items.
            client = Client(
                timeout=60,
                base_url=WIKIDATA_REST_API_URL,
                headers={"User-Agent": WIKIDATA_USER_AGENT},
                follow_redirects=True,
            )
            values["wikidata_rest"] = client
        except ImportError:
            raise ImportError(
                "Could not import wikibase_rest_api_client python package. "
                "Please install it with `pip install wikibase-rest-api-client`."
            )
        return values

    def _item_to_document(self, qid: str) -> Optional[Document]:
        """Fetch one item by QID and render it as a Document.

        Returns None (with a warning) when the item cannot be found.
        """
        from wikibase_rest_api_client.utilities.fluent import FluentWikibaseClient

        fluent_client: FluentWikibaseClient = FluentWikibaseClient(
            self.wikidata_rest, supported_props=self.wikidata_props, lang=self.lang
        )
        resp = fluent_client.get_item(qid)

        if not resp:
            logger.warning(f"Could not find item {qid} in Wikidata")
            return None

        doc_lines = []
        if resp.label:
            doc_lines.append(f"Label: {resp.label}")
        if resp.description:
            doc_lines.append(f"Description: {resp.description}")
        if resp.aliases:
            doc_lines.append(f"Aliases: {', '.join(resp.aliases)}")
        for prop, values in resp.statements.items():
            if values:
                doc_lines.append(f"{prop.label}: {', '.join(values)}")
        return Document(
            page_content=("\n".join(doc_lines))[: self.doc_content_chars_max],
            # Fix: Document stores its attributes under ``metadata``; the
            # previous ``meta=`` keyword is not a Document field, so the
            # title/source were dropped from the returned documents.
            metadata={"title": qid, "source": f"https://www.wikidata.org/wiki/{qid}"},
        )

    def load(self, query: str) -> List[Document]:
        """
        Run Wikidata search and get the item documents plus the meta information.
        """
        clipped_query = query[:WIKIDATA_MAX_QUERY_LENGTH]
        items = self.wikidata_mw.search(clipped_query, results=self.top_k_results)
        docs = []
        for item in items[: self.top_k_results]:
            if doc := self._item_to_document(item):
                docs.append(doc)
        return docs

    def run(self, query: str) -> str:
        """Run Wikidata search and get item summaries."""
        clipped_query = query[:WIKIDATA_MAX_QUERY_LENGTH]
        items = self.wikidata_mw.search(clipped_query, results=self.top_k_results)
        docs = []
        for item in items[: self.top_k_results]:
            if doc := self._item_to_document(item):
                docs.append(f"Result {item}:\n{doc.page_content}")
        if not docs:
            return "No good Wikidata Search Result was found"
        return "\n\n".join(docs)[: self.doc_content_chars_max]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/passio_nutrition_ai.py | """Util that invokes the Passio Nutrition AI API."""
from datetime import datetime, timedelta
from typing import Any, Callable, Dict, Optional, final
import requests
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, Field, model_validator
class NoDiskStorage:
    """Mixin that refuses pickling so credential-bearing objects never
    end up serialized to disk."""

    @final
    def __getstate__(self) -> None:
        # Serializing would expose secrets held by the subclass.
        self._refuse()

    @final
    def __setstate__(self, state: Any) -> None:
        self._refuse()

    @staticmethod
    def _refuse() -> None:
        """Raise the shared refusal error for both pickle hooks."""
        raise AttributeError("Do not store on disk.")
try:
    from tenacity import (
        retry,
        retry_if_result,
        stop_after_attempt,
        wait_exponential,
        wait_random,
    )
except ImportError:
    # No retries if tenacity is not installed.
    #
    # Fix: the previous fallbacks were named ``retry_fallback`` /
    # ``stop_after_attempt_fallback`` / etc., but the decorators below use
    # the tenacity names (``retry``, ``retry_if_result``, ...), so importing
    # this module without tenacity raised NameError.  The ``wait_*``
    # fallbacks also returned None, making ``wait_random(...) +
    # wait_exponential(...)`` a TypeError.  These stand-ins keep the module
    # importable and make decorated functions run exactly once.
    def retry(*dargs: Any, **dkwargs: Any) -> Callable[..., Any]:
        def decorator(f: Callable[..., Any]) -> Callable[..., Any]:
            return f

        return decorator

    def retry_if_result(predicate: Callable[..., Any]) -> None:
        return None

    def stop_after_attempt(n: int) -> None:
        return None

    def wait_random(a: float = 0, b: float = 1) -> float:
        # 0 keeps the ``wait_random(...) + wait_exponential(...)`` sum valid.
        return 0

    def wait_exponential(
        multiplier: float = 1, min: float = 0, max: float = float("inf")
    ) -> float:
        return 0

    # Backwards-compatible aliases for the old fallback names.
    retry_fallback = retry
    stop_after_attempt_fallback = stop_after_attempt
    wait_random_fallback = wait_random
    wait_exponential_fallback = wait_exponential
def is_http_retryable(rsp: "requests.Response") -> bool:
    """Check if a HTTP response is retryable.

    Fix: ``requests.Response.__bool__`` is ``self.ok`` (True only for
    status < 400), so the previous ``bool(rsp) and rsp.status_code in
    [...]`` guard was always False for every retryable code listed here
    (all >= 408) — the retry decorators never actually retried.  Only a
    missing response short-circuits now.
    """
    if rsp is None:
        return False
    # Timeouts, rate limiting, and transient server-side failures.
    return rsp.status_code in {408, 425, 429, 500, 502, 503, 504}
class ManagedPassioLifeAuth(NoDiskStorage):
    """Manage the token for the NutritionAI API."""

    _access_token_expiry: Optional[datetime]

    def __init__(self, subscription_key: str):
        self.subscription_key = subscription_key
        self._last_token = None
        self._access_token_expiry = None
        self._access_token = None
        self._customer_id = None

    @property
    def headers(self) -> dict:
        """Auth headers for API calls; refreshes the token when stale."""
        if not self.is_valid_now():
            self.refresh_access_token()
        return {
            "Authorization": f"Bearer {self._access_token}",
            "Passio-ID": self._customer_id,
        }

    def is_valid_now(self) -> bool:
        """Whether the cached token and customer id are usable right now."""
        if self._access_token is None or self._customer_id is None:
            return False
        expiry = self._access_token_expiry
        return expiry is not None and expiry > datetime.now()

    @retry(
        retry=retry_if_result(is_http_retryable),
        stop=stop_after_attempt(4),
        wait=wait_random(0, 0.3) + wait_exponential(multiplier=1, min=0.1, max=2),
    )
    def _http_get(self, subscription_key: str) -> requests.Response:
        return requests.get(
            f"https://api.passiolife.com/v2/token-cache/napi/oauth/token/{subscription_key}"
        )

    def refresh_access_token(self) -> None:
        """Refresh the access token for the NutritionAI API."""
        rsp = self._http_get(self.subscription_key)
        if not rsp:
            raise ValueError("Could not get access token")
        token = rsp.json()
        self._last_token = token
        self._customer_id = token["customer_id"]
        self._access_token = token["access_token"]
        # Expire 5 seconds early: approximate time for a token refresh to
        # be processed.
        lifetime = timedelta(seconds=token["expires_in"]) - timedelta(seconds=5)
        self._access_token_expiry = datetime.now() + lifetime
DEFAULT_NUTRITIONAI_API_URL = (
"https://api.passiolife.com/v2/products/napi/food/search/advanced"
)
class NutritionAIAPI(BaseModel):
    """Wrapper for the Passio Nutrition AI API."""

    nutritionai_subscription_key: str
    nutritionai_api_url: str = Field(default=DEFAULT_NUTRITIONAI_API_URL)
    # Extra query parameters merged into every search request.
    more_kwargs: dict = Field(default_factory=dict)
    auth_: ManagedPassioLifeAuth

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )

    @retry(
        retry=retry_if_result(is_http_retryable),
        stop=stop_after_attempt(4),
        wait=wait_random(0, 0.3) + wait_exponential(multiplier=1, min=0.1, max=2),
    )
    def _http_get(self, params: dict) -> requests.Response:
        """One authenticated GET against the search endpoint (retried)."""
        return requests.get(
            self.nutritionai_api_url,
            headers=self.auth_.headers,
            params=params,  # type: ignore
        )

    def _api_call_results(self, search_term: str) -> dict:
        """Call the NutritionAI API and return the results."""
        rsp = self._http_get({"term": search_term, **self.more_kwargs})
        if not rsp:
            raise ValueError("Could not get NutritionAI API results")
        rsp.raise_for_status()
        return rsp.json()

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and endpoint exists in environment."""
        nutritionai_subscription_key = get_from_dict_or_env(
            values, "nutritionai_subscription_key", "NUTRITIONAI_SUBSCRIPTION_KEY"
        )
        values["nutritionai_subscription_key"] = nutritionai_subscription_key

        nutritionai_api_url = get_from_dict_or_env(
            values,
            "nutritionai_api_url",
            "NUTRITIONAI_API_URL",
            DEFAULT_NUTRITIONAI_API_URL,
        )
        values["nutritionai_api_url"] = nutritionai_api_url

        values["auth_"] = ManagedPassioLifeAuth(nutritionai_subscription_key)
        return values

    def run(self, query: str) -> Optional[Dict]:
        """Run query through NutritionAI API and parse result.

        Returns None when the API reports no results for the query.
        """
        results = self._api_call_results(query)
        # Fix: the previous guard ``results and len(results) < 1`` required
        # the payload to be simultaneously truthy and empty, so it could
        # never fire; an empty payload now yields None, as the Optional
        # return annotation promises.
        if not results:
            return None
        return results
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/google_serper.py | """Util that calls Google Search using the Serper.dev API."""
from typing import Any, Dict, List, Optional
import aiohttp
import requests
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
from typing_extensions import Literal
class GoogleSerperAPIWrapper(BaseModel):
    """Wrapper around the Serper.dev Google Search API.

    You can create a free API key at https://serper.dev.

    To use, you should have the environment variable ``SERPER_API_KEY``
    set with your API key, or pass `serper_api_key` as a named parameter
    to the constructor.

    Example:
        .. code-block:: python

            from langchain_community.utilities import GoogleSerperAPIWrapper
            google_serper = GoogleSerperAPIWrapper()
    """

    # Maximum number of results requested and kept when parsing.
    k: int = 10
    # Country (gl) and interface language (hl) codes forwarded to Serper.
    gl: str = "us"
    hl: str = "en"
    # "places" and "images" is available from Serper but not implemented in the
    # parser of run(). They can be used in results()
    type: Literal["news", "search", "places", "images"] = "search"
    # Maps each search type to the JSON key under which Serper returns hits.
    result_key_for_type: dict = {
        "news": "news",
        "places": "places",
        "images": "images",
        "search": "organic",
    }

    # Optional "tbs" filter string passed through verbatim to Serper.
    tbs: Optional[str] = None
    serper_api_key: Optional[str] = None
    # Optional shared aiohttp session reused by the async methods.
    aiosession: Optional[aiohttp.ClientSession] = None

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key exists in environment."""
        serper_api_key = get_from_dict_or_env(
            values, "serper_api_key", "SERPER_API_KEY"
        )
        values["serper_api_key"] = serper_api_key

        return values

    def results(self, query: str, **kwargs: Any) -> Dict:
        """Run query through GoogleSearch.

        Returns the raw Serper JSON payload (unlike ``run``, which parses it).
        """
        return self._google_serper_api_results(
            query,
            gl=self.gl,
            hl=self.hl,
            num=self.k,
            tbs=self.tbs,
            search_type=self.type,
            **kwargs,
        )

    def run(self, query: str, **kwargs: Any) -> str:
        """Run query through GoogleSearch and parse result."""
        results = self._google_serper_api_results(
            query,
            gl=self.gl,
            hl=self.hl,
            num=self.k,
            tbs=self.tbs,
            search_type=self.type,
            **kwargs,
        )

        return self._parse_results(results)

    async def aresults(self, query: str, **kwargs: Any) -> Dict:
        """Run query through GoogleSearch.

        Async variant of ``results``; returns the raw Serper JSON payload.
        """
        results = await self._async_google_serper_search_results(
            query,
            gl=self.gl,
            hl=self.hl,
            num=self.k,
            search_type=self.type,
            tbs=self.tbs,
            **kwargs,
        )
        return results

    async def arun(self, query: str, **kwargs: Any) -> str:
        """Run query through GoogleSearch and parse result async."""
        results = await self._async_google_serper_search_results(
            query,
            gl=self.gl,
            hl=self.hl,
            num=self.k,
            search_type=self.type,
            tbs=self.tbs,
            **kwargs,
        )

        return self._parse_results(results)

    def _parse_snippets(self, results: dict) -> List[str]:
        """Extract snippet strings from a Serper payload.

        Precedence: an answer box short-circuits everything; otherwise the
        knowledge graph (if any) is prepended to the per-result snippets.
        """
        snippets = []

        if results.get("answerBox"):
            answer_box = results.get("answerBox", {})
            if answer_box.get("answer"):
                return [answer_box.get("answer")]
            elif answer_box.get("snippet"):
                return [answer_box.get("snippet").replace("\n", " ")]
            elif answer_box.get("snippetHighlighted"):
                return answer_box.get("snippetHighlighted")

        if results.get("knowledgeGraph"):
            kg = results.get("knowledgeGraph", {})
            title = kg.get("title")
            entity_type = kg.get("type")
            if entity_type:
                snippets.append(f"{title}: {entity_type}.")
            description = kg.get("description")
            if description:
                snippets.append(description)
            for attribute, value in kg.get("attributes", {}).items():
                snippets.append(f"{title} {attribute}: {value}.")

        # KeyError here means self.type has no hits key in this payload
        # (e.g. "places"/"images" used with run() instead of results()).
        for result in results[self.result_key_for_type[self.type]][: self.k]:
            if "snippet" in result:
                snippets.append(result["snippet"])
            for attribute, value in result.get("attributes", {}).items():
                snippets.append(f"{attribute}: {value}.")

        if len(snippets) == 0:
            return ["No good Google Search Result was found"]
        return snippets

    def _parse_results(self, results: dict) -> str:
        """Join the parsed snippets into a single space-separated string."""
        return " ".join(self._parse_snippets(results))

    def _google_serper_api_results(
        self, search_term: str, search_type: str = "search", **kwargs: Any
    ) -> dict:
        """POST the search to Serper and return the decoded JSON payload."""
        headers = {
            "X-API-KEY": self.serper_api_key or "",
            "Content-Type": "application/json",
        }
        params = {
            "q": search_term,
            **{key: value for key, value in kwargs.items() if value is not None},
        }
        # NOTE(review): the payload is sent as URL query parameters rather
        # than a JSON body despite the Content-Type header — confirm against
        # Serper's API documentation if requests start failing.
        response = requests.post(
            f"https://google.serper.dev/{search_type}", headers=headers, params=params
        )
        response.raise_for_status()
        search_results = response.json()
        return search_results

    async def _async_google_serper_search_results(
        self, search_term: str, search_type: str = "search", **kwargs: Any
    ) -> dict:
        """Async POST to Serper, reusing ``self.aiosession`` when provided."""
        headers = {
            "X-API-KEY": self.serper_api_key or "",
            "Content-Type": "application/json",
        }
        url = f"https://google.serper.dev/{search_type}"
        params = {
            "q": search_term,
            **{key: value for key, value in kwargs.items() if value is not None},
        }

        if not self.aiosession:
            async with aiohttp.ClientSession() as session:
                async with session.post(
                    url, params=params, headers=headers, raise_for_status=False
                ) as response:
                    search_results = await response.json()
        else:
            # NOTE(review): this branch raises on HTTP errors while the
            # ad-hoc-session branch above passes raise_for_status=False —
            # confirm whether the asymmetry is intentional.
            async with self.aiosession.post(
                url, params=params, headers=headers, raise_for_status=True
            ) as response:
                search_results = await response.json()

        return search_results
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/spark_sql.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any, Iterable, List, Optional
if TYPE_CHECKING:
from pyspark.sql import DataFrame, Row, SparkSession
class SparkSQL:
    """SparkSQL is a utility class for interacting with Spark SQL."""

    def __init__(
        self,
        spark_session: Optional[SparkSession] = None,
        catalog: Optional[str] = None,
        schema: Optional[str] = None,
        ignore_tables: Optional[List[str]] = None,
        include_tables: Optional[List[str]] = None,
        sample_rows_in_table_info: int = 3,
    ):
        """Initialize a SparkSQL object.

        Args:
            spark_session: A SparkSession object.
                If not provided, one will be created.
            catalog: The catalog to use.
                If not provided, the default catalog will be used.
            schema: The schema to use.
                If not provided, the default schema will be used.
            ignore_tables: A list of tables to ignore.
                If not provided, all tables will be used.
            include_tables: A list of tables to include.
                If not provided, all tables will be used.
            sample_rows_in_table_info: The number of rows to include in the table info.
                Defaults to 3.

        Raises:
            ImportError: If pyspark is not installed.
            ValueError: If include_tables or ignore_tables reference tables
                that do not exist.
            TypeError: If sample_rows_in_table_info is not an integer.
        """
        try:
            from pyspark.sql import SparkSession
        except ImportError:
            raise ImportError(
                "pyspark is not installed. Please install it with `pip install pyspark`"
            )

        self._spark = (
            spark_session if spark_session else SparkSession.builder.getOrCreate()
        )
        # Switching catalog/schema here scopes every later SHOW/SELECT call.
        if catalog is not None:
            self._spark.catalog.setCurrentCatalog(catalog)
        if schema is not None:
            self._spark.catalog.setCurrentDatabase(schema)

        self._all_tables = set(self._get_all_table_names())
        self._include_tables = set(include_tables) if include_tables else set()
        if self._include_tables:
            missing_tables = self._include_tables - self._all_tables
            if missing_tables:
                raise ValueError(
                    f"include_tables {missing_tables} not found in database"
                )
        self._ignore_tables = set(ignore_tables) if ignore_tables else set()
        if self._ignore_tables:
            missing_tables = self._ignore_tables - self._all_tables
            if missing_tables:
                raise ValueError(
                    f"ignore_tables {missing_tables} not found in database"
                )
        usable_tables = self.get_usable_table_names()
        self._usable_tables = set(usable_tables) if usable_tables else self._all_tables

        if not isinstance(sample_rows_in_table_info, int):
            raise TypeError("sample_rows_in_table_info must be an integer")

        self._sample_rows_in_table_info = sample_rows_in_table_info

    @classmethod
    def from_uri(
        cls, database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any
    ) -> SparkSQL:
        """Creating a remote Spark Session via Spark connect.
        For example: SparkSQL.from_uri("sc://localhost:15002")
        """
        # NOTE(review): engine_args is accepted but never used — confirm
        # whether it should be forwarded to the session builder.
        try:
            from pyspark.sql import SparkSession
        except ImportError:
            raise ImportError(
                "pyspark is not installed. Please install it with `pip install pyspark`"
            )

        spark = SparkSession.builder.remote(database_uri).getOrCreate()
        return cls(spark, **kwargs)

    def get_usable_table_names(self) -> Iterable[str]:
        """Get names of tables available."""
        if self._include_tables:
            return self._include_tables
        # sorting the result can help LLM understanding it.
        return sorted(self._all_tables - self._ignore_tables)

    def _get_all_table_names(self) -> Iterable[str]:
        """Return every table name visible in the current catalog/schema."""
        rows = self._spark.sql("SHOW TABLES").select("tableName").collect()
        return list(map(lambda row: row.tableName, rows))

    def _get_create_table_stmt(self, table: str) -> str:
        """Return the CREATE TABLE statement for ``table``, cut off at USING."""
        statement = (
            self._spark.sql(f"SHOW CREATE TABLE {table}").collect()[0].createtab_stmt
        )
        # Ignore the data source provider and options to reduce the number of tokens.
        using_clause_index = statement.find("USING")
        return statement[:using_clause_index] + ";"

    def get_table_info(self, table_names: Optional[List[str]] = None) -> str:
        """Return CREATE TABLE statements (plus sample rows when configured)
        for ``table_names``, or for all usable tables when None.

        Raises:
            ValueError: If a requested table is not usable.
        """
        all_table_names = self.get_usable_table_names()
        if table_names is not None:
            missing_tables = set(table_names).difference(all_table_names)
            if missing_tables:
                raise ValueError(f"table_names {missing_tables} not found in database")
            all_table_names = table_names

        tables = []
        for table_name in all_table_names:
            table_info = self._get_create_table_stmt(table_name)
            # Sample rows are wrapped in a SQL comment so the whole string
            # stays a valid statement.
            if self._sample_rows_in_table_info:
                table_info += "\n\n/*"
                table_info += f"\n{self._get_sample_spark_rows(table_name)}\n"
                table_info += "*/"
            tables.append(table_info)
        final_str = "\n\n".join(tables)
        return final_str

    def _get_sample_spark_rows(self, table: str) -> str:
        """Render up to ``_sample_rows_in_table_info`` rows of ``table`` as
        tab-separated text; rows are omitted if the read fails."""
        query = f"SELECT * FROM {table} LIMIT {self._sample_rows_in_table_info}"
        df = self._spark.sql(query)
        columns_str = "\t".join(list(map(lambda f: f.name, df.schema.fields)))
        try:
            sample_rows = self._get_dataframe_results(df)
            # save the sample rows in string format
            sample_rows_str = "\n".join(["\t".join(row) for row in sample_rows])
        except Exception:
            # Sample rows are purely informative; degrade to an empty block.
            sample_rows_str = ""

        return (
            f"{self._sample_rows_in_table_info} rows from {table} table:\n"
            f"{columns_str}\n"
            f"{sample_rows_str}"
        )

    def _convert_row_as_tuple(self, row: Row) -> tuple:
        """Stringify every column of a Row into a plain tuple."""
        return tuple(map(str, row.asDict().values()))

    def _get_dataframe_results(self, df: DataFrame) -> list:
        """Collect ``df`` on the driver as a list of stringified tuples."""
        return list(map(self._convert_row_as_tuple, df.collect()))

    def run(self, command: str, fetch: str = "all") -> str:
        """Execute ``command``; fetch="one" limits the result to one row."""
        df = self._spark.sql(command)
        if fetch == "one":
            df = df.limit(1)
        return str(self._get_dataframe_results(df))

    def get_table_info_no_throw(self, table_names: Optional[List[str]] = None) -> str:
        """Get information about specified tables.

        Follows best practices as specified in: Rajkumar et al, 2022
        (https://arxiv.org/abs/2204.00498)

        If `sample_rows_in_table_info`, the specified number of sample rows will be
        appended to each table description. This can increase performance as
        demonstrated in the paper.
        """
        try:
            return self.get_table_info(table_names)
        except ValueError as e:
            """Format the error message"""
            return f"Error: {e}"

    def run_no_throw(self, command: str, fetch: str = "all") -> str:
        """Execute a SQL command and return a string representing the results.

        If the statement returns rows, a string of the results is returned.
        If the statement returns no rows, an empty string is returned.
        If the statement throws an error, the error message is returned.
        """
        try:
            return self.run(command, fetch)
        except Exception as e:
            """Format the error message"""
            return f"Error: {e}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/jina_search.py | import json
from typing import List
import requests
from langchain_core.documents import Document
from pydantic import BaseModel
from yarl import URL
class JinaSearchAPIWrapper(BaseModel):
    """Wrapper around the Jina search engine."""

    base_url: str = "https://s.jina.ai/"
    """The base URL for the Jina search engine."""

    def run(self, query: str) -> str:
        """Query the Jina search engine and return the results as a JSON string.

        Args:
            query: The query to search for.

        Returns: The results as a JSON string.
        """
        raw_results = self._search_request(query=query)
        formatted = []
        for entry in raw_results:
            formatted.append(
                {
                    "title": entry.get("title"),
                    "link": entry.get("url"),
                    "snippet": entry.get("description"),
                    "content": entry.get("content"),
                }
            )
        return json.dumps(formatted)

    def download_documents(self, query: str) -> List[Document]:
        """Query the Jina search engine and return the results as a list of Documents.

        Args:
            query: The query to search for.

        Returns: The results as a list of Documents.
        """
        documents = []
        for entry in self._search_request(query):
            entry_metadata = {
                "title": entry.get("title"),
                "link": entry.get("url"),
                "description": entry.get("description"),
            }
            documents.append(
                Document(
                    page_content=entry.get("content"),  # type: ignore[arg-type]
                    metadata=entry_metadata,
                )
            )
        return documents

    def _search_request(self, query: str) -> List[dict]:
        """GET the query against Jina and return the "data" payload list."""
        request_headers = {
            "Accept": "application/json",
        }
        # The query is appended to the base URL as the path component.
        target = str(URL(self.base_url + query))
        response = requests.get(target, headers=request_headers)

        if not response.ok:
            raise Exception(f"HTTP error {response.status_code}")

        return response.json().get("data", [])
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/oracleai.py | # Authors:
# Harichandan Roy (hroy)
# David Jiang (ddjiang)
#
# -----------------------------------------------------------------------------
# oracleai.py
# -----------------------------------------------------------------------------
from __future__ import annotations
import json
import logging
import traceback
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain_core.documents import Document
if TYPE_CHECKING:
from oracledb import Connection
logger = logging.getLogger(__name__)
"""OracleSummary class"""
class OracleSummary:
    """Get Summary

    Args:
        conn: Oracle Connection,
        params: Summary parameters,
        proxy: Proxy
    """

    # One canonical PL/SQL block; the three input shapes previously carried
    # three near-identical copies of this statement.
    _SUMMARY_PLSQL = """
                declare
                    input clob;
                begin
                    input := :data;
                    :summ := dbms_vector_chain.utl_to_summary(input, json(:params));
                end;"""

    def __init__(
        self, conn: Connection, params: Dict[str, Any], proxy: Optional[str] = None
    ):
        self.conn = conn
        self.proxy = proxy
        self.summary_params = params

    def get_summary(self, docs: Any) -> List[str]:
        """Get the summary of the input docs.

        Args:
            docs: The documents to generate summary for.
                  Allowed input types: str, Document, List[str], List[Document]
        Returns:
            List of summary text, one for each input doc.

        Raises:
            ImportError: If the ``oracledb`` driver is not installed.
            Exception: If an input element has an unsupported type, or the
                database call fails (logged, then re-raised).
        """
        # Empty input needs neither the driver nor a database round trip.
        if docs is None:
            return []

        try:
            import oracledb
        except ImportError as e:
            raise ImportError(
                "Unable to import oracledb, please install with "
                "`pip install -U oracledb`."
            ) from e

        cursor = None
        try:
            # Fetch CLOB results as strings instead of LOB locators.
            oracledb.defaults.fetch_lobs = False
            cursor = self.conn.cursor()

            if self.proxy:
                cursor.execute(
                    "begin utl_http.set_proxy(:proxy); end;", proxy=self.proxy
                )

            if isinstance(docs, str):
                return [self._summarize(oracledb, cursor, docs)]
            if isinstance(docs, Document):
                return [self._summarize(oracledb, cursor, docs.page_content)]
            if isinstance(docs, List):
                results: List[str] = []
                for doc in docs:
                    if isinstance(doc, str):
                        results.append(self._summarize(oracledb, cursor, doc))
                    elif isinstance(doc, Document):
                        results.append(
                            self._summarize(oracledb, cursor, doc.page_content)
                        )
                    else:
                        raise Exception("Invalid input type")
                return results
            raise Exception("Invalid input type")
        except Exception as ex:
            logger.info(f"An exception occurred :: {ex}")
            traceback.print_exc()
            raise
        finally:
            # Fix: the old code called cursor.close() inside the except
            # handler even when conn.cursor() itself had failed (NameError
            # masking the real error) and leaked the cursor on success paths
            # that raised later; close exactly once here instead.
            if cursor is not None:
                cursor.close()

    def _summarize(self, oracledb: Any, cursor: Any, text: str) -> str:
        """Run UTL_TO_SUMMARY for one text and return the summary string."""
        summary = cursor.var(oracledb.DB_TYPE_CLOB)
        cursor.execute(
            self._SUMMARY_PLSQL,
            data=text,
            params=json.dumps(self.summary_params),
            summ=summary,
        )
        if summary is None:
            return ""
        return str(summary.getvalue())
# uncomment the following code block to run the test
"""
# A sample unit test.
''' get the Oracle connection '''
conn = oracledb.connect(
user="",
password="",
dsn="")
print("Oracle connection is established...")
''' params '''
summary_params = {"provider": "database","glevel": "S",
"numParagraphs": 1,"language": "english"}
proxy = ""
''' instance '''
summ = OracleSummary(conn=conn, params=summary_params, proxy=proxy)
summary = summ.get_summary("In the heart of the forest, " +
"a lone fox ventured out at dusk, seeking a lost treasure. " +
"With each step, memories flooded back, guiding its path. " +
"As the moon rose high, illuminating the night, the fox unearthed " +
"not gold, but a forgotten friendship, worth more than any riches.")
print(f"Summary generated by OracleSummary: {summary}")
conn.close()
print("Connection is closed.")
"""
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/jira.py | """Util that calls Jira."""
from typing import Any, Dict, List, Optional
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
# TODO: think about error handling, more specific api specs, and jql/project limits
class JiraAPIWrapper(BaseModel):
    """Wrapper for Jira API.

    Credentials come from the constructor arguments or the environment
    variables ``JIRA_USERNAME``, ``JIRA_API_TOKEN``, ``JIRA_INSTANCE_URL``
    and ``JIRA_CLOUD``.
    """

    jira: Any = None  #: :meta private:
    confluence: Any = None
    jira_username: Optional[str] = None
    jira_api_token: Optional[str] = None
    jira_instance_url: Optional[str] = None
    jira_cloud: Optional[bool] = None

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        jira_username = get_from_dict_or_env(
            values, "jira_username", "JIRA_USERNAME", default=""
        )
        values["jira_username"] = jira_username

        jira_api_token = get_from_dict_or_env(
            values, "jira_api_token", "JIRA_API_TOKEN"
        )
        values["jira_api_token"] = jira_api_token

        jira_instance_url = get_from_dict_or_env(
            values, "jira_instance_url", "JIRA_INSTANCE_URL"
        )
        values["jira_instance_url"] = jira_instance_url

        # NOTE(review): no default here, so an unset JIRA_CLOUD raises —
        # confirm whether a default should be supplied.
        jira_cloud_str = get_from_dict_or_env(values, "jira_cloud", "JIRA_CLOUD")
        jira_cloud = jira_cloud_str.lower() == "true"
        values["jira_cloud"] = jira_cloud

        try:
            from atlassian import Confluence, Jira
        except ImportError:
            raise ImportError(
                "atlassian-python-api is not installed. "
                "Please install it with `pip install atlassian-python-api`"
            )

        # An empty username means token (PAT) auth; otherwise basic auth.
        if jira_username == "":
            jira = Jira(
                url=jira_instance_url,
                token=jira_api_token,
                cloud=jira_cloud,
            )
        else:
            jira = Jira(
                url=jira_instance_url,
                username=jira_username,
                password=jira_api_token,
                cloud=jira_cloud,
            )
        confluence = Confluence(
            url=jira_instance_url,
            username=jira_username,
            password=jira_api_token,
            cloud=jira_cloud,
        )
        values["jira"] = jira
        values["confluence"] = confluence

        return values

    def parse_issues(self, issues: Dict) -> List[dict]:
        """Flatten a Jira JQL response into a list of small summary dicts."""
        parsed = []
        for issue in issues["issues"]:
            key = issue["key"]
            summary = issue["fields"]["summary"]
            created = issue["fields"]["created"][0:10]
            priority = issue["fields"]["priority"]["name"]
            status = issue["fields"]["status"]["name"]
            try:
                assignee = issue["fields"]["assignee"]["displayName"]
            except Exception:
                assignee = "None"
            rel_issues = {}
            # NOTE: when an issue has several links only the last one is
            # kept — pre-existing behavior, preserved here.
            for related_issue in issue["fields"]["issuelinks"]:
                # Fix: build the record inside each branch so a link with
                # neither "inwardIssue" nor "outwardIssue" no longer raises
                # NameError on an unbound rel_type.
                if "inwardIssue" in related_issue.keys():
                    rel_issues = {
                        "type": related_issue["type"]["inward"],
                        "key": related_issue["inwardIssue"]["key"],
                        "summary": related_issue["inwardIssue"]["fields"]["summary"],
                    }
                if "outwardIssue" in related_issue.keys():
                    rel_issues = {
                        "type": related_issue["type"]["outward"],
                        "key": related_issue["outwardIssue"]["key"],
                        "summary": related_issue["outwardIssue"]["fields"]["summary"],
                    }
            parsed.append(
                {
                    "key": key,
                    "summary": summary,
                    "created": created,
                    "assignee": assignee,
                    "priority": priority,
                    "status": status,
                    "related_issues": rel_issues,
                }
            )
        return parsed

    def parse_projects(self, projects: List[dict]) -> List[dict]:
        """Reduce project payloads to id/key/name/type/style dicts."""
        parsed = []
        for project in projects:
            id = project["id"]
            key = project["key"]
            name = project["name"]
            type = project["projectTypeKey"]
            style = project["style"]
            parsed.append(
                {"id": id, "key": key, "name": name, "type": type, "style": style}
            )
        return parsed

    def search(self, query: str) -> str:
        """Run a JQL query and return a human-readable result summary."""
        issues = self.jira.jql(query)
        parsed_issues = self.parse_issues(issues)
        parsed_issues_str = (
            "Found " + str(len(parsed_issues)) + " issues:\n" + str(parsed_issues)
        )
        return parsed_issues_str

    def project(self) -> str:
        """List all projects visible to the authenticated user."""
        projects = self.jira.projects()
        parsed_projects = self.parse_projects(projects)
        parsed_projects_str = (
            "Found " + str(len(parsed_projects)) + " projects:\n" + str(parsed_projects)
        )
        return parsed_projects_str

    def issue_create(self, query: str) -> str:
        """Create an issue; ``query`` is a JSON string of issue fields."""
        # Fix: ``json`` is part of the standard library, so the previous
        # try/except ImportError (suggesting ``pip install json``) was dead
        # code with a misleading message.
        import json

        params = json.loads(query)
        return self.jira.issue_create(fields=dict(params))

    def page_create(self, query: str) -> str:
        """Create a Confluence page; ``query`` is a JSON string of kwargs."""
        import json

        params = json.loads(query)
        return self.confluence.create_page(**dict(params))

    def other(self, query: str) -> str:
        """Call an arbitrary Jira client method.

        ``query`` is a JSON string with a "function" name and optional
        "args"/"kwargs" entries.
        """
        import json

        params = json.loads(query)
        jira_function = getattr(self.jira, params["function"])
        return jira_function(*params.get("args", []), **params.get("kwargs", {}))

    def run(self, mode: str, query: str) -> str:
        """Dispatch to the operation selected by ``mode``."""
        if mode == "jql":
            return self.search(query)
        elif mode == "get_projects":
            return self.project()
        elif mode == "create_issue":
            return self.issue_create(query)
        elif mode == "other":
            return self.other(query)
        elif mode == "create_page":
            return self.page_create(query)
        else:
            raise ValueError(f"Got unexpected mode {mode}")
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/reddit_search.py | """Wrapper for the Reddit API"""
from typing import Any, Dict, List, Optional
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, model_validator
class RedditSearchAPIWrapper(BaseModel):
    """Wrapper for Reddit API

    To use, set the environment variables ``REDDIT_CLIENT_ID``,
    ``REDDIT_CLIENT_SECRET``, ``REDDIT_USER_AGENT`` to set the client ID,
    client secret, and user agent, respectively, as given by Reddit's API.
    Alternatively, all three can be supplied as named parameters in the
    constructor: ``reddit_client_id``, ``reddit_client_secret``, and
    ``reddit_user_agent``, respectively.

    Example:
        .. code-block:: python

            from langchain_community.utilities import RedditSearchAPIWrapper
            reddit_search = RedditSearchAPIWrapper()
    """

    # Authenticated praw.Reddit client, built once by validate_environment.
    reddit_client: Any

    # Values required to access Reddit API via praw
    reddit_client_id: Optional[str]
    reddit_client_secret: Optional[str]
    reddit_user_agent: Optional[str]

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that the API ID, secret and user agent exists in environment
        and check that praw module is present.
        """
        # Each credential may come either from the constructor kwargs or
        # from its corresponding environment variable.
        reddit_client_id = get_from_dict_or_env(
            values, "reddit_client_id", "REDDIT_CLIENT_ID"
        )
        values["reddit_client_id"] = reddit_client_id

        reddit_client_secret = get_from_dict_or_env(
            values, "reddit_client_secret", "REDDIT_CLIENT_SECRET"
        )
        values["reddit_client_secret"] = reddit_client_secret

        reddit_user_agent = get_from_dict_or_env(
            values, "reddit_user_agent", "REDDIT_USER_AGENT"
        )
        values["reddit_user_agent"] = reddit_user_agent

        try:
            import praw
        except ImportError:
            raise ImportError(
                "praw package not found, please install it with pip install praw"
            )

        # Build the authenticated client once and cache it on the model.
        reddit_client = praw.Reddit(
            client_id=reddit_client_id,
            client_secret=reddit_client_secret,
            user_agent=reddit_user_agent,
        )
        values["reddit_client"] = reddit_client

        return values

    def run(
        self, query: str, sort: str, time_filter: str, subreddit: str, limit: int
    ) -> str:
        """Search Reddit and return posts as a single string."""
        results: List[Dict] = self.results(
            query=query,
            sort=sort,
            time_filter=time_filter,
            subreddit=subreddit,
            limit=limit,
        )
        if len(results) > 0:
            output: List[str] = [f"Searching r/{subreddit} found {len(results)} posts:"]
            for r in results:
                # praw may return None for a post's category; normalise for display.
                category = "N/A" if r["post_category"] is None else r["post_category"]
                p = f"Post Title: '{r['post_title']}'\n\
User: {r['post_author']}\n\
Subreddit: {r['post_subreddit']}:\n\
Text body: {r['post_text']}\n\
Post URL: {r['post_url']}\n\
Post Category: {category}.\n\
Score: {r['post_score']}\n"
                output.append(p)
            return "\n".join(output)
        else:
            return f"Searching r/{subreddit} did not find any posts:"

    def results(
        self, query: str, sort: str, time_filter: str, subreddit: str, limit: int
    ) -> List[Dict]:
        """Use praw to search Reddit and return a list of dictionaries,
        one for each post.
        """
        subredditObject = self.reddit_client.subreddit(subreddit)
        search_results = subredditObject.search(
            query=query, sort=sort, time_filter=time_filter, limit=limit
        )
        # Materialize the praw listing generator into a list.
        search_results = [r for r in search_results]
        results_object = []
        for submission in search_results:
            results_object.append(
                {
                    "post_subreddit": submission.subreddit_name_prefixed,
                    "post_category": submission.category,
                    "post_title": submission.title,
                    "post_text": submission.selftext,
                    "post_score": submission.score,
                    "post_id": submission.id,
                    "post_url": submission.url,
                    "post_author": submission.author,
                }
            )
        return results_object
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/portkey.py | import json
import os
from typing import Dict, Optional
class Portkey:
    """Portkey configuration.

    Attributes:
        base: The base URL for the Portkey API.
            Default: "https://api.portkey.ai/v1/proxy"
    """

    base: str = "https://api.portkey.ai/v1/proxy"

    @staticmethod
    def Config(
        api_key: str,
        trace_id: Optional[str] = None,
        environment: Optional[str] = None,
        user: Optional[str] = None,
        organisation: Optional[str] = None,
        prompt: Optional[str] = None,
        retry_count: Optional[int] = None,
        cache: Optional[str] = None,
        cache_force_refresh: Optional[str] = None,
        cache_age: Optional[int] = None,
    ) -> Dict[str, str]:
        """Build Portkey proxy headers and point OPENAI_API_BASE at the proxy.

        Returns:
            Dict[str, str]: Header dict to attach to OpenAI requests.
        """
        # --- input validation (unchanged contract: AssertionError on misuse) ---
        assert retry_count is None or retry_count in range(
            1, 6
        ), "retry_count must be an integer and in range [1, 2, 3, 4, 5]"
        assert cache is None or cache in [
            "simple",
            "semantic",
        ], "cache must be 'simple' or 'semantic'"
        assert cache_force_refresh is None or (
            isinstance(cache_force_refresh, str)
            and cache_force_refresh in ["True", "False"]
        ), "cache_force_refresh must be 'True' or 'False'"
        assert cache_age is None or isinstance(
            cache_age, int
        ), "cache_age must be an integer"

        # Route OpenAI traffic through the Portkey proxy.
        os.environ["OPENAI_API_BASE"] = Portkey.base

        proxy_headers: Dict[str, str] = {
            "x-portkey-api-key": api_key,
            "x-portkey-mode": "proxy openai",
        }
        # Optional headers: only emitted when their value is truthy.
        optional_headers = (
            ("x-portkey-trace-id", trace_id),
            ("x-portkey-retry-count", str(retry_count) if retry_count else None),
            ("x-portkey-cache", cache),
            ("x-portkey-cache-force-refresh", cache_force_refresh),
        )
        for header_name, header_value in optional_headers:
            if header_value:
                proxy_headers[header_name] = header_value
        if cache_age:
            proxy_headers["Cache-Control"] = f"max-age:{str(cache_age)}"

        # Metadata fields are bundled into a single JSON-encoded header.
        metadata = {
            meta_key: meta_value
            for meta_key, meta_value in (
                ("_environment", environment),
                ("_user", user),
                ("_organisation", organisation),
                ("_prompt", prompt),
            )
            if meta_value
        }
        if metadata:
            proxy_headers["x-portkey-metadata"] = json.dumps(metadata)

        return proxy_headers
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/metaphor_search.py | """Util that calls Metaphor Search API.
In order to set this up, follow instructions at:
"""
import json
from typing import Any, Dict, List, Optional
import aiohttp
import requests
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
METAPHOR_API_URL = "https://api.metaphor.systems"
class MetaphorSearchAPIWrapper(BaseModel):
    """Wrapper for Metaphor Search API."""

    metaphor_api_key: str
    k: int = 10

    model_config = ConfigDict(
        extra="forbid",
    )

    def _build_request_body(
        self,
        query: str,
        num_results: int,
        include_domains: Optional[List[str]] = None,
        exclude_domains: Optional[List[str]] = None,
        start_crawl_date: Optional[str] = None,
        end_crawl_date: Optional[str] = None,
        start_published_date: Optional[str] = None,
        end_published_date: Optional[str] = None,
        use_autoprompt: Optional[bool] = None,
    ) -> Dict[str, Any]:
        """Assemble the JSON body for the /search endpoint.

        This dict was previously duplicated verbatim in the sync and async
        code paths, risking drift between them; it is now built in one place.
        """
        return {
            "numResults": num_results,
            "query": query,
            "includeDomains": include_domains,
            "excludeDomains": exclude_domains,
            "startCrawlDate": start_crawl_date,
            "endCrawlDate": end_crawl_date,
            "startPublishedDate": start_published_date,
            "endPublishedDate": end_published_date,
            "useAutoprompt": use_autoprompt,
        }

    def _metaphor_search_results(
        self,
        query: str,
        num_results: int,
        include_domains: Optional[List[str]] = None,
        exclude_domains: Optional[List[str]] = None,
        start_crawl_date: Optional[str] = None,
        end_crawl_date: Optional[str] = None,
        start_published_date: Optional[str] = None,
        end_published_date: Optional[str] = None,
        use_autoprompt: Optional[bool] = None,
    ) -> List[dict]:
        """Call the Metaphor /search endpoint synchronously and return
        the raw result list.
        """
        headers = {"X-Api-Key": self.metaphor_api_key}
        params = self._build_request_body(
            query,
            num_results,
            include_domains=include_domains,
            exclude_domains=exclude_domains,
            start_crawl_date=start_crawl_date,
            end_crawl_date=end_crawl_date,
            start_published_date=start_published_date,
            end_published_date=end_published_date,
            use_autoprompt=use_autoprompt,
        )
        response = requests.post(
            # type: ignore
            f"{METAPHOR_API_URL}/search",
            headers=headers,
            json=params,
        )
        response.raise_for_status()
        search_results = response.json()
        return search_results["results"]

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and endpoint exists in environment."""
        metaphor_api_key = get_from_dict_or_env(
            values, "metaphor_api_key", "METAPHOR_API_KEY"
        )
        values["metaphor_api_key"] = metaphor_api_key
        return values

    def results(
        self,
        query: str,
        num_results: int,
        include_domains: Optional[List[str]] = None,
        exclude_domains: Optional[List[str]] = None,
        start_crawl_date: Optional[str] = None,
        end_crawl_date: Optional[str] = None,
        start_published_date: Optional[str] = None,
        end_published_date: Optional[str] = None,
        use_autoprompt: Optional[bool] = None,
    ) -> List[Dict]:
        """Run query through Metaphor Search and return metadata.

        Args:
            query: The query to search for.
            num_results: The number of results to return.
            include_domains: A list of domains to include in the search. Only one of include_domains and exclude_domains should be defined.
            exclude_domains: A list of domains to exclude from the search. Only one of include_domains and exclude_domains should be defined.
            start_crawl_date: If specified, only pages we crawled after start_crawl_date will be returned.
            end_crawl_date: If specified, only pages we crawled before end_crawl_date will be returned.
            start_published_date: If specified, only pages published after start_published_date will be returned.
            end_published_date: If specified, only pages published before end_published_date will be returned.
            use_autoprompt: If true, we turn your query into a more Metaphor-friendly query. Adds latency.

        Returns:
            A list of dictionaries with the following keys:
                title - The title of the page
                url - The url
                author - Author of the content, if applicable. Otherwise, None.
                published_date - Estimated date published
                    in YYYY-MM-DD format. Otherwise, None.
        """  # noqa: E501
        raw_search_results = self._metaphor_search_results(
            query,
            num_results=num_results,
            include_domains=include_domains,
            exclude_domains=exclude_domains,
            start_crawl_date=start_crawl_date,
            end_crawl_date=end_crawl_date,
            start_published_date=start_published_date,
            end_published_date=end_published_date,
            use_autoprompt=use_autoprompt,
        )
        return self._clean_results(raw_search_results)

    async def results_async(
        self,
        query: str,
        num_results: int,
        include_domains: Optional[List[str]] = None,
        exclude_domains: Optional[List[str]] = None,
        start_crawl_date: Optional[str] = None,
        end_crawl_date: Optional[str] = None,
        start_published_date: Optional[str] = None,
        end_published_date: Optional[str] = None,
        use_autoprompt: Optional[bool] = None,
    ) -> List[Dict]:
        """Get results from the Metaphor Search API asynchronously."""

        # Function to perform the API call
        async def fetch() -> str:
            headers = {"X-Api-Key": self.metaphor_api_key}
            # Same request body as the synchronous path.
            params = self._build_request_body(
                query,
                num_results,
                include_domains=include_domains,
                exclude_domains=exclude_domains,
                start_crawl_date=start_crawl_date,
                end_crawl_date=end_crawl_date,
                start_published_date=start_published_date,
                end_published_date=end_published_date,
                use_autoprompt=use_autoprompt,
            )
            async with aiohttp.ClientSession() as session:
                async with session.post(
                    f"{METAPHOR_API_URL}/search", json=params, headers=headers
                ) as res:
                    if res.status == 200:
                        data = await res.text()
                        return data
                    else:
                        raise Exception(f"Error {res.status}: {res.reason}")

        results_json_str = await fetch()
        results_json = json.loads(results_json_str)
        return self._clean_results(results_json["results"])

    def _clean_results(self, raw_search_results: List[Dict]) -> List[Dict]:
        """Project each raw API result onto the documented output keys,
        substituting "Unknown ..." placeholders for missing fields.
        """
        cleaned_results = []
        for result in raw_search_results:
            cleaned_results.append(
                {
                    "title": result.get("title", "Unknown Title"),
                    "url": result.get("url", "Unknown URL"),
                    "author": result.get("author", "Unknown Author"),
                    "published_date": result.get("publishedDate", "Unknown Date"),
                }
            )
        return cleaned_results
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/tensorflow_datasets.py | import logging
from typing import Any, Callable, Dict, Iterator, List, Optional
from langchain_core.documents import Document
from pydantic import BaseModel, model_validator
logger = logging.getLogger(__name__)
class TensorflowDatasets(BaseModel):
    """Access to the TensorFlow Datasets.

    The Current implementation can work only with datasets that fit in a memory.

    `TensorFlow Datasets` is a collection of datasets ready to use, with TensorFlow
    or other Python ML frameworks, such as Jax. All datasets are exposed
    as `tf.data.Datasets`.
    To get started see the Guide: https://www.tensorflow.org/datasets/overview and
    the list of datasets: https://www.tensorflow.org/datasets/catalog/
    overview#all_datasets

    You have to provide the sample_to_document_function: a function that
    a sample from the dataset-specific format to the Document.

    Attributes:
        dataset_name: the name of the dataset to load
        split_name: the name of the split to load. Defaults to "train".
        load_max_docs: a limit to the number of loaded documents. Defaults to 100.
        sample_to_document_function: a function that converts a dataset sample
            to a Document

    Example:
        .. code-block:: python

            from langchain_community.utilities import TensorflowDatasets

            def mlqaen_example_to_document(example: dict) -> Document:
                return Document(
                    page_content=decode_to_str(example["context"]),
                    metadata={
                        "id": decode_to_str(example["id"]),
                        "title": decode_to_str(example["title"]),
                        "question": decode_to_str(example["question"]),
                        "answer": decode_to_str(example["answers"]["text"][0]),
                    },
                )

            tsds_client = TensorflowDatasets(
                    dataset_name="mlqa/en",
                    split_name="train",
                    load_max_docs=MAX_DOCS,
                    sample_to_document_function=mlqaen_example_to_document,
                )
    """

    dataset_name: str = ""
    split_name: str = "train"
    load_max_docs: int = 100
    sample_to_document_function: Optional[Callable[[Dict], Document]] = None
    dataset: Any  #: :meta private:

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that the python package exists in environment."""
        # Both packages are required: tensorflow provides the underlying
        # tf.data.Dataset type, tensorflow_datasets performs the load.
        try:
            import tensorflow  # noqa: F401
        except ImportError:
            raise ImportError(
                "Could not import tensorflow python package. "
                "Please install it with `pip install tensorflow`."
            )
        try:
            import tensorflow_datasets
        except ImportError:
            raise ImportError(
                "Could not import tensorflow_datasets python package. "
                "Please install it with `pip install tensorflow-datasets`."
            )
        if values["sample_to_document_function"] is None:
            raise ValueError(
                "sample_to_document_function is None. "
                "Please provide a function that converts a dataset sample to"
                " a Document."
            )
        # Load the requested split eagerly; the class only supports datasets
        # that fit in memory (see class docstring).
        values["dataset"] = tensorflow_datasets.load(
            values["dataset_name"], split=values["split_name"]
        )

        return values

    def lazy_load(self) -> Iterator[Document]:
        """Download a selected dataset lazily.

        Returns: an iterator of Documents.
        """
        # The validator guarantees sample_to_document_function is not None;
        # the guard in the generator expression only serves type narrowing.
        return (
            self.sample_to_document_function(s)
            for s in self.dataset.take(self.load_max_docs)
            if self.sample_to_document_function is not None
        )

    def load(self) -> List[Document]:
        """Download a selected dataset.

        Returns: a list of Documents.
        """
        return list(self.lazy_load())
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/pebblo.py | from __future__ import annotations
import json
import logging
import os
import pathlib
import platform
from enum import Enum
from http import HTTPStatus
from typing import Any, Dict, List, Optional, Tuple
from langchain_core.documents import Document
from langchain_core.env import get_runtime_environment
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel
from requests import Response, request
from requests.exceptions import RequestException
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
PLUGIN_VERSION = "0.1.1"
_DEFAULT_CLASSIFIER_URL = "http://localhost:8000"
_DEFAULT_PEBBLO_CLOUD_URL = "https://api.daxa.ai"
BATCH_SIZE_BYTES = 100 * 1024 # 100 KB
# Supported loaders for Pebblo safe data loading
file_loader = [
"JSONLoader",
"S3FileLoader",
"UnstructuredMarkdownLoader",
"UnstructuredPDFLoader",
"UnstructuredFileLoader",
"UnstructuredJsonLoader",
"PyPDFLoader",
"GCSFileLoader",
"AmazonTextractPDFLoader",
"CSVLoader",
"UnstructuredExcelLoader",
"UnstructuredEmailLoader",
]
dir_loader = [
"DirectoryLoader",
"S3DirLoader",
"SlackDirectoryLoader",
"PyPDFDirectoryLoader",
"NotionDirectoryLoader",
]
in_memory = ["DataFrameLoader"]
cloud_folder = [
"NotionDBLoader",
"GoogleDriveLoader",
"SharePointLoader",
]
LOADER_TYPE_MAPPING = {
"file": file_loader,
"dir": dir_loader,
"in-memory": in_memory,
"cloud-folder": cloud_folder,
}
class Routes(str, Enum):
    """Routes available for the Pebblo API as enumerator."""

    # Endpoint receiving batches of loader documents for classification.
    loader_doc = "/v1/loader/doc"
    # Endpoint receiving app discovery (metadata) payloads.
    loader_app_discover = "/v1/app/discover"
class IndexedDocument(Document):
    """Pebblo Indexed Document.

    A langchain ``Document`` extended with a stable identifier so that
    classification results returned by the Pebblo server can be matched
    back to the originating document.
    """

    pb_id: str
    """Unique ID of the document."""
class Runtime(BaseModel):
    """Pebblo Runtime.

    Describes the host environment (machine, OS, interpreter) in which the
    loader application runs; included in the app discovery payload.
    """

    type: str = "local"
    """Runtime type. Defaults to 'local'."""
    host: str
    """Host name of the runtime."""
    path: str
    """Current working directory path."""
    ip: Optional[str] = ""
    """IP address of the runtime. Defaults to ''."""
    platform: str
    """Platform details of the runtime."""
    os: str
    """OS name."""
    os_version: str
    """OS version."""
    language: str
    """Runtime kernel."""
    language_version: str
    """Version of the runtime kernel."""
    runtime: str = "local"
    """More runtime details. Defaults to 'local'."""
class Framework(BaseModel):
    """Pebblo Framework instance.

    Name/version pair identifying the client framework (e.g. langchain).
    """

    name: str
    """Name of the Framework."""
    version: str
    """Version of the Framework."""
class App(BaseModel):
    """Pebblo AI application.

    Discovery payload describing one loader application instance.
    """

    name: str
    """Name of the app."""
    owner: str
    """Owner of the app."""
    description: Optional[str]
    """Description of the app."""
    load_id: str
    """Unique load_id of the app instance."""
    runtime: Runtime
    """Runtime details of the app."""
    framework: Framework
    """Framework details of the app."""
    plugin_version: str
    """Plugin version used for the app."""
    client_version: Framework
    """Client version used for the app."""
class Doc(BaseModel):
    """Pebblo document.

    Classification payload wrapping a batch of loader documents together
    with the loader/app metadata required by the Pebblo server.
    """

    name: str
    """Name of app originating this document."""
    owner: str
    """Owner of app."""
    docs: list
    """List of documents with its metadata."""
    plugin_version: str
    """Pebblo plugin Version"""
    load_id: str
    """Unique load_id of the app instance."""
    loader_details: dict
    """Loader details with its metadata."""
    loading_end: bool
    """Boolean, specifying end of loading of source."""
    source_owner: str
    """Owner of the source of the loader."""
    classifier_location: str
    """Location of the classifier."""
    anonymize_snippets: bool
    """Whether to anonymize snippets going into VectorDB and the generated reports"""
def get_full_path(path: str) -> str:
    """Return an absolute local path for a local file/directory,
    for a network related path, return as is.

    Network-style locations (anything containing "://"), already-absolute
    paths, empty values and the placeholders "unknown", "-" and
    "in-memory" are passed through untouched.

    Args:
        path (str): Relative path to be resolved.

    Returns:
        str: Resolved absolute path.
    """
    if not path:
        return path
    is_passthrough = (
        "://" in path
        or path.startswith("/")
        or path in ("unknown", "-", "in-memory")
    )
    if is_passthrough:
        return path
    candidate = pathlib.Path(path)
    # Only resolve paths that actually exist; otherwise keep the raw form.
    return str(candidate.resolve() if candidate.exists() else candidate)
def get_loader_type(loader: str) -> str:
    """Return loader type among, file, dir or in-memory.

    Args:
        loader (str): Name of the loader, whose type is to be resolved.

    Returns:
        str: One of the loader type among, file/dir/in-memory,
        or "unsupported" when the loader is unknown.
    """
    return next(
        (
            category
            for category, loader_names in LOADER_TYPE_MAPPING.items()
            if loader in loader_names
        ),
        "unsupported",
    )
def get_loader_full_path(loader: BaseLoader) -> str:
    """Return an absolute source path of source of loader based on the
    keys present in Document.

    Args:
        loader (BaseLoader): Langchain document loader, derived from Baseloader.
    """
    # Imported lazily to avoid a circular import with document_loaders.
    from langchain_community.document_loaders import (
        DataFrameLoader,
        GCSFileLoader,
        NotionDBLoader,
        S3FileLoader,
    )

    location = "-"
    if not isinstance(loader, BaseLoader):
        logger.error(
            "loader is not derived from BaseLoader, source location will be unknown!"
        )
        return location
    loader_dict = loader.__dict__
    try:
        # Probe the loader's instance attributes in priority order; the
        # first matching attribute determines the reported location.
        if "bucket" in loader_dict:
            if isinstance(loader, GCSFileLoader):
                location = f"gc://{loader.bucket}/{loader.blob}"
            elif isinstance(loader, S3FileLoader):
                location = f"s3://{loader.bucket}/{loader.key}"
        elif "source" in loader_dict:
            location = loader_dict["source"]
            # Slack-style loaders carry an extra channel component.
            if location and "channel" in loader_dict:
                channel = loader_dict["channel"]
                if channel:
                    location = f"{location}/{channel}"
        elif "path" in loader_dict:
            location = loader_dict["path"]
        elif "file_path" in loader_dict:
            location = loader_dict["file_path"]
        elif "web_paths" in loader_dict:
            web_paths = loader_dict["web_paths"]
            if web_paths and isinstance(web_paths, list) and len(web_paths) > 0:
                location = web_paths[0]
        # For in-memory types:
        elif isinstance(loader, DataFrameLoader):
            location = "in-memory"
        elif isinstance(loader, NotionDBLoader):
            location = f"notiondb://{loader.database_id}"
        elif loader.__class__.__name__ == "GoogleDriveLoader":
            # GoogleDriveLoader may target a folder, individual files or docs.
            if loader_dict.get("folder_id"):
                folder_id = loader_dict.get("folder_id")
                location = f"https://drive.google.com/drive/u/2/folders/{folder_id}"
            elif loader_dict.get("file_ids"):
                file_ids = loader_dict.get("file_ids", [])
                location = ", ".join(
                    [
                        f"https://drive.google.com/file/d/{file_id}/view"
                        for file_id in file_ids
                    ]
                )
            elif loader_dict.get("document_ids"):
                document_ids = loader_dict.get("document_ids", [])
                location = ", ".join(
                    [
                        f"https://docs.google.com/document/d/{doc_id}/edit"
                        for doc_id in document_ids
                    ]
                )
    except Exception:
        # Best-effort resolution: any attribute failure falls back to "-".
        pass
    return get_full_path(str(location))
def get_runtime() -> Tuple[Framework, Runtime]:
    """Fetch the current Framework and Runtime details.

    Returns:
        Tuple[Framework, Runtime]: Framework and Runtime for the current app instance.
    """
    runtime_env = get_runtime_environment()
    framework = Framework(
        name="langchain", version=runtime_env.get("library_version", None)
    )
    uname = platform.uname()
    runtime = Runtime(
        host=uname.node,
        # PWD is only guaranteed by POSIX shells; fall back to the process's
        # actual working directory instead of raising KeyError (e.g. on
        # Windows or when the process is launched outside a shell).
        path=os.environ.get("PWD") or os.getcwd(),
        platform=runtime_env.get("platform", "unknown"),
        os=uname.system,
        os_version=uname.version,
        ip=get_ip(),
        language=runtime_env.get("runtime", "unknown"),
        language_version=runtime_env.get("runtime_version", "unknown"),
    )

    if "Darwin" in runtime.os:
        runtime.type = "desktop"
        runtime.runtime = "Mac OSX"

    logger.debug(f"framework {framework}")
    logger.debug(f"runtime {runtime}")
    return framework, runtime
def get_ip() -> str:
    """Fetch local runtime ip address.

    Returns:
        str: IP address
    """
    import socket  # lazy imports

    hostname = socket.gethostname()
    try:
        return socket.gethostbyname(hostname)
    except Exception:
        # Hostname could not be resolved; fall back to loopback resolution.
        return socket.gethostbyname("localhost")
def generate_size_based_batches(
    docs: List[Document], max_batch_size: int = 100 * 1024
) -> List[List[Document]]:
    """
    Generate batches of documents based on page_content size.

    Document order is preserved across batches.

    Args:
        docs: List of documents to be batched.
        max_batch_size: Maximum size of each batch in bytes. Defaults to 100*1024(100KB)

    Returns:
        List[List[Document]]: List of batches of documents
    """
    batches: List[List[Document]] = []
    current_batch: List[Document] = []
    current_batch_size: int = 0

    for doc in docs:
        # Calculate the size of the document in bytes
        doc_size: int = len(doc.page_content.encode("utf-8"))

        if doc_size > max_batch_size:
            # Oversized document: flush the in-progress batch first so the
            # original document order is preserved (previously the oversized
            # batch was emitted before documents that preceded it), then
            # send the big document as its own single-element batch.
            if current_batch:
                batches.append(current_batch)
                current_batch = []
                current_batch_size = 0
            batches.append([doc])
        else:
            if current_batch_size + doc_size > max_batch_size:
                # If adding this document exceeds the max batch size, start a new batch
                batches.append(current_batch)
                current_batch = []
                current_batch_size = 0

            # Add document to the current batch
            current_batch.append(doc)
            current_batch_size += doc_size

    # Add the last batch if it has documents
    if current_batch:
        batches.append(current_batch)

    return batches
def get_file_owner_from_path(file_path: str) -> str:
    """Fetch owner of local file path.

    Falls back to "unknown" when the path does not exist or the platform
    has no ``pwd`` module (e.g. Windows).

    Args:
        file_path (str): Local file path.

    Returns:
        str: Name of owner.
    """
    try:
        import pwd

        owner_uid = os.stat(file_path).st_uid
        return pwd.getpwuid(owner_uid).pw_name
    except Exception:
        return "unknown"
def get_source_size(source_path: str) -> int:
    """Fetch size of source path. Source can be a directory or a file.

    Args:
        source_path (str): Local path of data source.

    Returns:
        int: Source size in bytes (0 for empty/unknown paths).
    """
    if not source_path:
        return 0
    if os.path.isfile(source_path):
        return os.path.getsize(source_path)
    if os.path.isdir(source_path):
        # Recursive directory walk; symlinks are skipped to avoid
        # double-counting (and broken links).
        return sum(
            os.path.getsize(os.path.join(dirpath, name))
            for dirpath, _, filenames in os.walk(source_path)
            for name in filenames
            if not os.path.islink(os.path.join(dirpath, name))
        )
    # Neither a file nor a directory (e.g. missing path).
    return 0
def calculate_content_size(data: str) -> int:
    """Calculate the content size in bytes:
    - Encode the string to bytes using a specific encoding (e.g., UTF-8)
    - Get the length of the encoded bytes.

    Args:
        data (str): Data string.

    Returns:
        int: Size of string in bytes.
    """
    return len(data.encode("utf-8"))
class PebbloLoaderAPIWrapper(BaseModel):
    """Wrapper for Pebblo Loader API.

    Handles communication with the local Pebblo classifier server and,
    when an API key is configured, with Pebblo Cloud: app discovery,
    document classification, and forwarding of classified documents.
    """

    api_key: Optional[str]  # Use SecretStr
    """API key for Pebblo Cloud"""
    classifier_location: str = "local"
    """Location of the classifier, local or cloud. Defaults to 'local'"""
    classifier_url: Optional[str]
    """URL of the Pebblo Classifier"""
    cloud_url: Optional[str]
    """URL of the Pebblo Cloud"""
    anonymize_snippets: bool = False
    """Whether to anonymize snippets going into VectorDB and the generated reports"""

    def __init__(self, **kwargs: Any):
        """Validate that api key in environment."""
        # Each setting may come from kwargs or its environment variable;
        # the final argument is the default used when neither is present.
        kwargs["api_key"] = get_from_dict_or_env(
            kwargs, "api_key", "PEBBLO_API_KEY", ""
        )
        kwargs["classifier_url"] = get_from_dict_or_env(
            kwargs, "classifier_url", "PEBBLO_CLASSIFIER_URL", _DEFAULT_CLASSIFIER_URL
        )
        kwargs["cloud_url"] = get_from_dict_or_env(
            kwargs, "cloud_url", "PEBBLO_CLOUD_URL", _DEFAULT_PEBBLO_CLOUD_URL
        )
        super().__init__(**kwargs)

    def send_loader_discover(self, app: App) -> None:
        """
        Send app discovery request to Pebblo server & cloud.

        Args:
            app (App): App instance to be discovered.
        """
        pebblo_resp = None
        payload = app.dict(exclude_unset=True)

        if self.classifier_location == "local":
            # Send app details to local classifier
            headers = self._make_headers()
            app_discover_url = (
                f"{self.classifier_url}{Routes.loader_app_discover.value}"
            )
            pebblo_resp = self.make_request("POST", app_discover_url, headers, payload)

        if self.api_key:
            # Send app details to Pebblo cloud if api_key is present
            headers = self._make_headers(cloud_request=True)
            if pebblo_resp:
                # Propagate the local server's version into the cloud payload.
                pebblo_server_version = json.loads(pebblo_resp.text).get(
                    "pebblo_server_version"
                )
                payload.update({"pebblo_server_version": pebblo_server_version})

            payload.update({"pebblo_client_version": PLUGIN_VERSION})
            pebblo_cloud_url = f"{self.cloud_url}{Routes.loader_app_discover.value}"
            _ = self.make_request("POST", pebblo_cloud_url, headers, payload)

    def classify_documents(
        self,
        docs_with_id: List[IndexedDocument],
        app: App,
        loader_details: dict,
        loading_end: bool = False,
    ) -> dict:
        """
        Send documents to Pebblo server for classification.
        Then send classified documents to Daxa cloud(If api_key is present).

        Args:
            docs_with_id (List[IndexedDocument]): List of documents to be classified.
            app (App): App instance.
            loader_details (dict): Loader details.
            loading_end (bool): Boolean, indicating the halt of data loading by loader.

        Returns:
            dict: Classified documents keyed by their ``pb_id``.
        """
        source_path = loader_details.get("source_path", "")
        source_owner = get_file_owner_from_path(source_path)
        # Prepare docs for classification
        docs, source_aggregate_size = self.prepare_docs_for_classification(
            docs_with_id, source_path, loader_details
        )
        # Build payload for classification
        payload = self.build_classification_payload(
            app, docs, loader_details, source_owner, source_aggregate_size, loading_end
        )

        classified_docs = {}
        if self.classifier_location == "local":
            # Send docs to local classifier
            headers = self._make_headers()
            load_doc_url = f"{self.classifier_url}{Routes.loader_doc.value}"
            try:
                # Classification can be slow for large batches; use a long timeout.
                pebblo_resp = self.make_request(
                    "POST", load_doc_url, headers, payload, 300
                )

                if pebblo_resp:
                    # Updating structure of pebblo response docs for efficient searching
                    for classified_doc in json.loads(pebblo_resp.text).get("docs", []):
                        classified_docs.update(
                            {classified_doc["pb_id"]: classified_doc}
                        )
            except Exception as e:
                logger.warning("An Exception caught in classify_documents: local %s", e)

        if self.api_key:
            # Send docs to Pebblo cloud if api_key is present
            if self.classifier_location == "local":
                # If local classifier is used add the classified information
                # and remove doc content
                self.update_doc_data(payload["docs"], classified_docs)
            # Remove the anonymize_snippets key from payload
            payload.pop("anonymize_snippets", None)
            self.send_docs_to_pebblo_cloud(payload)
        elif self.classifier_location == "pebblo-cloud":
            # Cloud classification without an API key is a configuration
            # error; fail loudly rather than silently skipping.
            logger.warning("API key is missing for sending docs to Pebblo cloud.")
            raise NameError("API key is missing for sending docs to Pebblo cloud.")

        return classified_docs

    def send_docs_to_pebblo_cloud(self, payload: dict) -> None:
        """
        Send documents to Pebblo cloud.

        Args:
            payload (dict): The payload containing documents to be sent.
        """
        headers = self._make_headers(cloud_request=True)
        pebblo_cloud_url = f"{self.cloud_url}{Routes.loader_doc.value}"
        try:
            _ = self.make_request("POST", pebblo_cloud_url, headers, payload)
        except Exception as e:
            logger.warning("An Exception caught in classify_documents: cloud %s", e)

    def _make_headers(self, cloud_request: bool = False) -> dict:
        """
        Generate headers for the request.

        args:
            cloud_request (bool): flag indicating whether the request is for Pebblo
                cloud.
        returns:
            dict: Headers for the request.

        """
        headers = {
            "Accept": "application/json",
            "Content-Type": "application/json",
        }
        if cloud_request:
            # Add API key for Pebblo cloud request
            if self.api_key:
                headers.update({"x-api-key": self.api_key})
            else:
                logger.warning("API key is missing for Pebblo cloud request.")
        return headers

    def build_classification_payload(
        self,
        app: App,
        docs: List[dict],
        loader_details: dict,
        source_owner: str,
        source_aggregate_size: int,
        loading_end: bool,
    ) -> dict:
        """
        Build the payload for document classification.

        Args:
            app (App): App instance.
            docs (List[dict]): List of documents to be classified.
            loader_details (dict): Loader details.
            source_owner (str): Owner of the source.
            source_aggregate_size (int): Aggregate size of the source.
            loading_end (bool): Boolean indicating the halt of data loading by loader.

        Returns:
            dict: Payload for document classification.
        """
        payload: Dict[str, Any] = {
            "name": app.name,
            "owner": app.owner,
            "docs": docs,
            "plugin_version": PLUGIN_VERSION,
            "load_id": app.load_id,
            "loader_details": loader_details,
            # The server expects string booleans here, not Python bools.
            "loading_end": "false",
            "source_owner": source_owner,
            "classifier_location": self.classifier_location,
            "anonymize_snippets": self.anonymize_snippets,
        }
        if loading_end is True:
            payload["loading_end"] = "true"
            if "loader_details" in payload:
                # Only the final batch carries the aggregate source size.
                payload["loader_details"]["source_aggregate_size"] = (
                    source_aggregate_size
                )
        payload = Doc(**payload).dict(exclude_unset=True)
        return payload

    @staticmethod
    def make_request(
        method: str,
        url: str,
        headers: dict,
        payload: Optional[dict] = None,
        timeout: int = 20,
    ) -> Optional[Response]:
        """
        Make a request to the Pebblo API

        Args:
            method (str): HTTP method (GET, POST, PUT, DELETE, etc.).
            url (str): URL for the request.
            headers (dict): Headers for the request.
            payload (Optional[dict]): Payload for the request (for POST, PUT, etc.).
            timeout (int): Timeout for the request in seconds.

        Returns:
            Optional[Response]: Response object if the request is successful
            (any status code); ``None`` when the request itself failed.
        """
        try:
            response = request(
                method=method, url=url, headers=headers, json=payload, timeout=timeout
            )
            logger.debug(
                "Request: method %s, url %s, len %s response status %s",
                method,
                response.request.url,
                str(len(response.request.body if response.request.body else [])),
                str(response.status_code),
            )
            # Non-2xx responses are logged but still returned to the caller.
            if response.status_code >= HTTPStatus.INTERNAL_SERVER_ERROR:
                logger.warning(f"Pebblo Server: Error {response.status_code}")
            elif response.status_code >= HTTPStatus.BAD_REQUEST:
                logger.warning(f"Pebblo received an invalid payload: {response.text}")
            elif response.status_code != HTTPStatus.OK:
                logger.warning(
                    f"Pebblo returned an unexpected response code: "
                    f"{response.status_code}"
                )
            return response
        except RequestException:
            logger.warning("Unable to reach server %s", url)
        except Exception as e:
            logger.warning("An Exception caught in make_request: %s", e)
        return None

    @staticmethod
    def prepare_docs_for_classification(
        docs_with_id: List[IndexedDocument],
        source_path: str,
        loader_details: dict,
    ) -> Tuple[List[dict], int]:
        """
        Prepare documents for classification.

        Args:
            docs_with_id (List[IndexedDocument]): List of documents to be classified.
            source_path (str): Source path of the documents.
            loader_details (dict): Contains loader info.

        Returns:
            Tuple[List[dict], int]: Documents and the aggregate size
            of the source.
        """
        docs = []
        source_aggregate_size = 0
        doc_content = [doc.dict() for doc in docs_with_id]
        source_path_update = False
        for doc in doc_content:
            doc_metadata = doc.get("metadata", {})
            doc_authorized_identities = doc_metadata.get("authorized_identities", [])
            # SharePoint documents carry their own per-document source path;
            # other loaders may expose it as "full_path" or "source".
            if loader_details["loader"] == "SharePointLoader":
                doc_source_path = get_full_path(
                    doc_metadata.get("source", loader_details["source_path"])
                )
            else:
                doc_source_path = get_full_path(
                    doc_metadata.get(
                        "full_path",
                        doc_metadata.get("source", source_path),
                    )
                )
            doc_source_owner = doc_metadata.get(
                "owner", get_file_owner_from_path(doc_source_path)
            )
            doc_source_size = doc_metadata.get("size", get_source_size(doc_source_path))
            page_content = str(doc.get("page_content"))
            page_content_size = calculate_content_size(page_content)
            source_aggregate_size += page_content_size
            doc_id = doc.get("pb_id", None) or 0
            docs.append(
                {
                    "doc": page_content,
                    "source_path": doc_source_path,
                    "pb_id": doc_id,
                    "last_modified": doc.get("metadata", {}).get("last_modified"),
                    "file_owner": doc_source_owner,
                    **(
                        {"authorized_identities": doc_authorized_identities}
                        if doc_authorized_identities
                        else {}
                    ),
                    **(
                        {"source_path_size": doc_source_size}
                        if doc_source_size is not None
                        else {}
                    ),
                }
            )
            if (
                loader_details["loader"] == "SharePointLoader"
                and not source_path_update
            ):
                # For SharePoint, replace the loader-level source path with
                # the original web URL of the source (done only once).
                loader_details["source_path"] = doc_metadata.get("source_full_url")
                source_path_update = True
        return docs, source_aggregate_size

    @staticmethod
    def update_doc_data(docs: List[dict], classified_docs: dict) -> None:
        """
        Update the document data with classified information.

        Args:
            docs (List[dict]): List of document data to be updated.
            classified_docs (dict): The dictionary containing classified documents.
        """
        for doc_data in docs:
            classified_data = classified_docs.get(doc_data["pb_id"], {})
            # Update the document data with classified information
            doc_data.update(
                {
                    "pb_checksum": classified_data.get("pb_checksum"),
                    "loader_source_path": classified_data.get("loader_source_path"),
                    "entities": classified_data.get("entities", {}),
                    "topics": classified_data.get("topics", {}),
                }
            )
            # Remove the document content
            doc_data.pop("doc")
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/tavily_search.py | """Util that calls Tavily Search API.
In order to set this up, follow instructions at:
https://docs.tavily.com/docs/tavily-api/introduction
"""
import json
from typing import Any, Dict, List, Optional
import aiohttp
import requests
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, SecretStr, model_validator
TAVILY_API_URL = "https://api.tavily.com"
class TavilySearchAPIWrapper(BaseModel):
    """Wrapper for Tavily Search API.

    Handles authentication (API key via argument or the TAVILY_API_KEY
    environment variable) and exposes sync and async search methods.
    """

    tavily_api_key: SecretStr

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and endpoint exists in environment."""
        tavily_api_key = get_from_dict_or_env(
            values, "tavily_api_key", "TAVILY_API_KEY"
        )
        values["tavily_api_key"] = tavily_api_key
        return values

    def _build_request_body(
        self,
        query: str,
        max_results: Optional[int],
        search_depth: Optional[str],
        include_domains: Optional[List[str]],
        exclude_domains: Optional[List[str]],
        include_answer: Optional[bool],
        include_raw_content: Optional[bool],
        include_images: Optional[bool],
    ) -> Dict:
        """Build the JSON body shared by the sync and async search calls.

        ``None`` domain filters (the new defaults, replacing the mutable
        ``[]`` default arguments) are normalized to empty lists so the
        payload sent to the API is unchanged.
        """
        return {
            "api_key": self.tavily_api_key.get_secret_value(),
            "query": query,
            "max_results": max_results,
            "search_depth": search_depth,
            "include_domains": include_domains if include_domains is not None else [],
            "exclude_domains": exclude_domains if exclude_domains is not None else [],
            "include_answer": include_answer,
            "include_raw_content": include_raw_content,
            "include_images": include_images,
        }

    def raw_results(
        self,
        query: str,
        max_results: Optional[int] = 5,
        search_depth: Optional[str] = "advanced",
        include_domains: Optional[List[str]] = None,
        exclude_domains: Optional[List[str]] = None,
        include_answer: Optional[bool] = False,
        include_raw_content: Optional[bool] = False,
        include_images: Optional[bool] = False,
    ) -> Dict:
        """Run a query through the Tavily API and return the raw response dict.

        Raises:
            requests.HTTPError: If the API responds with a non-2xx status.
        """
        params = self._build_request_body(
            query,
            max_results,
            search_depth,
            include_domains,
            exclude_domains,
            include_answer,
            include_raw_content,
            include_images,
        )
        response = requests.post(
            # type: ignore
            f"{TAVILY_API_URL}/search",
            json=params,
        )
        response.raise_for_status()
        return response.json()

    def results(
        self,
        query: str,
        max_results: Optional[int] = 5,
        search_depth: Optional[str] = "advanced",
        include_domains: Optional[List[str]] = None,
        exclude_domains: Optional[List[str]] = None,
        include_answer: Optional[bool] = False,
        include_raw_content: Optional[bool] = False,
        include_images: Optional[bool] = False,
    ) -> List[Dict]:
        """Run query through Tavily Search and return metadata.

        Args:
            query: The query to search for.
            max_results: The maximum number of results to return.
            search_depth: The depth of the search. Can be "basic" or "advanced".
            include_domains: A list of domains to include in the search.
            exclude_domains: A list of domains to exclude from the search.
            include_answer: Whether to include the answer in the results.
            include_raw_content: Whether to include the raw content in the results.
            include_images: Whether to include images in the results.

        Returns:
            query: The query that was searched for.
            follow_up_questions: A list of follow up questions.
            response_time: The response time of the query.
            answer: The answer to the query.
            images: A list of images.
            results: A list of dictionaries containing the results:
                title: The title of the result.
                url: The url of the result.
                content: The content of the result.
                score: The score of the result.
                raw_content: The raw content of the result.
        """
        raw_search_results = self.raw_results(
            query,
            max_results=max_results,
            search_depth=search_depth,
            include_domains=include_domains,
            exclude_domains=exclude_domains,
            include_answer=include_answer,
            include_raw_content=include_raw_content,
            include_images=include_images,
        )
        return self.clean_results(raw_search_results["results"])

    async def raw_results_async(
        self,
        query: str,
        max_results: Optional[int] = 5,
        search_depth: Optional[str] = "advanced",
        include_domains: Optional[List[str]] = None,
        exclude_domains: Optional[List[str]] = None,
        include_answer: Optional[bool] = False,
        include_raw_content: Optional[bool] = False,
        include_images: Optional[bool] = False,
    ) -> Dict:
        """Get results from the Tavily Search API asynchronously.

        Raises:
            Exception: If the API responds with a non-200 status.
        """
        params = self._build_request_body(
            query,
            max_results,
            search_depth,
            include_domains,
            exclude_domains,
            include_answer,
            include_raw_content,
            include_images,
        )

        # Function to perform the API call
        async def fetch() -> str:
            async with aiohttp.ClientSession() as session:
                async with session.post(f"{TAVILY_API_URL}/search", json=params) as res:
                    if res.status == 200:
                        data = await res.text()
                        return data
                    else:
                        raise Exception(f"Error {res.status}: {res.reason}")

        results_json_str = await fetch()
        return json.loads(results_json_str)

    async def results_async(
        self,
        query: str,
        max_results: Optional[int] = 5,
        search_depth: Optional[str] = "advanced",
        include_domains: Optional[List[str]] = None,
        exclude_domains: Optional[List[str]] = None,
        include_answer: Optional[bool] = False,
        include_raw_content: Optional[bool] = False,
        include_images: Optional[bool] = False,
    ) -> List[Dict]:
        """Async counterpart of ``results``: search and return cleaned results."""
        results_json = await self.raw_results_async(
            query=query,
            max_results=max_results,
            search_depth=search_depth,
            include_domains=include_domains,
            exclude_domains=exclude_domains,
            include_answer=include_answer,
            include_raw_content=include_raw_content,
            include_images=include_images,
        )
        return self.clean_results(results_json["results"])

    def clean_results(self, results: List[Dict]) -> List[Dict]:
        """Clean results from Tavily Search API.

        Keeps only the ``url`` and ``content`` fields of each result.
        """
        return [
            {
                "url": result["url"],
                "content": result["content"],
            }
            for result in results
        ]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/outline.py | """Util that calls Outline."""
import logging
from typing import Any, Dict, List, Optional
import requests
from langchain_core.documents import Document
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, model_validator
logger = logging.getLogger(__name__)
OUTLINE_MAX_QUERY_LENGTH = 300
class OutlineAPIWrapper(BaseModel):
    """Wrapper around OutlineAPI.

    This wrapper will use the Outline API to query the documents of your instance.
    By default it will return the document content of the top-k results.
    It limits the document content by doc_content_chars_max.
    """

    top_k_results: int = 3
    load_all_available_meta: bool = False
    doc_content_chars_max: int = 4000
    outline_instance_url: Optional[str] = None
    outline_api_key: Optional[str] = None
    outline_search_endpoint: str = "/api/documents.search"

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that instance url and api key exists in environment."""
        # Each setting may come from the input values or the environment.
        for field, env_var in (
            ("outline_instance_url", "OUTLINE_INSTANCE_URL"),
            ("outline_api_key", "OUTLINE_API_KEY"),
        ):
            values[field] = get_from_dict_or_env(values, field, env_var)
        return values

    def _result_to_document(self, outline_res: Any) -> Document:
        """Convert one Outline search hit into a langchain Document."""
        document = outline_res["document"]
        metadata = {
            "title": document["title"],
            "source": self.outline_instance_url + document["url"],
        }
        if self.load_all_available_meta:
            # Extended metadata is opt-in via load_all_available_meta.
            metadata.update(
                {
                    "id": document["id"],
                    "ranking": outline_res["ranking"],
                    "collection_id": document["collectionId"],
                    "parent_document_id": document["parentDocumentId"],
                    "revision": document["revision"],
                    "created_by": document["createdBy"]["name"],
                }
            )
        return Document(
            page_content=document["text"][: self.doc_content_chars_max],
            metadata=metadata,
        )

    def _outline_api_query(self, query: str) -> List:
        """POST the search query to the Outline instance and return raw hits."""
        response = requests.post(
            f"{self.outline_instance_url}{self.outline_search_endpoint}",
            data={"query": query, "limit": self.top_k_results},
            headers={"Authorization": f"Bearer {self.outline_api_key}"},
        )
        if not response.ok:
            raise ValueError("Outline API returned an error: ", response.text)
        return response.json()["data"]

    def run(self, query: str) -> List[Document]:
        """
        Run Outline search and get the document content plus the meta information.

        Returns: a list of documents.
        """
        truncated_query = query[:OUTLINE_MAX_QUERY_LENGTH]
        search_hits = self._outline_api_query(truncated_query)
        candidates = (
            self._result_to_document(hit)
            for hit in search_hits[: self.top_k_results]
        )
        return [doc for doc in candidates if doc]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/astradb.py | from __future__ import annotations
import asyncio
import inspect
from asyncio import InvalidStateError, Task
from enum import Enum
from typing import TYPE_CHECKING, Awaitable, Optional, Union
if TYPE_CHECKING:
from astrapy.db import (
AstraDB,
AsyncAstraDB,
)
class SetupMode(Enum):
    """Setup mode for AstraDBEnvironment as enumerator."""

    # Create/verify the collection synchronously during __init__.
    SYNC = 1
    # Schedule collection setup as an asyncio task; completion is checked via
    # the ensure_db_setup / aensure_db_setup methods.
    ASYNC = 2
    # Skip collection setup entirely (assume it already exists).
    OFF = 3
class _AstraDBEnvironment:
    """Resolve and hold a matched pair of sync/async Astra DB clients.

    Callers pass either ready-made client objects or (token, api_endpoint)
    credentials; on success both ``self.astra_db`` and ``self.async_astra_db``
    are populated.
    """

    def __init__(
        self,
        token: Optional[str] = None,
        api_endpoint: Optional[str] = None,
        astra_db_client: Optional[AstraDB] = None,
        async_astra_db_client: Optional[AsyncAstraDB] = None,
        namespace: Optional[str] = None,
    ) -> None:
        self.token = token
        self.api_endpoint = api_endpoint
        astra_db = astra_db_client
        async_astra_db = async_astra_db_client
        self.namespace = namespace
        try:
            from astrapy.db import (
                AstraDB,
                AsyncAstraDB,
            )
        except (ImportError, ModuleNotFoundError):
            raise ImportError(
                "Could not import a recent astrapy python package. "
                "Please install it with `pip install --upgrade astrapy`."
            )
        # Conflicting-arg checks: client objects and raw credentials are
        # mutually exclusive ways to configure the environment.
        if astra_db_client is not None or async_astra_db_client is not None:
            if token is not None or api_endpoint is not None:
                raise ValueError(
                    "You cannot pass 'astra_db_client' or 'async_astra_db_client' to "
                    "AstraDBEnvironment if passing 'token' and 'api_endpoint'."
                )
        if token and api_endpoint:
            # Credentials supplied: build both clients directly.
            astra_db = AstraDB(
                token=token,
                api_endpoint=api_endpoint,
                namespace=self.namespace,
            )
            async_astra_db = AsyncAstraDB(
                token=token,
                api_endpoint=api_endpoint,
                namespace=self.namespace,
            )
        if astra_db:
            self.astra_db = astra_db
            if async_astra_db:
                self.async_astra_db = async_astra_db
            else:
                # Only a sync client available: mirror its connection
                # settings into a new async client.
                self.async_astra_db = AsyncAstraDB(
                    token=self.astra_db.token,
                    api_endpoint=self.astra_db.base_url,
                    api_path=self.astra_db.api_path,
                    api_version=self.astra_db.api_version,
                    namespace=self.astra_db.namespace,
                )
        elif async_astra_db:
            # Only an async client was given: derive the sync twin from it.
            self.async_astra_db = async_astra_db
            self.astra_db = AstraDB(
                token=self.async_astra_db.token,
                api_endpoint=self.async_astra_db.base_url,
                api_path=self.async_astra_db.api_path,
                api_version=self.async_astra_db.api_version,
                namespace=self.async_astra_db.namespace,
            )
        else:
            # Neither clients nor a complete credential pair was provided.
            raise ValueError(
                "Must provide 'astra_db_client' or 'async_astra_db_client' or "
                "'token' and 'api_endpoint'"
            )
class _AstraDBCollectionEnvironment(_AstraDBEnvironment):
    """An _AstraDBEnvironment bound to a single collection.

    Depending on ``setup_mode`` the collection is created synchronously,
    scheduled as an asyncio task, or not touched at all (SetupMode.OFF).
    """

    def __init__(
        self,
        collection_name: str,
        token: Optional[str] = None,
        api_endpoint: Optional[str] = None,
        astra_db_client: Optional[AstraDB] = None,
        async_astra_db_client: Optional[AsyncAstraDB] = None,
        namespace: Optional[str] = None,
        setup_mode: SetupMode = SetupMode.SYNC,
        pre_delete_collection: bool = False,
        embedding_dimension: Union[int, Awaitable[int], None] = None,
        metric: Optional[str] = None,
    ) -> None:
        from astrapy.db import AstraDBCollection, AsyncAstraDBCollection

        super().__init__(
            token, api_endpoint, astra_db_client, async_astra_db_client, namespace
        )
        self.collection_name = collection_name
        self.collection = AstraDBCollection(
            collection_name=collection_name,
            astra_db=self.astra_db,
        )
        self.async_collection = AsyncAstraDBCollection(
            collection_name=collection_name,
            astra_db=self.async_astra_db,
        )
        # Populated only in ASYNC mode; checked by the ensure methods below.
        self.async_setup_db_task: Optional[Task] = None
        if setup_mode == SetupMode.ASYNC:
            async_astra_db = self.async_astra_db

            async def _setup_db() -> None:
                if pre_delete_collection:
                    await async_astra_db.delete_collection(collection_name)
                # embedding_dimension may itself be an awaitable (e.g. a
                # coroutine computing the dimension); resolve it first.
                if inspect.isawaitable(embedding_dimension):
                    dimension: Optional[int] = await embedding_dimension
                else:
                    dimension = embedding_dimension
                await async_astra_db.create_collection(
                    collection_name, dimension=dimension, metric=metric
                )

            # NOTE: asyncio.create_task requires an already-running event loop.
            self.async_setup_db_task = asyncio.create_task(_setup_db())
        elif setup_mode == SetupMode.SYNC:
            if pre_delete_collection:
                self.astra_db.delete_collection(collection_name)
            # An awaitable dimension cannot be resolved synchronously.
            if inspect.isawaitable(embedding_dimension):
                raise ValueError(
                    "Cannot use an awaitable embedding_dimension with async_setup "
                    "set to False"
                )
            self.astra_db.create_collection(
                collection_name,
                dimension=embedding_dimension,  # type: ignore[arg-type]
                metric=metric,
            )

    def ensure_db_setup(self) -> None:
        """Verify the async setup task (if any) has completed.

        ``Task.result()`` raises InvalidStateError when the task is still
        pending; that is translated into a friendlier ValueError.
        """
        if self.async_setup_db_task:
            try:
                self.async_setup_db_task.result()
            except InvalidStateError:
                raise ValueError(
                    "Asynchronous setup of the DB not finished. "
                    "NB: AstraDB components sync methods shouldn't be called from the "
                    "event loop. Consider using their async equivalents."
                )

    async def aensure_db_setup(self) -> None:
        """Await completion of the async setup task, if one was scheduled."""
        if self.async_setup_db_task:
            await self.async_setup_db_task
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/openapi.py | """Utility functions for parsing an OpenAPI spec."""
from __future__ import annotations
import copy
import json
import logging
import re
from enum import Enum
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Optional, Union
import requests
import yaml
from pydantic import ValidationError
logger = logging.getLogger(__name__)
class HTTPVerb(str, Enum):
    """Enumerator of the HTTP verbs."""

    GET = "get"
    PUT = "put"
    POST = "post"
    DELETE = "delete"
    OPTIONS = "options"
    HEAD = "head"
    PATCH = "patch"
    TRACE = "trace"

    @classmethod
    def from_str(cls, verb: str) -> HTTPVerb:
        """Parse an HTTP verb."""
        # Scan members by value rather than relying on the Enum constructor.
        for member in cls:
            if member.value == verb:
                return member
        raise ValueError(f"Invalid HTTP verb. Valid values are {cls.__members__}")
if TYPE_CHECKING:
from openapi_pydantic import (
Components,
Operation,
Parameter,
PathItem,
Paths,
Reference,
RequestBody,
Schema,
)
try:
from openapi_pydantic import OpenAPI
except ImportError:
OpenAPI = object # type: ignore
class OpenAPISpec(OpenAPI):
    """OpenAPI Model that removes mis-formatted parts of the spec."""

    # Overriding overly restrictive type from parent class.
    openapi: str = "3.1.0"

    @property
    def _paths_strict(self) -> Paths:
        """Return the spec's paths, raising if none are defined."""
        if not self.paths:
            raise ValueError("No paths found in spec")
        return self.paths

    def _get_path_strict(self, path: str) -> PathItem:
        """Return the PathItem for ``path``, raising if it is missing."""
        path_item = self._paths_strict.get(path)
        if not path_item:
            raise ValueError(f"No path found for {path}")
        return path_item

    @property
    def _components_strict(self) -> Components:
        """Get components or err."""
        if self.components is None:
            raise ValueError("No components found in spec. ")
        return self.components

    @property
    def _parameters_strict(self) -> Dict[str, Union[Parameter, Reference]]:
        """Get parameters or err."""
        parameters = self._components_strict.parameters
        if parameters is None:
            raise ValueError("No parameters found in spec. ")
        return parameters

    @property
    def _schemas_strict(self) -> Dict[str, Schema]:
        """Get the dictionary of schemas or err."""
        schemas = self._components_strict.schemas
        if schemas is None:
            raise ValueError("No schemas found in spec. ")
        return schemas

    @property
    def _request_bodies_strict(self) -> Dict[str, Union[RequestBody, Reference]]:
        """Get the request body or err."""
        request_bodies = self._components_strict.requestBodies
        if request_bodies is None:
            raise ValueError("No request body found in spec. ")
        return request_bodies

    def _get_referenced_parameter(self, ref: Reference) -> Union[Parameter, Reference]:
        """Get a parameter (or nested reference) or err."""
        # "#/components/parameters/Name" -> "Name"
        ref_name = ref.ref.split("/")[-1]
        parameters = self._parameters_strict
        if ref_name not in parameters:
            raise ValueError(f"No parameter found for {ref_name}")
        return parameters[ref_name]

    def _get_root_referenced_parameter(self, ref: Reference) -> Parameter:
        """Get the root reference or err."""
        from openapi_pydantic import Reference

        parameter = self._get_referenced_parameter(ref)
        # Follow chains of references until a concrete Parameter is reached.
        while isinstance(parameter, Reference):
            parameter = self._get_referenced_parameter(parameter)
        return parameter

    def get_referenced_schema(self, ref: Reference) -> Schema:
        """Get a schema (or nested reference) or err."""
        ref_name = ref.ref.split("/")[-1]
        schemas = self._schemas_strict
        if ref_name not in schemas:
            raise ValueError(f"No schema found for {ref_name}")
        return schemas[ref_name]

    def get_schema(
        self,
        schema: Union[Reference, Schema],
        depth: int = 0,
        max_depth: Optional[int] = None,
    ) -> Schema:
        """Dereference ``schema`` and recursively resolve its ``properties``
        and ``items``.

        NOTE: the resolved Schema objects are mutated in place.

        Raises:
            RecursionError: If ``max_depth`` is exceeded while resolving.
        """
        if max_depth is not None and depth >= max_depth:
            raise RecursionError(
                f"Max depth of {max_depth} has been exceeded when resolving references."
            )
        from openapi_pydantic import Reference

        if isinstance(schema, Reference):
            schema = self.get_referenced_schema(schema)
        # TODO: Resolve references on all fields of Schema ?
        # (e.g. patternProperties, etc...)
        if schema.properties is not None:
            for p_name, p in schema.properties.items():
                schema.properties[p_name] = self.get_schema(p, depth + 1, max_depth)
        if schema.items is not None:
            schema.items = self.get_schema(schema.items, depth + 1, max_depth)
        return schema

    def _get_root_referenced_schema(self, ref: Reference) -> Schema:
        """Get the root reference or err."""
        from openapi_pydantic import Reference

        schema = self.get_referenced_schema(ref)
        while isinstance(schema, Reference):
            schema = self.get_referenced_schema(schema)
        return schema

    def _get_referenced_request_body(
        self, ref: Reference
    ) -> Optional[Union[Reference, RequestBody]]:
        """Get a request body (or nested reference) or err."""
        ref_name = ref.ref.split("/")[-1]
        request_bodies = self._request_bodies_strict
        if ref_name not in request_bodies:
            raise ValueError(f"No request body found for {ref_name}")
        return request_bodies[ref_name]

    def _get_root_referenced_request_body(
        self, ref: Reference
    ) -> Optional[RequestBody]:
        """Get the root request Body or err."""
        from openapi_pydantic import Reference

        request_body = self._get_referenced_request_body(ref)
        while isinstance(request_body, Reference):
            request_body = self._get_referenced_request_body(request_body)
        return request_body

    @staticmethod
    def _alert_unsupported_spec(obj: dict) -> None:
        """Alert if the spec is not supported."""
        warning_message = (
            " This may result in degraded performance."
            + " Convert your OpenAPI spec to 3.1.* spec"
            + " for better support."
        )
        swagger_version = obj.get("swagger")
        openapi_version = obj.get("openapi")
        if isinstance(openapi_version, str):
            if openapi_version != "3.1.0":
                logger.warning(
                    f"Attempting to load an OpenAPI {openapi_version}"
                    f" spec. {warning_message}"
                )
            else:
                # Exactly 3.1.0: fully supported, nothing to report.
                pass
        elif isinstance(swagger_version, str):
            logger.warning(
                f"Attempting to load a Swagger {swagger_version}"
                f" spec. {warning_message}"
            )
        else:
            # Neither an "openapi" nor a "swagger" version field: reject.
            raise ValueError(
                "Attempting to load an unsupported spec:"
                f"\n\n{obj}\n{warning_message}"
            )

    @classmethod
    def parse_obj(cls, obj: dict) -> OpenAPISpec:
        """Parse a spec dict, pruning fields that fail validation and retrying."""
        try:
            cls._alert_unsupported_spec(obj)
            return super().parse_obj(obj)
        except ValidationError as e:
            # We are handling possibly misconfigured specs and
            # want to do a best-effort job to get a reasonable interface out of it.
            new_obj = copy.deepcopy(obj)
            for error in e.errors():
                keys = error["loc"]
                item = new_obj
                # Walk to the parent of the offending field, then drop it.
                for key in keys[:-1]:
                    item = item[key]
                item.pop(keys[-1], None)
            # Retry recursively until validation succeeds.
            return cls.parse_obj(new_obj)

    @classmethod
    def from_spec_dict(cls, spec_dict: dict) -> OpenAPISpec:
        """Get an OpenAPI spec from a dict."""
        return cls.parse_obj(spec_dict)

    @classmethod
    def from_text(cls, text: str) -> OpenAPISpec:
        """Get an OpenAPI spec from a text."""
        try:
            spec_dict = json.loads(text)
        except json.JSONDecodeError:
            # Not JSON; assume YAML (a superset of JSON).
            spec_dict = yaml.safe_load(text)
        return cls.from_spec_dict(spec_dict)

    @classmethod
    def from_file(cls, path: Union[str, Path]) -> OpenAPISpec:
        """Get an OpenAPI spec from a file path."""
        path_ = path if isinstance(path, Path) else Path(path)
        if not path_.exists():
            raise FileNotFoundError(f"{path} does not exist")
        with path_.open("r") as f:
            return cls.from_text(f.read())

    @classmethod
    def from_url(cls, url: str) -> OpenAPISpec:
        """Get an OpenAPI spec from a URL."""
        response = requests.get(url)
        return cls.from_text(response.text)

    @property
    def base_url(self) -> str:
        """Get the base url."""
        # NOTE(review): assumes at least one server entry exists — IndexError
        # otherwise; confirm specs without "servers" are not expected here.
        return self.servers[0].url

    def get_methods_for_path(self, path: str) -> List[str]:
        """Return a list of valid methods for the specified path."""
        from openapi_pydantic import Operation

        path_item = self._get_path_strict(path)
        results = []
        for method in HTTPVerb:
            operation = getattr(path_item, method.value, None)
            if isinstance(operation, Operation):
                results.append(method.value)
        return results

    def get_parameters_for_path(self, path: str) -> List[Parameter]:
        """Return the fully dereferenced parameters declared on a path."""
        from openapi_pydantic import Reference

        path_item = self._get_path_strict(path)
        parameters = []
        if not path_item.parameters:
            return []
        for parameter in path_item.parameters:
            if isinstance(parameter, Reference):
                parameter = self._get_root_referenced_parameter(parameter)
            parameters.append(parameter)
        return parameters

    def get_operation(self, path: str, method: str) -> Operation:
        """Get the operation object for a given path and HTTP method."""
        from openapi_pydantic import Operation

        path_item = self._get_path_strict(path)
        operation_obj = getattr(path_item, method, None)
        if not isinstance(operation_obj, Operation):
            raise ValueError(f"No {method} method found for {path}")
        return operation_obj

    def get_parameters_for_operation(self, operation: Operation) -> List[Parameter]:
        """Get the components for a given operation."""
        from openapi_pydantic import Reference

        parameters = []
        if operation.parameters:
            for parameter in operation.parameters:
                if isinstance(parameter, Reference):
                    parameter = self._get_root_referenced_parameter(parameter)
                parameters.append(parameter)
        return parameters

    def get_request_body_for_operation(
        self, operation: Operation
    ) -> Optional[RequestBody]:
        """Get the request body for a given operation."""
        from openapi_pydantic import Reference

        request_body = operation.requestBody
        if isinstance(request_body, Reference):
            request_body = self._get_root_referenced_request_body(request_body)
        return request_body

    @staticmethod
    def get_cleaned_operation_id(operation: Operation, path: str, method: str) -> str:
        """Get a cleaned operation id from an operation id."""
        operation_id = operation.operationId
        if operation_id is None:
            # Replace all punctuation of any kind with underscore
            path = re.sub(r"[^a-zA-Z0-9]", "_", path.lstrip("/"))
            operation_id = f"{path}_{method}"
        return operation_id.replace("-", "_").replace(".", "_").replace("/", "_")
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/opaqueprompts.py | from typing import Dict, Union
def sanitize(
    input: Union[str, Dict[str, str]],
) -> Dict[str, Union[str, Dict[str, str]]]:
    """
    Sanitize input string or dict of strings by replacing sensitive data with
    placeholders.

    It returns the sanitized input string or dict of strings and the secure
    context as a dict following the format:
    {
        "sanitized_input": <sanitized input string or dict of strings>,
        "secure_context": <secure context>
    }

    The secure context is a bytes object that is needed to de-sanitize the response
    from the LLM.

    Args:
        input: Input string or dict of strings.

    Returns:
        Sanitized input string or dict of strings and the secure context
        as a dict following the format:
        {
            "sanitized_input": <sanitized input string or dict of strings>,
            "secure_context": <secure context>
        }
        The `secure_context` needs to be passed to the `desanitize` function.

    Raises:
        ValueError: If the input is not a string or dict of strings.
        ImportError: If the `opaqueprompts` Python package is not installed.
    """
    try:
        import opaqueprompts as op
    except ImportError:
        raise ImportError(
            "Could not import the `opaqueprompts` Python package, "
            "please install it with `pip install opaqueprompts`."
        )

    if isinstance(input, str):
        # the input could be a string, so we sanitize the string
        sanitize_response: op.SanitizeResponse = op.sanitize([input])
        return {
            "sanitized_input": sanitize_response.sanitized_texts[0],
            "secure_context": sanitize_response.secure_context,
        }

    if isinstance(input, dict):
        # the input could be a dict[string, string], so we sanitize the values
        # and zip the sanitized texts back onto their original keys
        # (dicts preserve insertion order, so keys and values stay aligned).
        keys = list(input.keys())
        sanitize_values_response: op.SanitizeResponse = op.sanitize(
            [input[key] for key in keys]
        )
        sanitized_input = dict(
            zip(keys, sanitize_values_response.sanitized_texts)
        )
        return {
            "sanitized_input": sanitized_input,
            "secure_context": sanitize_values_response.secure_context,
        }

    raise ValueError(f"Unexpected input type {type(input)}")
def desanitize(sanitized_text: str, secure_context: bytes) -> str:
    """
    Restore the original sensitive data from the sanitized text.

    Args:
        sanitized_text: Sanitized text.
        secure_context: Secure context returned by the `sanitize` function.

    Returns:
        De-sanitized text.

    Raises:
        ImportError: If the `opaqueprompts` Python package is not installed.
    """
    try:
        import opaqueprompts as op
    except ImportError:
        raise ImportError(
            "Could not import the `opaqueprompts` Python package, "
            "please install it with `pip install opaqueprompts`."
        )
    response: op.DesanitizeResponse = op.desanitize(sanitized_text, secure_context)
    return response.desanitized_text
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/vertexai.py | """Utilities to init Vertex AI."""
from importlib import metadata
from typing import TYPE_CHECKING, Any, Callable, Optional, Union
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import BaseLLM, create_base_retry_decorator
if TYPE_CHECKING:
from google.api_core.gapic_v1.client_info import ClientInfo
from google.auth.credentials import Credentials
from vertexai.preview.generative_models import Image
def create_retry_decorator(
    llm: BaseLLM,
    *,
    max_retries: int = 1,
    run_manager: Optional[
        Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
    ] = None,
) -> Callable[[Any], Any]:
    """Create a retry decorator for Vertex / Palm LLMs."""
    import google.api_core

    # Transient / quota-related API errors that warrant a retry.
    exc = google.api_core.exceptions
    retryable_errors = [
        exc.ResourceExhausted,
        exc.ServiceUnavailable,
        exc.Aborted,
        exc.DeadlineExceeded,
        exc.GoogleAPIError,
    ]
    return create_base_retry_decorator(
        error_types=retryable_errors,
        max_retries=max_retries,
        run_manager=run_manager,
    )
def raise_vertex_import_error(minimum_expected_version: str = "1.38.0") -> None:
    """Raise ImportError related to Vertex SDK being not available.

    Args:
        minimum_expected_version: The lowest expected version of the SDK.

    Raises:
        ImportError: an ImportError that mentions a required version of the SDK.
    """
    message = (
        f"Please, install or upgrade the google-cloud-aiplatform library: "
        f"pip install google-cloud-aiplatform>={minimum_expected_version}"
    )
    raise ImportError(message)
def init_vertexai(
    project: Optional[str] = None,
    location: Optional[str] = None,
    credentials: Optional["Credentials"] = None,
) -> None:
    """Init Vertex AI.

    Args:
        project: The default GCP project to use when making Vertex API calls.
        location: The default location to use when making API calls.
        credentials: The default custom
            credentials to use when making API calls. If not provided credentials
            will be ascertained from the environment.

    Raises:
        ImportError: If importing vertexai SDK did not succeed.
    """
    try:
        import vertexai
    except ImportError:
        # Raises an ImportError with install instructions.
        raise_vertex_import_error()

    vertexai.init(project=project, location=location, credentials=credentials)
def get_client_info(module: Optional[str] = None) -> "ClientInfo":
    r"""Return a custom user agent header.

    Args:
        module (Optional[str]):
            Optional. The module for a custom user agent header.

    Returns:
        google.api_core.gapic_v1.client_info.ClientInfo
    """
    try:
        from google.api_core.gapic_v1.client_info import ClientInfo
    except ImportError as exc:
        raise ImportError(
            "Could not import ClientInfo. Please, install it with "
            "pip install google-api-core"
        ) from exc

    langchain_version = metadata.version("langchain")
    if module:
        client_library_version = f"{langchain_version}-{module}"
    else:
        client_library_version = langchain_version
    return ClientInfo(
        client_library_version=client_library_version,
        user_agent=f"langchain/{client_library_version}",
    )
def load_image_from_gcs(path: str, project: Optional[str] = None) -> "Image":
    """Load an image from Google Cloud Storage."""
    try:
        from google.cloud import storage
    except ImportError:
        raise ImportError("Could not import google-cloud-storage python package.")

    from vertexai.preview.generative_models import Image

    gcs_client = storage.Client(project=project)
    # "gs://bucket/prefix..." -> pieces[2] is the bucket, the rest the prefix.
    pieces = path.split("/")
    matching_blobs = list(
        gcs_client.list_blobs(pieces[2], prefix="/".join(pieces[3:]))
    )
    if len(matching_blobs) > 1:
        raise ValueError(f"Found more than one candidate for {path}!")
    return Image.from_bytes(matching_blobs[0].download_as_bytes())
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/arcee.py | # This module contains utility classes and functions for interacting with Arcee API.
# For more information and updates, refer to the Arcee utils page:
# [https://github.com/arcee-ai/arcee-python/blob/main/arcee/dalm.py]
from enum import Enum
from typing import Any, Dict, List, Literal, Mapping, Optional, Union
import requests
from langchain_core.retrievers import Document
from pydantic import BaseModel, SecretStr, model_validator
class ArceeRoute(str, Enum):
    """Routes available for the Arcee API as enumerator."""

    # Text-generation endpoint.
    generate = "models/generate"
    # Document-retrieval endpoint.
    retrieve = "models/retrieve"
    # Training-status endpoint; format with the model's id or name.
    model_training_status = "models/status/{id_or_name}"
class DALMFilterType(str, Enum):
    """Filter types available for a DALM retrieval as enumerator."""

    # Keyword-style match: the term may appear anywhere in the field.
    fuzzy_search = "fuzzy_search"
    # The exact string must appear in the field (substring containment).
    strict_search = "strict_search"
class DALMFilter(BaseModel):
    """Filters available for a DALM retrieval and generation.

    Arguments:
        field_name: The field to filter on. Can be 'document' or 'name' to filter
            on your document's raw text or title. Any other field will be presumed
            to be a metadata field you included when uploading your context data
        filter_type: Currently 'fuzzy_search' and 'strict_search' are supported.
            'fuzzy_search' means a fuzzy search on the provided field is performed.
            The exact strict doesn't need to exist in the document
            for this to find a match.
            Very useful for scanning a document for some keyword terms.
            'strict_search' means that the exact string must appear
            in the provided field.
            This is NOT an exact eq filter. ie a document with content
            "the happy dog crossed the street" will match on a strict_search of
            "dog" but won't match on "the dog".
            Python equivalent of `return search_string in full_string`.
        value: The actual value to search for in the context data/metadata
    """

    field_name: str
    filter_type: DALMFilterType
    value: str
    # Private marker: True when field_name refers to user metadata rather than
    # the reserved 'document'/'name' fields.
    _is_metadata: bool = False

    @model_validator(mode="before")
    @classmethod
    def set_meta(cls, values: Dict) -> Any:
        """document and name are reserved arcee keys. Anything else is metadata"""
        # NOTE(review): this writes the key "_is_meta", but the attribute
        # declared above is "_is_metadata" — the flag is likely never set as
        # intended. Confirm intent and align the two names.
        values["_is_meta"] = values.get("field_name") not in ["document", "name"]
        return values
class ArceeDocumentSource(BaseModel):
    """Source of an Arcee document."""

    # Raw text of the source document.
    document: str
    # Human-readable title/name of the source.
    name: str
    # Identifier of the source in the Arcee system.
    id: str
class ArceeDocument(BaseModel):
    """Arcee document."""

    # Index the document was retrieved from.
    index: str
    # Retrieval result identifier.
    id: str
    # Relevance score for the retrieval query.
    score: float
    # The underlying source document.
    source: ArceeDocumentSource
class ArceeDocumentAdapter:
    """Adapter for Arcee documents"""

    @classmethod
    def adapt(cls, arcee_document: ArceeDocument) -> Document:
        """Adapts an `ArceeDocument` to a langchain's `Document` object."""
        source = arcee_document.source
        # Combine source-level and retrieval-level fields into one metadata dict.
        doc_metadata = {
            # arcee document; source metadata
            "name": source.name,
            "source_id": source.id,
            # arcee document metadata
            "index": arcee_document.index,
            "id": arcee_document.id,
            "score": arcee_document.score,
        }
        return Document(page_content=source.document, metadata=doc_metadata)
class ArceeWrapper:
    """Wrapper for Arcee API.

    For more details, see: https://www.arcee.ai/
    """

    def __init__(
        self,
        arcee_api_key: Union[str, SecretStr],
        arcee_api_url: str,
        arcee_api_version: str,
        model_kwargs: Optional[Dict[str, Any]],
        model_name: str,
    ):
        """Initialize ArceeWrapper.

        Arguments:
            arcee_api_key: API key for Arcee API.
            arcee_api_url: URL for Arcee API.
            arcee_api_version: Version of Arcee API.
            model_kwargs: Keyword arguments for Arcee API.
            model_name: Name of an Arcee model.

        Raises:
            ValueError: If the model's training status cannot be fetched.
        """
        # Normalize the key to a SecretStr so it is not exposed in reprs/logs.
        if isinstance(arcee_api_key, str):
            arcee_api_key_ = SecretStr(arcee_api_key)
        else:
            arcee_api_key_ = arcee_api_key
        self.arcee_api_key: SecretStr = arcee_api_key_
        self.model_kwargs = model_kwargs
        self.arcee_api_url = arcee_api_url
        self.arcee_api_version = arcee_api_version

        # NOTE: this performs a network request at construction time to
        # resolve the model id and training status from the model name.
        try:
            route = ArceeRoute.model_training_status.value.format(id_or_name=model_name)
            response = self._make_request("get", route)
            self.model_id = response.get("model_id")
            self.model_training_status = response.get("status")
        except Exception as e:
            raise ValueError(
                f"Error while validating model training status for '{model_name}': {e}"
            ) from e

    def validate_model_training_status(self) -> None:
        # Only a fully trained model can serve generate/retrieve requests.
        if self.model_training_status != "training_complete":
            raise Exception(
                f"Model {self.model_id} is not ready. "
                "Please wait for training to complete."
            )

    def _make_request(
        self,
        method: Literal["post", "get"],
        route: Union[ArceeRoute, str],
        body: Optional[Mapping[str, Any]] = None,
        params: Optional[dict] = None,
        headers: Optional[dict] = None,
    ) -> dict:
        """Make a request to the Arcee API

        Args:
            method: The HTTP method to use
            route: The route to call
            body: The body of the request
            params: The query params of the request
            headers: The headers of the request

        Raises:
            Exception: If the API responds with a status other than 200/201.
        """
        headers = self._make_request_headers(headers=headers)
        url = self._make_request_url(route=route)

        # Resolve requests.get / requests.post from the method name.
        req_type = getattr(requests, method)

        # NOTE(review): no timeout is passed here, so a hung connection can
        # block indefinitely — consider adding one.
        response = req_type(url, json=body, params=params, headers=headers)
        if response.status_code not in (200, 201):
            raise Exception(f"Failed to make request. Response: {response.text}")
        return response.json()

    def _make_request_headers(self, headers: Optional[Dict] = None) -> Dict:
        # Merge caller-supplied headers with the mandatory auth/content headers;
        # the internal headers win on key collision.
        headers = headers or {}
        if not isinstance(self.arcee_api_key, SecretStr):
            raise TypeError(
                f"arcee_api_key must be a SecretStr. Got {type(self.arcee_api_key)}"
            )
        api_key = self.arcee_api_key.get_secret_value()
        internal_headers = {
            "X-Token": api_key,
            "Content-Type": "application/json",
        }
        headers.update(internal_headers)
        return headers

    def _make_request_url(self, route: Union[ArceeRoute, str]) -> str:
        # Full endpoint URL: <base>/<version>/<route>.
        return f"{self.arcee_api_url}/{self.arcee_api_version}/{route}"

    def _make_request_body_for_models(
        self, prompt: str, **kwargs: Mapping[str, Any]
    ) -> Mapping[str, Any]:
        """Make the request body for generate/retrieve models endpoint"""
        # Explicit kwargs override the instance-level model_kwargs.
        _model_kwargs = self.model_kwargs or {}
        _params = {**_model_kwargs, **kwargs}
        # NOTE(review): DALMFilter pydantic instances are placed directly in
        # the body that is later sent via requests' json= — confirm they are
        # JSON-serializable on this code path.
        filters = [DALMFilter(**f) for f in _params.get("filters", [])]
        # NOTE(review): both "model_id" and "id" carry the same value —
        # presumably one is legacy; verify against the Arcee API spec.
        return dict(
            model_id=self.model_id,
            query=prompt,
            size=_params.get("size", 3),
            filters=filters,
            id=self.model_id,
        )

    def generate(
        self,
        prompt: str,
        **kwargs: Any,
    ) -> str:
        """Generate text from Arcee DALM.

        Args:
            prompt: Prompt to generate text from.
            size: The max number of context results to retrieve. Defaults to 3.
              (Can be less if filters are provided).
            filters: Filters to apply to the context dataset.
        """
        response = self._make_request(
            method="post",
            route=ArceeRoute.generate.value,
            body=self._make_request_body_for_models(
                prompt=prompt,
                **kwargs,
            ),
        )
        return response["text"]

    def retrieve(
        self,
        query: str,
        **kwargs: Any,
    ) -> List[Document]:
        """Retrieve {size} contexts with your retriever for a given query

        Args:
            query: Query to submit to the model
            size: The max number of context results to retrieve. Defaults to 3.
              (Can be less if filters are provided).
            filters: Filters to apply to the context dataset.
        """
        response = self._make_request(
            method="post",
            route=ArceeRoute.retrieve.value,
            body=self._make_request_body_for_models(
                prompt=query,
                **kwargs,
            ),
        )
        # Convert each raw result into a langchain Document.
        return [
            ArceeDocumentAdapter.adapt(ArceeDocument(**doc))
            for doc in response["results"]
        ]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/bing_search.py | """Util that calls Bing Search."""
from typing import Any, Dict, List
import requests
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, Field, model_validator
# BING_SEARCH_ENDPOINT is the default endpoint for Bing Web Search API.
# Currently There are two web-based Bing Search services available on Azure,
# i.e. Bing Web Search[1] and Bing Custom Search[2]. Both services provide a wide
# range of search results, but Bing Custom Search additionally requires you to
# provide a custom search instance, `customConfig`.
# Both services are available for BingSearchAPIWrapper.
# History of Azure Bing Search API:
# Before shown in Azure Marketplace as a separate service, Bing Search APIs were
# part of Azure Cognitive Services, the endpoint of which is unique, and the user
# must specify the endpoint when making a request. After transitioning to Azure
# Marketplace, the endpoint is standardized and the user does not need to specify
# the endpoint[3].
# Reference:
# 1. https://learn.microsoft.com/en-us/bing/search-apis/bing-web-search/overview
# 2. https://learn.microsoft.com/en-us/bing/search-apis/bing-custom-search/overview
# 3. https://azure.microsoft.com/en-in/updates/bing-search-apis-will-transition-from-azure-cognitive-services-to-azure-marketplace-on-31-october-2023/
DEFAULT_BING_SEARCH_ENDPOINT = "https://api.bing.microsoft.com/v7.0/search"
class BingSearchAPIWrapper(BaseModel):
    """Wrapper for Bing Web Search API."""

    bing_subscription_key: str
    bing_search_url: str
    k: int = 10
    search_kwargs: dict = Field(default_factory=dict)
    """Additional keyword arguments to pass to the search request."""

    model_config = ConfigDict(
        extra="forbid",
    )

    def _bing_search_results(self, search_term: str, count: int) -> List[dict]:
        # The subscription key travels in a header; everything else is a
        # query parameter, with user-supplied kwargs able to override defaults.
        request_params = {
            "q": search_term,
            "count": count,
            "textDecorations": True,
            "textFormat": "HTML",
            **self.search_kwargs,
        }
        response = requests.get(
            self.bing_search_url,
            headers={"Ocp-Apim-Subscription-Key": self.bing_subscription_key},
            params=request_params,  # type: ignore
        )
        response.raise_for_status()
        payload = response.json()
        if "webPages" in payload:
            return payload["webPages"]["value"]
        return []

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and endpoint exists in environment."""
        values["bing_subscription_key"] = get_from_dict_or_env(
            values, "bing_subscription_key", "BING_SUBSCRIPTION_KEY"
        )
        values["bing_search_url"] = get_from_dict_or_env(
            values,
            "bing_search_url",
            "BING_SEARCH_URL",
            default=DEFAULT_BING_SEARCH_ENDPOINT,
        )
        return values

    def run(self, query: str) -> str:
        """Run query through BingSearch and parse result."""
        results = self._bing_search_results(query, count=self.k)
        if not results:
            return "No good Bing Search Result was found"
        # Concatenate every result snippet into one space-separated string.
        return " ".join(item["snippet"] for item in results)

    def results(self, query: str, num_results: int) -> List[Dict]:
        """Run query through BingSearch and return metadata.

        Args:
            query: The query to search for.
            num_results: The number of results to return.

        Returns:
            A list of dictionaries with the following keys:
                snippet - The description of the result.
                title - The title of the result.
                link - The link to the result.
        """
        raw_results = self._bing_search_results(query, count=num_results)
        if not raw_results:
            return [{"Result": "No good Bing Search Result was found"}]
        return [
            {
                "snippet": item["snippet"],
                "title": item["name"],
                "link": item["url"],
            }
            for item in raw_results
        ]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/nasa.py | """Util that calls several NASA APIs."""
import json
import requests
from pydantic import BaseModel
IMAGE_AND_VIDEO_LIBRARY_URL = "https://images-api.nasa.gov"
class NasaAPIWrapper(BaseModel):
    """Wrapper for NASA API."""

    def get_media(self, query: str) -> str:
        """Search the NASA image and video library."""
        params = json.loads(query)
        # Lift a truthy "q" out of the params so it rides in the URL itself;
        # remaining keys are sent as ordinary query parameters.
        if params.get("q"):
            query_text = params.pop("q")
        else:
            query_text = ""
        response = requests.get(
            IMAGE_AND_VIDEO_LIBRARY_URL + "/search?q=" + query_text, params=params
        )
        return response.json()

    def get_media_metadata_manifest(self, query: str) -> str:
        """Fetch the metadata manifest for a media asset id."""
        return requests.get(IMAGE_AND_VIDEO_LIBRARY_URL + "/asset/" + query).json()

    def get_media_metadata_location(self, query: str) -> str:
        """Fetch the metadata location for a media asset id."""
        return requests.get(IMAGE_AND_VIDEO_LIBRARY_URL + "/metadata/" + query).json()

    def get_video_captions_location(self, query: str) -> str:
        """Fetch the captions location for a video asset id."""
        return requests.get(IMAGE_AND_VIDEO_LIBRARY_URL + "/captions/" + query).json()

    def run(self, mode: str, query: str) -> str:
        """Dispatch *query* to the handler named by *mode* and JSON-encode it."""
        handlers = {
            "search_media": self.get_media,
            "get_media_metadata_manifest": self.get_media_metadata_manifest,
            "get_media_metadata_location": self.get_media_metadata_location,
            "get_video_captions_location": self.get_video_captions_location,
        }
        handler = handlers.get(mode)
        if handler is None:
            output = f"ModeError: Got unexpected mode {mode}."
        else:
            output = handler(query)
        try:
            return json.dumps(output)
        except Exception:
            # Fall back to str() for payloads json cannot serialize.
            return str(output)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/google_search.py | """Util that calls Google Search."""
from typing import Any, Dict, List, Optional
from langchain_core._api.deprecation import deprecated
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
@deprecated(
    since="0.0.33",
    removal="1.0",
    alternative_import="langchain_google_community.GoogleSearchAPIWrapper",
)
class GoogleSearchAPIWrapper(BaseModel):
    """Wrapper for Google Search API.

    Adapted from: Instructions adapted from https://stackoverflow.com/questions/
    37083058/
    programmatically-searching-google-in-python-using-custom-search

    TODO: DOCS for using it
    1. Install google-api-python-client
    - If you don't already have a Google account, sign up.
    - If you have never created a Google APIs Console project,
    read the Managing Projects page and create a project in the Google API Console.
    - Install the library using pip install google-api-python-client

    2. Enable the Custom Search API
    - Navigate to the APIs & Services→Dashboard panel in Cloud Console.
    - Click Enable APIs and Services.
    - Search for Custom Search API and click on it.
    - Click Enable.
    URL for it: https://console.cloud.google.com/apis/library/customsearch.googleapis
    .com

    3. To create an API key:
    - Navigate to the APIs & Services → Credentials panel in Cloud Console.
    - Select Create credentials, then select API key from the drop-down menu.
    - The API key created dialog box displays your newly created key.
    - You now have an API_KEY

    Alternatively, you can just generate an API key here:
    https://developers.google.com/custom-search/docs/paid_element#api_key

    4. Setup Custom Search Engine so you can search the entire web
    - Create a custom search engine here: https://programmablesearchengine.google.com/.
    - In `What to search` to search, pick the `Search the entire Web` option.
    After search engine is created, you can click on it and find `Search engine ID`
    on the Overview page.
    """

    # Google API client service object; built in validate_environment.
    search_engine: Any = None  #: :meta private:
    google_api_key: Optional[str] = None
    google_cse_id: Optional[str] = None
    # Number of results requested by run().
    k: int = 10
    # When True, use the CSE's site-restricted endpoint.
    siterestrict: bool = False

    model_config = ConfigDict(
        extra="forbid",
    )

    def _google_search_results(self, search_term: str, **kwargs: Any) -> List[dict]:
        # Choose the plain or site-restricted Custom Search endpoint.
        cse = self.search_engine.cse()
        if self.siterestrict:
            cse = cse.siterestrict()
        res = cse.list(q=search_term, cx=self.google_cse_id, **kwargs).execute()
        return res.get("items", [])

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        google_api_key = get_from_dict_or_env(
            values, "google_api_key", "GOOGLE_API_KEY"
        )
        values["google_api_key"] = google_api_key

        google_cse_id = get_from_dict_or_env(values, "google_cse_id", "GOOGLE_CSE_ID")
        values["google_cse_id"] = google_cse_id

        # Import lazily so the dependency is only required when the wrapper
        # is actually instantiated.
        try:
            from googleapiclient.discovery import build
        except ImportError:
            raise ImportError(
                "google-api-python-client is not installed. "
                "Please install it with `pip install google-api-python-client"
                ">=2.100.0`"
            )

        # Build the client once and stash it on the model.
        service = build("customsearch", "v1", developerKey=google_api_key)
        values["search_engine"] = service

        return values

    def run(self, query: str) -> str:
        """Run query through GoogleSearch and parse result."""
        snippets = []
        results = self._google_search_results(query, num=self.k)
        if len(results) == 0:
            return "No good Google Search Result was found"
        for result in results:
            # Some results (e.g. image-only hits) carry no snippet.
            if "snippet" in result:
                snippets.append(result["snippet"])

        return " ".join(snippets)

    def results(
        self,
        query: str,
        num_results: int,
        search_params: Optional[Dict[str, str]] = None,
    ) -> List[Dict]:
        """Run query through GoogleSearch and return metadata.

        Args:
            query: The query to search for.
            num_results: The number of results to return.
            search_params: Parameters to be passed on search

        Returns:
            A list of dictionaries with the following keys:
                snippet - The description of the result.
                title - The title of the result.
                link - The link to the result.
        """
        metadata_results = []
        results = self._google_search_results(
            query, num=num_results, **(search_params or {})
        )
        if len(results) == 0:
            return [{"Result": "No good Google Search Result was found"}]
        for result in results:
            metadata_result = {
                "title": result["title"],
                "link": result["link"],
            }
            # Snippet is optional on some result types.
            if "snippet" in result:
                metadata_result["snippet"] = result["snippet"]
            metadata_results.append(metadata_result)

        return metadata_results
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/merriam_webster.py | """Util that calls Merriam-Webster."""
import json
from typing import Any, Dict, Iterator, List, Optional
from urllib.parse import quote
import requests
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
# Collegiate Dictionary endpoint; the word and API key are appended in run().
MERRIAM_WEBSTER_API_URL = (
    "https://www.dictionaryapi.com/api/v3/references/collegiate/json"
)
# Timeout passed to requests.get().
# NOTE(review): requests interprets timeout in *seconds*; 5000 looks like it
# was intended as milliseconds (i.e. 5 seconds) — confirm before changing.
MERRIAM_WEBSTER_TIMEOUT = 5000
class MerriamWebsterAPIWrapper(BaseModel):
    """Wrapper for Merriam-Webster.

    Docs for using:

    1. Go to https://www.dictionaryapi.com/register/index and register an
       developer account with a key for the Collegiate Dictionary
    2. Get your API Key from https://www.dictionaryapi.com/account/my-keys
    3. Save your API Key into MERRIAM_WEBSTER_API_KEY env variable
    """

    merriam_webster_api_key: Optional[str] = None

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key exists in environment."""
        values["merriam_webster_api_key"] = get_from_dict_or_env(
            values, "merriam_webster_api_key", "MERRIAM_WEBSTER_API_KEY"
        )
        return values

    def run(self, query: str) -> str:
        """Run query through Merriam-Webster API and return a formatted result."""
        quoted_query = quote(query)
        request_url = (
            f"{MERRIAM_WEBSTER_API_URL}/{quoted_query}"
            f"?key={self.merriam_webster_api_key}"
        )
        response = requests.get(request_url, timeout=MERRIAM_WEBSTER_TIMEOUT)
        if response.status_code != 200:
            # Surface the raw API error body to the caller.
            return response.text
        return self._format_response(query, response)

    def _format_response(self, query: str, response: requests.Response) -> str:
        """Turn the raw API payload into a human-readable string."""
        content = json.loads(response.content)
        if not content:
            return f"No Merriam-Webster definition was found for query '{query}'."
        if not isinstance(content[0], str):
            # Proper entry objects came back: render their definitions.
            return self._format_definitions(query, content)
        # The API returned spelling suggestions instead of entries.
        result = f"No Merriam-Webster definition was found for query '{query}'.\n"
        if len(content) > 1:
            suggestions = [f"{i + 1}. {word}" for i, word in enumerate(content)]
            result += "You can try one of the following alternative queries:\n\n"
            result += "\n".join(suggestions)
        else:
            result += f"Did you mean '{content[0]}'?"
        return result

    def _format_definitions(self, query: str, definitions: List[Dict]) -> str:
        """Render all definition lines, numbered when there is more than one."""
        lines: List[str] = []
        for entry in definitions:
            lines.extend(self._format_definition(entry))
        if len(lines) == 1:
            return f"Definition of '{query}':\n" f"{lines[0]}"
        result = f"Definitions of '{query}':\n\n"
        for number, line in enumerate(lines, 1):
            result += f"{number}. {line}\n"
        return result

    def _format_definition(self, definition: Dict) -> Iterator[str]:
        """Yield one formatted line per short definition of a dictionary entry."""
        if "hwi" in definition:
            # Headword with syllable markers (*) rendered as hyphens.
            headword = definition["hwi"]["hw"].replace("*", "-")
        else:
            headword = definition["meta"]["id"].split(":")[0]
        if "fl" not in definition:
            # No functional label (part of speech): nothing to render.
            return
        functional_label = definition["fl"]
        if "shortdef" in definition:
            for short_def in definition["shortdef"]:
                yield f"{headword}, {functional_label}: {short_def}"
        else:
            yield f"{headword}, {functional_label}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/cassandra_database.py | """Apache Cassandra database wrapper."""
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Union
from pydantic import BaseModel, ConfigDict, Field, model_validator
from typing_extensions import Self
if TYPE_CHECKING:
from cassandra.cluster import ResultSet, Session
# System and DataStax-internal keyspaces that are excluded from schema
# resolution by default (see CassandraDatabase._fetch_keyspaces).
IGNORED_KEYSPACES = [
    "system",
    "system_auth",
    "system_distributed",
    "system_schema",
    "system_traces",
    "system_views",
    "datastax_sla",
    "data_endpoint_auth",
]
class CassandraDatabase:
    """Apache Cassandra® database wrapper."""

    def __init__(
        self,
        session: Optional[Session] = None,
        exclude_tables: Optional[List[str]] = None,
        include_tables: Optional[List[str]] = None,
        cassio_init_kwargs: Optional[Dict[str, Any]] = None,
    ):
        # Resolve a driver session from the argument or cassio's global
        # configuration; fail fast when neither yields one.
        _session = self._resolve_session(session, cassio_init_kwargs)
        if not _session:
            raise ValueError("Session not provided and cannot be resolved")

        self._session = _session
        self._exclude_keyspaces = IGNORED_KEYSPACES
        self._exclude_tables = exclude_tables or []
        self._include_tables = include_tables or []

    def run(
        self,
        query: str,
        fetch: str = "all",
        **kwargs: Any,
    ) -> Union[list, Dict[str, Any], ResultSet]:
        """Execute a CQL query and return the results."""
        # Dispatch on the requested fetch style; the query itself is
        # validated downstream by _validate_cql (SELECT only).
        if fetch == "all":
            return self.fetch_all(query, **kwargs)
        elif fetch == "one":
            return self.fetch_one(query, **kwargs)
        elif fetch == "cursor":
            return self._fetch(query, **kwargs)
        else:
            raise ValueError("Fetch parameter must be either 'one', 'all', or 'cursor'")

    def _fetch(self, query: str, **kwargs: Any) -> ResultSet:
        """Validate the CQL and execute it on the session."""
        clean_query = self._validate_cql(query, "SELECT")
        return self._session.execute(clean_query, **kwargs)

    def fetch_all(self, query: str, **kwargs: Any) -> list:
        """Execute the query and materialize the full result set as a list."""
        return list(self._fetch(query, **kwargs))

    def fetch_one(self, query: str, **kwargs: Any) -> Dict[str, Any]:
        """Execute the query and return the first row as a dict, or {}."""
        result = self._fetch(query, **kwargs)
        # NOTE(review): if the result set is truthy but contains no rows,
        # result.one() returns None and ._asdict() would raise — confirm the
        # driver's ResultSet truthiness semantics.
        return result.one()._asdict() if result else {}

    def get_keyspace_tables(self, keyspace: str) -> List[Table]:
        """Get the Table objects for the specified keyspace."""
        schema = self._resolve_schema([keyspace])
        if keyspace in schema:
            return schema[keyspace]
        else:
            return []

    # This is a more basic string building function that doesn't use a query builder
    # or prepared statements
    # TODO: Refactor to use prepared statements
    def get_table_data(
        self, keyspace: str, table: str, predicate: str, limit: int
    ) -> str:
        """Get data from the specified table in the specified keyspace."""
        # Predicate and limit are optional; omitted when falsy.
        query = f"SELECT * FROM {keyspace}.{table}"

        if predicate:
            query += f" WHERE {predicate}"
        if limit:
            query += f" LIMIT {limit}"

        query += ";"

        result = self.fetch_all(query)
        # One row repr per line.
        data = "\n".join(str(row) for row in result)
        return data

    def get_context(self) -> Dict[str, Any]:
        """Return db context that you may want in agent prompt."""
        keyspaces = self._fetch_keyspaces()
        return {"keyspaces": ", ".join(keyspaces)}

    def format_keyspace_to_markdown(
        self, keyspace: str, tables: Optional[List[Table]] = None
    ) -> str:
        """
        Generates a markdown representation of the schema for a specific keyspace
        by iterating over all tables within that keyspace and calling their
        as_markdown method.

        Args:
            keyspace: The name of the keyspace to generate markdown documentation for.
            tables: list of tables in the keyspace; it will be resolved if not
                provided.

        Returns:
            A string containing the markdown representation of the specified
            keyspace schema.
        """
        if not tables:
            tables = self.get_keyspace_tables(keyspace)

        if tables:
            output = f"## Keyspace: {keyspace}\n\n"
            if tables:
                for table in tables:
                    output += table.as_markdown(include_keyspace=False, header_level=3)
                    output += "\n\n"
            else:
                output += "No tables present in keyspace\n\n"

            return output
        else:
            # Keyspace resolved to no tables: contribute nothing to the doc.
            return ""

    def format_schema_to_markdown(self) -> str:
        """
        Generates a markdown representation of the schema for all keyspaces and tables
        within the CassandraDatabase instance. This method utilizes the
        format_keyspace_to_markdown method to create markdown sections for each
        keyspace, assembling them into a comprehensive schema document.

        Iterates through each keyspace in the database, utilizing
        format_keyspace_to_markdown to generate markdown for each keyspace's schema,
        including details of its tables. These sections are concatenated to form a
        single markdown document that represents the schema of the entire database or
        the subset of keyspaces that have been resolved in this instance.

        Returns:
            A markdown string that documents the schema of all resolved keyspaces and
            their tables within this CassandraDatabase instance. This includes keyspace
            names, table names, comments, columns, partition keys, clustering keys,
            and indexes for each table.
        """
        schema = self._resolve_schema()
        output = "# Cassandra Database Schema\n\n"
        for keyspace, tables in schema.items():
            output += f"{self.format_keyspace_to_markdown(keyspace, tables)}\n\n"
        return output

    def _validate_cql(self, cql: str, type: str = "SELECT") -> str:
        """
        Validates a CQL query string for basic formatting and safety checks.
        Ensures that `cql` starts with the specified type (e.g., SELECT) and does
        not contain content that could indicate CQL injection vulnerabilities.

        Args:
            cql: The CQL query string to be validated.
            type: The expected starting keyword of the query, used to verify
                that the query begins with the correct operation type
                (e.g., "SELECT", "UPDATE"). Defaults to "SELECT".

        Returns:
            The trimmed and validated CQL query string without a trailing semicolon.

        Raises:
            ValueError: If the value of `type` is not supported
            DatabaseError: If `cql` is considered unsafe
        """
        SUPPORTED_TYPES = ["SELECT"]
        if type and type.upper() not in SUPPORTED_TYPES:
            raise ValueError(
                f"""Unsupported CQL type: {type}. Supported types:
                {SUPPORTED_TYPES}"""
            )

        # Basic sanity checks
        cql_trimmed = cql.strip()
        if not cql_trimmed.upper().startswith(type.upper()):
            raise DatabaseError(f"CQL must start with {type.upper()}.")

        # Allow a trailing semicolon, but remove (it is optional with the Python driver)
        cql_trimmed = cql_trimmed.rstrip(";")

        # Consider content within matching quotes to be "safe"
        # Remove single-quoted strings
        cql_sanitized = re.sub(r"'.*?'", "", cql_trimmed)

        # Remove double-quoted strings
        cql_sanitized = re.sub(r'".*?"', "", cql_sanitized)

        # Find unsafe content in the remaining CQL
        if ";" in cql_sanitized:
            raise DatabaseError(
                """Potentially unsafe CQL, as it contains a ; at a
                place other than the end or within quotation marks."""
            )

        # The trimmed query, before modifications
        return cql_trimmed

    def _fetch_keyspaces(self, keyspaces: Optional[List[str]] = None) -> List[str]:
        """
        Fetches a list of keyspace names from the Cassandra database. The list can be
        filtered by a provided list of keyspace names or by excluding predefined
        keyspaces.

        Args:
            keyspaces: A list of keyspace names to specifically include.
                If provided and not empty, the method returns only the keyspaces
                present in this list.
                If not provided or empty, the method returns all keyspaces except those
                specified in the _exclude_keyspaces attribute.

        Returns:
            A list of keyspace names according to the filtering criteria.
        """
        all_keyspaces = self.fetch_all(
            "SELECT keyspace_name FROM system_schema.keyspaces"
        )

        # Filtering keyspaces based on 'keyspace_list' and '_exclude_keyspaces'
        # NOTE(review): this branch assumes rows are dicts (dict row factory),
        # while _resolve_schema uses attribute access on rows — confirm which
        # row factory the session is configured with.
        filtered_keyspaces = []
        for ks in all_keyspaces:
            if not isinstance(ks, Dict):
                continue  # Skip if the row is not a dictionary.

            keyspace_name = ks["keyspace_name"]
            if keyspaces and keyspace_name in keyspaces:
                filtered_keyspaces.append(keyspace_name)
            elif not keyspaces and keyspace_name not in self._exclude_keyspaces:
                filtered_keyspaces.append(keyspace_name)

        return filtered_keyspaces

    def _format_keyspace_query(self, query: str, keyspaces: List[str]) -> str:
        """Append a keyspace_name IN (...) clause to a system-schema query."""
        # Construct IN clause for CQL query
        keyspace_in_clause = ", ".join([f"'{ks}'" for ks in keyspaces])
        return f"""{query} WHERE keyspace_name IN ({keyspace_in_clause})"""

    def _fetch_tables_data(self, keyspaces: List[str]) -> list:
        """Fetches tables schema data, filtered by a list of keyspaces.
        This method allows for efficiently fetching schema information for multiple
        keyspaces in a single operation, enabling applications to programmatically
        analyze or document the database schema.

        Args:
            keyspaces: A list of keyspace names from which to fetch tables schema data.

        Returns:
            Dictionaries of table details (keyspace name, table name, and comment).
        """
        tables_query = self._format_keyspace_query(
            "SELECT keyspace_name, table_name, comment FROM system_schema.tables",
            keyspaces,
        )
        return self.fetch_all(tables_query)

    def _fetch_columns_data(self, keyspaces: List[str]) -> list:
        """Fetches columns schema data, filtered by a list of keyspaces.
        This method allows for efficiently fetching schema information for multiple
        keyspaces in a single operation, enabling applications to programmatically
        analyze or document the database schema.

        Args:
            keyspaces: A list of keyspace names from which to fetch tables schema data.

        Returns:
            Dictionaries of column details (keyspace name, table name, column name,
            type, kind, and position).
        """
        tables_query = self._format_keyspace_query(
            """
            SELECT keyspace_name, table_name, column_name, type, kind,
            clustering_order, position
            FROM system_schema.columns
            """,
            keyspaces,
        )
        return self.fetch_all(tables_query)

    def _fetch_indexes_data(self, keyspaces: List[str]) -> list:
        """Fetches indexes schema data, filtered by a list of keyspaces.
        This method allows for efficiently fetching schema information for multiple
        keyspaces in a single operation, enabling applications to programmatically
        analyze or document the database schema.

        Args:
            keyspaces: A list of keyspace names from which to fetch tables schema data.

        Returns:
            Dictionaries of index details (keyspace name, table name, index name, kind,
            and options).
        """
        tables_query = self._format_keyspace_query(
            """
            SELECT keyspace_name, table_name, index_name,
            kind, options
            FROM system_schema.indexes
            """,
            keyspaces,
        )
        return self.fetch_all(tables_query)

    def _resolve_schema(
        self, keyspaces: Optional[List[str]] = None
    ) -> Dict[str, List[Table]]:
        """
        Efficiently fetches and organizes Cassandra table schema information,
        such as comments, columns, and indexes, into a dictionary mapping keyspace
        names to lists of Table objects.

        Args:
            keyspaces: An optional list of keyspace names from which to fetch tables
                schema data.

        Returns:
            A dictionary with keyspace names as keys and lists of Table objects as
            values, where each Table object is populated with schema details
            appropriate for its keyspace and table name.
        """
        if not keyspaces:
            keyspaces = self._fetch_keyspaces()

        # Three bulk queries instead of one per table.
        tables_data = self._fetch_tables_data(keyspaces)
        columns_data = self._fetch_columns_data(keyspaces)
        indexes_data = self._fetch_indexes_data(keyspaces)

        keyspace_dict: dict = {}
        for table_data in tables_data:
            keyspace = table_data.keyspace_name
            table_name = table_data.table_name
            comment = table_data.comment

            # Apply include/exclude filters by table name.
            if self._include_tables and table_name not in self._include_tables:
                continue

            if self._exclude_tables and table_name in self._exclude_tables:
                continue

            # Filter columns and indexes for this table
            table_columns = [
                (c.column_name, c.type)
                for c in columns_data
                if c.keyspace_name == keyspace and c.table_name == table_name
            ]

            partition_keys = [
                c.column_name
                for c in columns_data
                if c.kind == "partition_key"
                and c.keyspace_name == keyspace
                and c.table_name == table_name
            ]

            clustering_keys = [
                (c.column_name, c.clustering_order)
                for c in columns_data
                if c.kind == "clustering"
                and c.keyspace_name == keyspace
                and c.table_name == table_name
            ]

            table_indexes = [
                (c.index_name, c.kind, c.options)
                for c in indexes_data
                if c.keyspace_name == keyspace and c.table_name == table_name
            ]

            table_obj = Table(
                keyspace=keyspace,
                table_name=table_name,
                comment=comment,
                columns=table_columns,
                partition=partition_keys,
                clustering=clustering_keys,
                indexes=table_indexes,
            )

            if keyspace not in keyspace_dict:
                keyspace_dict[keyspace] = []
            keyspace_dict[keyspace].append(table_obj)

        return keyspace_dict

    @staticmethod
    def _resolve_session(
        session: Optional[Session] = None,
        cassio_init_kwargs: Optional[Dict[str, Any]] = None,
    ) -> Optional[Session]:
        """
        Attempts to resolve and return a Session object for use in database operations.

        This function follows a specific order of precedence to determine the
        appropriate session to use:
        1. `session` parameter if given,
        2. Existing `cassio` session,
        3. A new `cassio` session derived from `cassio_init_kwargs`,
        4. `None`

        Args:
            session: An optional session to use directly.
            cassio_init_kwargs: An optional dictionary of keyword arguments to
                `cassio`.

        Returns:
            The resolved session object if successful, or `None` if the session
            cannot be resolved.

        Raises:
            ValueError: If `cassio_init_kwargs` is provided but is not a dictionary of
                keyword arguments.
        """
        # Prefer given session
        if session:
            return session

        # If a session is not provided, create one using cassio if available
        # dynamically import cassio to avoid circular imports
        try:
            import cassio.config
        except ImportError:
            raise ValueError(
                "cassio package not found, please install with" " `pip install cassio`"
            )

        # Use pre-existing session on cassio
        s = cassio.config.resolve_session()
        if s:
            return s

        # Try to init and return cassio session
        if cassio_init_kwargs:
            if isinstance(cassio_init_kwargs, dict):
                cassio.init(**cassio_init_kwargs)
                s = cassio.config.check_resolve_session()
                return s
            else:
                raise ValueError("cassio_init_kwargs must be a keyword dictionary")

        # return None if we're not able to resolve
        return None
class DatabaseError(Exception):
    """Exception raised for errors in the database schema.

    Attributes:
        message -- explanation of the error
    """

    def __init__(self, message: str):
        # Register the message as the exception args, then keep a named copy.
        super().__init__(message)
        self.message = message
class Table(BaseModel):
    """Immutable description of a single Cassandra table's schema.

    Instances are typically created via :meth:`from_database`, which reads
    the ``system_schema`` tables through a :class:`CassandraDatabase`, and
    can be rendered for humans (or prompts) with :meth:`as_markdown`.
    """
    keyspace: str
    """The keyspace in which the table exists."""
    table_name: str
    """The name of the table."""
    comment: Optional[str] = None
    """The comment associated with the table."""
    # (column_name, cql_type) pairs for every column in the table.
    columns: List[Tuple[str, str]] = Field(default_factory=list)
    # Partition-key column names, ordered by their key position.
    partition: List[str] = Field(default_factory=list)
    # (column_name, clustering_order) pairs, ordered by key position.
    clustering: List[Tuple[str, str]] = Field(default_factory=list)
    # (index_name, kind, options) triples for the table's secondary indexes.
    indexes: List[Tuple[str, str, str]] = Field(default_factory=list)
    # Instances are immutable once validated.
    model_config = ConfigDict(
        frozen=True,
    )
    @model_validator(mode="after")
    def check_required_fields(self) -> Self:
        """Ensure the table has at least one column and one partition key."""
        if not self.columns:
            raise ValueError("non-empty column list for must be provided")
        if not self.partition:
            raise ValueError("non-empty partition list must be provided")
        return self
    @classmethod
    def from_database(
        cls, keyspace: str, table_name: str, db: CassandraDatabase
    ) -> Table:
        """Build a ``Table`` by reading schema metadata from the database.

        Args:
            keyspace: Keyspace the table lives in.
            table_name: Name of the table to describe.
            db: Database wrapper used to query ``system_schema``.

        Returns:
            A fully populated, validated ``Table`` instance.
        """
        columns, partition, clustering = cls._resolve_columns(keyspace, table_name, db)
        return cls(
            keyspace=keyspace,
            table_name=table_name,
            comment=cls._resolve_comment(keyspace, table_name, db),
            columns=columns,
            partition=partition,
            clustering=clustering,
            indexes=cls._resolve_indexes(keyspace, table_name, db),
        )
    def as_markdown(
        self, include_keyspace: bool = True, header_level: Optional[int] = None
    ) -> str:
        """
        Generates a Markdown representation of the Cassandra table schema, allowing for
        customizable header levels for the table name section.
        Args:
            include_keyspace: If True, includes the keyspace in the output.
                Defaults to True.
            header_level: Specifies the markdown header level for the table name.
                If None, the table name is included without a header.
                Defaults to None (no header level).
        Returns:
            A string in Markdown format detailing the table name
            (with optional header level), keyspace (optional), comment, columns,
            partition keys, clustering keys (with optional clustering order),
            and indexes.
        """
        output = ""
        if header_level is not None:
            output += f"{'#' * header_level} "
        output += f"Table Name: {self.table_name}\n"
        if include_keyspace:
            output += f"- Keyspace: {self.keyspace}\n"
        if self.comment:
            output += f"- Comment: {self.comment}\n"
        output += "- Columns\n"
        # NOTE(review): the loop variable shadows the ``type`` builtin;
        # left as-is to keep the code byte-identical.
        for column, type in self.columns:
            output += f"  - {column} ({type})\n"
        output += f"- Partition Keys: ({', '.join(self.partition)})\n"
        output += "- Clustering Keys: "
        if self.clustering:
            cluster_list = []
            for column, clustering_order in self.clustering:
                # A clustering order of "none" means no explicit ordering;
                # omit it from the rendered output.
                if clustering_order.lower() == "none":
                    cluster_list.append(column)
                else:
                    cluster_list.append(f"{column} {clustering_order}")
            output += f"({', '.join(cluster_list)})\n"
        if self.indexes:
            output += "- Indexes\n"
            for name, kind, options in self.indexes:
                output += f"  - {name} : kind={kind}, options={options}\n"
        return output
    @staticmethod
    def _resolve_comment(
        keyspace: str, table_name: str, db: CassandraDatabase
    ) -> Optional[str]:
        """Fetch the table's comment from ``system_schema.tables``.

        Returns:
            The comment string, or ``None`` when no comment is set.

        Raises:
            ValueError: If ``db.run`` returns something other than a dict.
        """
        # NOTE(review): keyspace/table_name are interpolated directly into
        # the CQL string (no quoting/escaping); callers are expected to pass
        # trusted identifiers — confirm upstream sanitization.
        result = db.run(
            f"""SELECT comment
            FROM system_schema.tables
            WHERE keyspace_name = '{keyspace}'
            AND table_name = '{table_name}';""",
            fetch="one",
        )
        if isinstance(result, dict):
            comment = result.get("comment")
            if comment:
                return comment
            else:
                return None  # Default comment if none is found
        else:
            raise ValueError(
                f"""Unexpected result type from db.run:
                {type(result).__name__}"""
            )
    @staticmethod
    def _resolve_columns(
        keyspace: str, table_name: str, db: CassandraDatabase
    ) -> Tuple[List[Tuple[str, str]], List[str], List[Tuple[str, str]]]:
        """Read column metadata and split it into columns/partition/clustering.

        Returns:
            A 3-tuple of (all columns as (name, type) pairs, partition-key
            names ordered by position, clustering keys as
            (name, clustering_order) pairs ordered by position).

        Raises:
            TypeError: If ``db.run`` does not return a sequence.
        """
        columns = []
        partition_info = []
        cluster_info = []
        # NOTE(review): identifiers interpolated into CQL without escaping;
        # see _resolve_comment.
        results = db.run(
            f"""SELECT column_name, type, kind, clustering_order, position
            FROM system_schema.columns
            WHERE keyspace_name = '{keyspace}'
            AND table_name = '{table_name}';"""
        )
        # Type check to ensure 'results' is a sequence of dictionaries.
        if not isinstance(results, Sequence):
            raise TypeError("Expected a sequence of dictionaries from 'run' method.")
        for row in results:
            if not isinstance(row, Dict):
                continue  # Skip if the row is not a dictionary.
            columns.append((row["column_name"], row["type"]))
            if row["kind"] == "partition_key":
                partition_info.append((row["column_name"], row["position"]))
            elif row["kind"] == "clustering":
                cluster_info.append(
                    (row["column_name"], row["clustering_order"], row["position"])
                )
        # Order both key lists by the 'position' column before dropping it.
        partition = [
            column_name for column_name, _ in sorted(partition_info, key=lambda x: x[1])
        ]
        cluster = [
            (column_name, clustering_order)
            for column_name, clustering_order, _ in sorted(
                cluster_info, key=lambda x: x[2]
            )
        ]
        return columns, partition, cluster
    @staticmethod
    def _resolve_indexes(
        keyspace: str, table_name: str, db: CassandraDatabase
    ) -> List[Tuple[str, str, str]]:
        """Read index metadata as (index_name, kind, options-as-string) triples.

        Raises:
            TypeError: If ``db.run`` does not return a sequence.
        """
        indexes = []
        # NOTE(review): identifiers interpolated into CQL without escaping;
        # see _resolve_comment.
        results = db.run(
            f"""SELECT index_name, kind, options
            FROM system_schema.indexes
            WHERE keyspace_name = '{keyspace}'
            AND table_name = '{table_name}';"""
        )
        # Type check to ensure 'results' is a sequence of dictionaries
        if not isinstance(results, Sequence):
            raise TypeError("Expected a sequence of dictionaries from 'run' method.")
        for row in results:
            if not isinstance(row, Dict):
                continue  # Skip if the row is not a dictionary.
            # Convert 'options' to string if it's not already,
            # assuming it's JSON-like and needs conversion
            index_options = row["options"]
            if not isinstance(index_options, str):
                # Assuming index_options needs to be serialized or simply converted
                index_options = str(index_options)
            indexes.append((row["index_name"], row["kind"], index_options))
        return indexes
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/google_books.py | """Util that calls Google Books."""
from typing import Dict, List, Optional
import requests
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
GOOGLE_BOOKS_MAX_ITEM_SIZE = 5
GOOGLE_BOOKS_API_URL = "https://www.googleapis.com/books/v1/volumes"
class GoogleBooksAPIWrapper(BaseModel):
    """Wrapper around Google Books API.

    To use, you should have a Google Books API key available.
    This wrapper will use the Google Books API to conduct searches and
    fetch books based on a query passed in by the agents. By default,
    it will return the top-k results.

    The response for each book will contain the book title, author name,
    summary, and a source link.
    """

    # API key; resolved from GOOGLE_BOOKS_API_KEY env var by the validator.
    google_books_api_key: Optional[str] = None
    # Maximum number of volumes requested per search.
    top_k_results: int = GOOGLE_BOOKS_MAX_ITEM_SIZE

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key exists in environment."""
        google_books_api_key = get_from_dict_or_env(
            values, "google_books_api_key", "GOOGLE_BOOKS_API_KEY"
        )
        values["google_books_api_key"] = google_books_api_key
        return values

    def run(self, query: str) -> str:
        """Search Google Books and return a formatted summary of the results.

        Args:
            query: Free-text search query.

        Returns:
            A human-readable list of suggestions, or an error message when
            the API request fails.
        """
        # Build URL parameters from API key, query, and max results.
        params = (
            ("q", query),
            ("maxResults", self.top_k_results),
            ("key", self.google_books_api_key),
        )
        response = requests.get(GOOGLE_BOOKS_API_URL, params=params)
        payload = response.json()
        # Error handling: surface the API's own message when available.
        if response.status_code != 200:
            code = response.status_code
            error = payload.get("error", {}).get("message", "Internal failure")
            return f"Unable to retrieve books got status code {code}: {error}"
        return self._format(query, payload.get("items", []))

    def _format(self, query: str, books: List) -> str:
        """Render the raw volume list into a numbered, human-readable list.

        BUG FIX: volumes frequently omit optional fields (``authors``,
        ``description``, ``infoLink``); the original indexed them directly
        and raised ``KeyError``. Missing fields now fall back to
        placeholder text.
        """
        if not books:
            return f"Sorry no books could be found for your query: {query}"
        results = [f"Here are {len(books)} suggestions for books related to {query}:"]
        for i, book in enumerate(books, start=1):
            info = book.get("volumeInfo", {})
            title = info.get("title", "Unknown title")
            authors = self._format_authors(info.get("authors", ["Unknown author"]))
            summary = info.get("description", "No description available")
            source = info.get("infoLink", "No source link available")
            desc = f'{i}. "{title}" by {authors}: {summary}\n'
            desc += f"You can read more at {source}"
            results.append(desc)
        return "\n\n".join(results)

    def _format_authors(self, authors: List) -> str:
        """Join author names as ``"A, B and C"`` (single author returned as-is)."""
        if len(authors) == 1:
            return authors[0]
        return "{} and {}".format(", ".join(authors[:-1]), authors[-1])
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/brave_search.py | import json
from typing import List
import requests
from langchain_core.documents import Document
from pydantic import BaseModel, Field
class BraveSearchWrapper(BaseModel):
    """Wrapper around the Brave search engine."""

    api_key: str
    """The API key to use for the Brave search engine."""
    search_kwargs: dict = Field(default_factory=dict)
    """Additional keyword arguments to pass to the search request."""
    base_url: str = "https://api.search.brave.com/res/v1/web/search"
    """The base URL for the Brave search engine."""

    @staticmethod
    def _merged_snippet(item: dict) -> str:
        """Join the description with any extra snippets, skipping blanks."""
        pieces = [item.get("description"), *item.get("extra_snippets", [])]
        return " ".join(piece for piece in pieces if piece)

    def run(self, query: str) -> str:
        """Query the Brave search engine and return the results as a JSON string.

        Args:
            query: The query to search for.

        Returns: The results as a JSON string.
        """
        formatted = [
            {
                "title": item.get("title"),
                "link": item.get("url"),
                "snippet": self._merged_snippet(item),
            }
            for item in self._search_request(query=query)
        ]
        return json.dumps(formatted)

    def download_documents(self, query: str) -> List[Document]:
        """Query the Brave search engine and return the results as a list of Documents.

        Args:
            query: The query to search for.

        Returns: The results as a list of Documents.
        """
        return [
            Document(
                page_content=self._merged_snippet(item),
                metadata={"title": item.get("title"), "link": item.get("url")},
            )
            for item in self._search_request(query)
        ]

    def _search_request(self, query: str) -> List[dict]:
        """Issue the GET request to the Brave API and return the raw result list."""
        request_headers = {
            "X-Subscription-Token": self.api_key,
            "Accept": "application/json",
        }
        # Build the final URL (base + merged query params) via PreparedRequest
        # so search_kwargs are encoded exactly like the query parameters.
        prepared = requests.PreparedRequest()
        prepared.prepare_url(
            self.base_url, {**self.search_kwargs, "q": query, "extra_snippets": True}
        )
        if prepared.url is None:
            raise ValueError("prepared url is None, this should not happen")
        response = requests.get(prepared.url, headers=request_headers)
        if not response.ok:
            raise Exception(f"HTTP error {response.status_code}")
        return response.json().get("web", {}).get("results", [])
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/scenexplain.py | """Util that calls SceneXplain.
In order to set this up, you need API key for the SceneXplain API.
You can obtain a key by following the steps below.
- Sign up for a free account at https://scenex.jina.ai/.
- Navigate to the API Access page (https://scenex.jina.ai/api) and create a new API key.
"""
from typing import Any, Dict
import requests
from langchain_core.utils import from_env, get_from_dict_or_env
from pydantic import BaseModel, Field, model_validator
class SceneXplainAPIWrapper(BaseModel):
    """Wrapper for SceneXplain API.

    In order to set this up, you need API key for the SceneXplain API.
    You can obtain a key by following the steps below.
    - Sign up for a free account at https://scenex.jina.ai/.
    - Navigate to the API Access page (https://scenex.jina.ai/api)
      and create a new API key.
    """

    scenex_api_key: str = Field(..., default_factory=from_env("SCENEX_API_KEY"))  # type: ignore[call-overload]
    scenex_api_url: str = "https://api.scenex.jina.ai/v1/describe"

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key exists in environment."""
        values["scenex_api_key"] = get_from_dict_or_env(
            values, "scenex_api_key", "SCENEX_API_KEY"
        )
        return values

    def _describe_image(self, image: str) -> str:
        """POST the image to SceneXplain and return the generated caption."""
        request_headers = {
            "x-api-key": f"token {self.scenex_api_key}",
            "content-type": "application/json",
        }
        body = {
            "data": [
                {
                    "image": image,
                    "algorithm": "Ember",
                    "languages": ["en"],
                }
            ]
        }
        response = requests.post(self.scenex_api_url, headers=request_headers, json=body)
        response.raise_for_status()
        descriptions = response.json().get("result", [])
        # The API returns one entry per submitted image; we send exactly one.
        first = descriptions[0] if descriptions else {}
        return first.get("text", "")

    def run(self, image: str) -> str:
        """Run SceneXplain image explainer."""
        description = self._describe_image(image)
        return description if description else "No description found."
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/awslambda.py | """Util that calls Lambda."""
import json
from typing import Any, Dict, Optional
from pydantic import BaseModel, ConfigDict, model_validator
class LambdaWrapper(BaseModel):
    """Wrapper for AWS Lambda SDK.

    To use, you should have the ``boto3`` package installed
    and a lambda functions built from the AWS Console or
    CLI. Set up your AWS credentials with ``aws configure``

    Example:
        .. code-block:: bash

            pip install boto3

            aws configure
    """

    lambda_client: Any = None  #: :meta private:
    """The configured boto3 client"""
    function_name: Optional[str] = None
    """The name of your lambda function"""
    awslambda_tool_name: Optional[str] = None
    """If passing to an agent as a tool, the tool name"""
    awslambda_tool_description: Optional[str] = None
    """If passing to an agent as a tool, the description"""

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that python package exists in environment."""
        try:
            import boto3
        except ImportError:
            raise ImportError(
                "boto3 is not installed. Please install it with `pip install boto3`"
            )
        values["lambda_client"] = boto3.client("lambda")
        return values

    def run(self, query: str) -> str:
        """
        Invokes the lambda function and returns the
        result.

        Args:
            query: an input to passed to the lambda
                function as the ``body`` of a JSON
                object.
        """
        res = self.lambda_client.invoke(
            FunctionName=self.function_name,
            InvocationType="RequestResponse",
            Payload=json.dumps({"body": query}),
        )
        try:
            payload_stream = res["Payload"]
            payload_string = payload_stream.read().decode("utf-8")
            answer = json.loads(payload_string)["body"]
        # BUG FIX: the original caught only ``StopIteration``, which none of
        # the calls above can raise, so malformed or schema-less Lambda
        # responses crashed instead of returning the friendly message. Catch
        # the errors these lines actually raise.
        except (StopIteration, KeyError, UnicodeDecodeError, json.JSONDecodeError):
            return "Failed to parse response from Lambda"
        if answer is None or answer == "":
            # We don't want to return the assumption alone if answer is empty
            return "Request failed."
        else:
            return f"Result: {answer}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/__init__.py | """**Utilities** are the integrations with third-part systems and packages.
Other LangChain classes use **Utilities** to interact with third-part systems
and packages.
"""
import importlib
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from langchain_community.utilities.alpha_vantage import (
AlphaVantageAPIWrapper,
)
from langchain_community.utilities.apify import (
ApifyWrapper,
)
from langchain_community.utilities.arcee import (
ArceeWrapper,
)
from langchain_community.utilities.arxiv import (
ArxivAPIWrapper,
)
from langchain_community.utilities.asknews import (
AskNewsAPIWrapper,
)
from langchain_community.utilities.awslambda import (
LambdaWrapper,
)
from langchain_community.utilities.bibtex import (
BibtexparserWrapper,
)
from langchain_community.utilities.bing_search import (
BingSearchAPIWrapper,
)
from langchain_community.utilities.brave_search import (
BraveSearchWrapper,
)
from langchain_community.utilities.dataherald import DataheraldAPIWrapper
from langchain_community.utilities.dria_index import (
DriaAPIWrapper,
)
from langchain_community.utilities.duckduckgo_search import (
DuckDuckGoSearchAPIWrapper,
)
from langchain_community.utilities.golden_query import (
GoldenQueryAPIWrapper,
)
from langchain_community.utilities.google_books import (
GoogleBooksAPIWrapper,
)
from langchain_community.utilities.google_finance import (
GoogleFinanceAPIWrapper,
)
from langchain_community.utilities.google_jobs import (
GoogleJobsAPIWrapper,
)
from langchain_community.utilities.google_lens import (
GoogleLensAPIWrapper,
)
from langchain_community.utilities.google_places_api import (
GooglePlacesAPIWrapper,
)
from langchain_community.utilities.google_scholar import (
GoogleScholarAPIWrapper,
)
from langchain_community.utilities.google_search import (
GoogleSearchAPIWrapper,
)
from langchain_community.utilities.google_serper import (
GoogleSerperAPIWrapper,
)
from langchain_community.utilities.google_trends import (
GoogleTrendsAPIWrapper,
)
from langchain_community.utilities.graphql import (
GraphQLAPIWrapper,
)
from langchain_community.utilities.infobip import (
InfobipAPIWrapper,
)
from langchain_community.utilities.jira import (
JiraAPIWrapper,
)
from langchain_community.utilities.max_compute import (
MaxComputeAPIWrapper,
)
from langchain_community.utilities.merriam_webster import (
MerriamWebsterAPIWrapper,
)
from langchain_community.utilities.metaphor_search import (
MetaphorSearchAPIWrapper,
)
from langchain_community.utilities.mojeek_search import (
MojeekSearchAPIWrapper,
)
from langchain_community.utilities.nasa import (
NasaAPIWrapper,
)
from langchain_community.utilities.nvidia_riva import (
AudioStream,
NVIDIARivaASR,
NVIDIARivaStream,
NVIDIARivaTTS,
RivaASR,
RivaTTS,
)
from langchain_community.utilities.openweathermap import (
OpenWeatherMapAPIWrapper,
)
from langchain_community.utilities.oracleai import (
OracleSummary,
)
from langchain_community.utilities.outline import (
OutlineAPIWrapper,
)
from langchain_community.utilities.passio_nutrition_ai import (
NutritionAIAPI,
)
from langchain_community.utilities.portkey import (
Portkey,
)
from langchain_community.utilities.powerbi import (
PowerBIDataset,
)
from langchain_community.utilities.pubmed import (
PubMedAPIWrapper,
)
from langchain_community.utilities.rememberizer import RememberizerAPIWrapper
from langchain_community.utilities.requests import (
Requests,
RequestsWrapper,
TextRequestsWrapper,
)
from langchain_community.utilities.scenexplain import (
SceneXplainAPIWrapper,
)
from langchain_community.utilities.searchapi import (
SearchApiAPIWrapper,
)
from langchain_community.utilities.searx_search import (
SearxSearchWrapper,
)
from langchain_community.utilities.serpapi import (
SerpAPIWrapper,
)
from langchain_community.utilities.spark_sql import (
SparkSQL,
)
from langchain_community.utilities.sql_database import (
SQLDatabase,
)
from langchain_community.utilities.stackexchange import (
StackExchangeAPIWrapper,
)
from langchain_community.utilities.steam import (
SteamWebAPIWrapper,
)
from langchain_community.utilities.tensorflow_datasets import (
TensorflowDatasets,
)
from langchain_community.utilities.twilio import (
TwilioAPIWrapper,
)
from langchain_community.utilities.wikipedia import (
WikipediaAPIWrapper,
)
from langchain_community.utilities.wolfram_alpha import (
WolframAlphaAPIWrapper,
)
from langchain_community.utilities.you import (
YouSearchAPIWrapper,
)
from langchain_community.utilities.zapier import (
ZapierNLAWrapper,
)
__all__ = [
"AlphaVantageAPIWrapper",
"ApifyWrapper",
"ArceeWrapper",
"ArxivAPIWrapper",
"AskNewsAPIWrapper",
"AudioStream",
"BibtexparserWrapper",
"BingSearchAPIWrapper",
"BraveSearchWrapper",
"DataheraldAPIWrapper",
"DriaAPIWrapper",
"DuckDuckGoSearchAPIWrapper",
"GoldenQueryAPIWrapper",
"GoogleBooksAPIWrapper",
"GoogleFinanceAPIWrapper",
"GoogleJobsAPIWrapper",
"GoogleLensAPIWrapper",
"GooglePlacesAPIWrapper",
"GoogleScholarAPIWrapper",
"GoogleSearchAPIWrapper",
"GoogleSerperAPIWrapper",
"GoogleTrendsAPIWrapper",
"GraphQLAPIWrapper",
"InfobipAPIWrapper",
"JiraAPIWrapper",
"LambdaWrapper",
"MaxComputeAPIWrapper",
"MerriamWebsterAPIWrapper",
"MetaphorSearchAPIWrapper",
"MojeekSearchAPIWrapper",
"NVIDIARivaASR",
"NVIDIARivaStream",
"NVIDIARivaTTS",
"NasaAPIWrapper",
"NutritionAIAPI",
"OpenWeatherMapAPIWrapper",
"OracleSummary",
"OutlineAPIWrapper",
"Portkey",
"PowerBIDataset",
"PubMedAPIWrapper",
"RememberizerAPIWrapper",
"Requests",
"RequestsWrapper",
"RivaASR",
"RivaTTS",
"SceneXplainAPIWrapper",
"SearchApiAPIWrapper",
"SQLDatabase",
"SearxSearchWrapper",
"SerpAPIWrapper",
"SparkSQL",
"StackExchangeAPIWrapper",
"SteamWebAPIWrapper",
"TensorflowDatasets",
"TextRequestsWrapper",
"TwilioAPIWrapper",
"WikipediaAPIWrapper",
"WolframAlphaAPIWrapper",
"YouSearchAPIWrapper",
"ZapierNLAWrapper",
]
_module_lookup = {
"AlphaVantageAPIWrapper": "langchain_community.utilities.alpha_vantage",
"ApifyWrapper": "langchain_community.utilities.apify",
"ArceeWrapper": "langchain_community.utilities.arcee",
"ArxivAPIWrapper": "langchain_community.utilities.arxiv",
"AskNewsAPIWrapper": "langchain_community.utilities.asknews",
"AudioStream": "langchain_community.utilities.nvidia_riva",
"BibtexparserWrapper": "langchain_community.utilities.bibtex",
"BingSearchAPIWrapper": "langchain_community.utilities.bing_search",
"BraveSearchWrapper": "langchain_community.utilities.brave_search",
"DataheraldAPIWrapper": "langchain_community.utilities.dataherald",
"DriaAPIWrapper": "langchain_community.utilities.dria_index",
"DuckDuckGoSearchAPIWrapper": "langchain_community.utilities.duckduckgo_search",
"GoldenQueryAPIWrapper": "langchain_community.utilities.golden_query",
"GoogleBooksAPIWrapper": "langchain_community.utilities.google_books",
"GoogleFinanceAPIWrapper": "langchain_community.utilities.google_finance",
"GoogleJobsAPIWrapper": "langchain_community.utilities.google_jobs",
"GoogleLensAPIWrapper": "langchain_community.utilities.google_lens",
"GooglePlacesAPIWrapper": "langchain_community.utilities.google_places_api",
"GoogleScholarAPIWrapper": "langchain_community.utilities.google_scholar",
"GoogleSearchAPIWrapper": "langchain_community.utilities.google_search",
"GoogleSerperAPIWrapper": "langchain_community.utilities.google_serper",
"GoogleTrendsAPIWrapper": "langchain_community.utilities.google_trends",
"GraphQLAPIWrapper": "langchain_community.utilities.graphql",
"InfobipAPIWrapper": "langchain_community.utilities.infobip",
"JiraAPIWrapper": "langchain_community.utilities.jira",
"LambdaWrapper": "langchain_community.utilities.awslambda",
"MaxComputeAPIWrapper": "langchain_community.utilities.max_compute",
"MerriamWebsterAPIWrapper": "langchain_community.utilities.merriam_webster",
"MetaphorSearchAPIWrapper": "langchain_community.utilities.metaphor_search",
"MojeekSearchAPIWrapper": "langchain_community.utilities.mojeek_search",
"NVIDIARivaASR": "langchain_community.utilities.nvidia_riva",
"NVIDIARivaStream": "langchain_community.utilities.nvidia_riva",
"NVIDIARivaTTS": "langchain_community.utilities.nvidia_riva",
"NasaAPIWrapper": "langchain_community.utilities.nasa",
"NutritionAIAPI": "langchain_community.utilities.passio_nutrition_ai",
"OpenWeatherMapAPIWrapper": "langchain_community.utilities.openweathermap",
"OracleSummary": "langchain_community.utilities.oracleai",
"OutlineAPIWrapper": "langchain_community.utilities.outline",
"Portkey": "langchain_community.utilities.portkey",
"PowerBIDataset": "langchain_community.utilities.powerbi",
"PubMedAPIWrapper": "langchain_community.utilities.pubmed",
"RememberizerAPIWrapper": "langchain_community.utilities.rememberizer",
"Requests": "langchain_community.utilities.requests",
"RequestsWrapper": "langchain_community.utilities.requests",
"RivaASR": "langchain_community.utilities.nvidia_riva",
"RivaTTS": "langchain_community.utilities.nvidia_riva",
"SQLDatabase": "langchain_community.utilities.sql_database",
"SceneXplainAPIWrapper": "langchain_community.utilities.scenexplain",
"SearchApiAPIWrapper": "langchain_community.utilities.searchapi",
"SearxSearchWrapper": "langchain_community.utilities.searx_search",
"SerpAPIWrapper": "langchain_community.utilities.serpapi",
"SparkSQL": "langchain_community.utilities.spark_sql",
"StackExchangeAPIWrapper": "langchain_community.utilities.stackexchange",
"SteamWebAPIWrapper": "langchain_community.utilities.steam",
"TensorflowDatasets": "langchain_community.utilities.tensorflow_datasets",
"TextRequestsWrapper": "langchain_community.utilities.requests",
"TwilioAPIWrapper": "langchain_community.utilities.twilio",
"WikipediaAPIWrapper": "langchain_community.utilities.wikipedia",
"WolframAlphaAPIWrapper": "langchain_community.utilities.wolfram_alpha",
"YouSearchAPIWrapper": "langchain_community.utilities.you",
"ZapierNLAWrapper": "langchain_community.utilities.zapier",
}
REMOVED = {
"PythonREPL": (
"PythonREPL has been deprecated from langchain_community "
"due to being flagged by security scanners. See: "
"https://github.com/langchain-ai/langchain/issues/14345 "
"If you need to use it, please use the version "
"from langchain_experimental. "
"from langchain_experimental.utilities.python import PythonREPL."
)
}
def __getattr__(name: str) -> Any:
    """Lazily resolve public names to their implementation modules.

    Removed names raise an ``AssertionError`` explaining the removal;
    unknown names raise ``AttributeError`` as usual.
    """
    if name in REMOVED:
        raise AssertionError(REMOVED[name])
    module_path = _module_lookup.get(name)
    if module_path is not None:
        return getattr(importlib.import_module(module_path), name)
    raise AttributeError(f"module {__name__} has no attribute {name}")
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/google_trends.py | """Util that calls Google Scholar Search."""
from typing import Any, Dict, Optional, cast
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, SecretStr, model_validator
class GoogleTrendsAPIWrapper(BaseModel):
    """Wrapper for SerpApi's Google Trends API
    You can create SerpApi.com key by signing up at: https://serpapi.com/users/sign_up.
    The wrapper uses the SerpApi.com python package:
    https://serpapi.com/integrations/python
    To use, you should have the environment variable ``SERPAPI_API_KEY``
    set with your API key, or pass `serp_api_key` as a named parameter
    to the constructor.
    Example:
        .. code-block:: python
        from langchain_community.utilities import GoogleTrendsAPIWrapper
        google_trends = GoogleTrendsAPIWrapper()
        google_trends.run('langchain')
    """
    # The SerpApiClient class itself (not an instance); set by the validator.
    serp_search_engine: Any = None
    # SerpApi key; resolved from SERPAPI_API_KEY when not passed explicitly.
    serp_api_key: Optional[SecretStr] = None
    model_config = ConfigDict(
        extra="forbid",
    )
    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        values["serp_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(values, "serp_api_key", "SERPAPI_API_KEY")
        )
        try:
            from serpapi import SerpApiClient
        except ImportError:
            raise ImportError(
                "google-search-results is not installed. "
                "Please install it with `pip install google-search-results"
                ">=2.4.2`"
            )
        serp_search_engine = SerpApiClient
        values["serp_search_engine"] = serp_search_engine
        return values
    def run(self, query: str) -> str:
        """Run query through Google Trends with Serpapi"""
        serpapi_api_key = cast(SecretStr, self.serp_api_key)
        # First request: interest-over-time timeline for the query.
        params = {
            "engine": "google_trends",
            "api_key": serpapi_api_key.get_secret_value(),
            "q": query,
        }
        total_results: Any = []
        client = self.serp_search_engine(params)
        client_dict = client.get_dict()
        total_results = (
            client_dict["interest_over_time"]["timeline_data"]
            if "interest_over_time" in client_dict
            else None
        )
        if not total_results:
            return "No good Trend Result was found"
        # NOTE(review): dates are split on whitespace and indexed positionally
        # below (start_date[1], end_date[3]); this assumes the date-range
        # format SerpApi returns — confirm for other locales/granularities.
        start_date = total_results[0]["date"].split()
        end_date = total_results[-1]["date"].split()
        values = [
            results.get("values")[0].get("extracted_value") for results in total_results
        ]
        min_value = min(values)
        max_value = max(values)
        avg_value = sum(values) / len(values)
        # Percent change first->last; when the first value is 0 the raw
        # difference is used instead (both divisor and scale fall back to 1).
        percentage_change = (
            (values[-1] - values[0])
            / (values[0] if values[0] != 0 else 1)
            * (100 if values[0] != 0 else 1)
        )
        # Second request: related queries (rising and top) for the same term.
        params = {
            "engine": "google_trends",
            "api_key": serpapi_api_key.get_secret_value(),
            "data_type": "RELATED_QUERIES",
            "q": query,
        }
        total_results2 = {}
        client = self.serp_search_engine(params)
        total_results2 = client.get_dict().get("related_queries", {})
        rising = []
        top = []
        rising = [results.get("query") for results in total_results2.get("rising", [])]
        top = [results.get("query") for results in total_results2.get("top", [])]
        # Assemble a single human-readable summary block.
        doc = [
            f"Query: {query}\n"
            f"Date From: {start_date[0]} {start_date[1]}, {start_date[-1]}\n"
            f"Date To: {end_date[0]} {end_date[3]} {end_date[-1]}\n"
            f"Min Value: {min_value}\n"
            f"Max Value: {max_value}\n"
            f"Average Value: {avg_value}\n"
            f"Percent Change: {str(percentage_change) + '%'}\n"
            f"Trend values: {', '.join([str(x) for x in values])}\n"
            f"Rising Related Queries: {', '.join(rising)}\n"
            f"Top Related Queries: {', '.join(top)}"
        ]
        return "\n\n".join(doc)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/dataherald.py | """Util that calls Dataherald."""
from typing import Any, Dict, Optional
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
class DataheraldAPIWrapper(BaseModel):
    """Wrapper for Dataherald.

    Docs for using:

    1. Go to dataherald and sign up
    2. Create an API key
    3. Save your API key into DATAHERALD_API_KEY env variable
    4. pip install dataherald
    """

    dataherald_client: Any = None  #: :meta private:
    db_connection_id: str
    dataherald_api_key: Optional[str] = None

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        api_key = get_from_dict_or_env(
            values, "dataherald_api_key", "DATAHERALD_API_KEY"
        )
        values["dataherald_api_key"] = api_key
        # Import lazily so dataherald stays an optional dependency.
        try:
            import dataherald
        except ImportError:
            raise ImportError(
                "dataherald is not installed. "
                "Please install it with `pip install dataherald`"
            )
        values["dataherald_client"] = dataherald.Dataherald(api_key=api_key)
        return values

    def run(self, prompt: str) -> str:
        """Generate a sql query through Dataherald and parse result."""
        from dataherald.types.sql_generation_create_params import Prompt

        prompt_obj = Prompt(text=prompt, db_connection_id=self.db_connection_id)
        generation = self.dataherald_client.sql_generations.create(prompt=prompt_obj)
        try:
            sql = generation.sql
        except StopIteration:
            return "Dataherald wasn't able to answer it"
        if not sql:
            # We don't want to return the assumption alone if answer is empty
            return "No answer"
        return f"Answer: {sql}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/searchapi.py | from typing import Any, Dict, Optional
import aiohttp
import requests
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
class SearchApiAPIWrapper(BaseModel):
    """
    Wrapper around SearchApi API.

    To use, you should have the environment variable ``SEARCHAPI_API_KEY``
    set with your API key, or pass `searchapi_api_key`
    as a named parameter to the constructor.

    Example:
        .. code-block:: python

    from langchain_community.utilities import SearchApiAPIWrapper
    searchapi = SearchApiAPIWrapper()
    """

    # Use "google" engine by default.
    # Full list of supported ones can be found in https://www.searchapi.io docs
    engine: str = "google"
    # API key; resolved from the SEARCHAPI_API_KEY env var when not given.
    searchapi_api_key: Optional[str] = None
    # Optional caller-owned aiohttp session reused for async requests; when
    # None a throwaway session is created per call.
    aiosession: Optional[aiohttp.ClientSession] = None

    # arbitrary_types_allowed is needed because aiohttp.ClientSession is not
    # a pydantic-native type.
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that API key exists in environment."""
        searchapi_api_key = get_from_dict_or_env(
            values, "searchapi_api_key", "SEARCHAPI_API_KEY"
        )
        values["searchapi_api_key"] = searchapi_api_key
        return values

    def run(self, query: str, **kwargs: Any) -> str:
        """Search and fold the raw JSON response into a single string."""
        results = self.results(query, **kwargs)
        return self._result_as_string(results)

    async def arun(self, query: str, **kwargs: Any) -> str:
        """Async variant of :meth:`run`."""
        results = await self.aresults(query, **kwargs)
        return self._result_as_string(results)

    def results(self, query: str, **kwargs: Any) -> dict:
        """Search and return the raw JSON response as a dict."""
        results = self._search_api_results(query, **kwargs)
        return results

    async def aresults(self, query: str, **kwargs: Any) -> dict:
        """Async variant of :meth:`results`."""
        results = await self._async_search_api_results(query, **kwargs)
        return results

    def _prepare_request(self, query: str, **kwargs: Any) -> dict:
        """Build the url/headers/params for a request.

        Extra kwargs are forwarded as query parameters; None values are
        dropped so they do not override server-side defaults.
        """
        return {
            "url": "https://www.searchapi.io/api/v1/search",
            "headers": {
                "Authorization": f"Bearer {self.searchapi_api_key}",
            },
            "params": {
                "engine": self.engine,
                "q": query,
                **{key: value for key, value in kwargs.items() if value is not None},
            },
        }

    def _search_api_results(self, query: str, **kwargs: Any) -> dict:
        """Send the request synchronously; raises on HTTP error status."""
        request_details = self._prepare_request(query, **kwargs)
        response = requests.get(
            url=request_details["url"],
            params=request_details["params"],
            headers=request_details["headers"],
        )
        response.raise_for_status()
        return response.json()

    async def _async_search_api_results(self, query: str, **kwargs: Any) -> dict:
        """Use aiohttp to send request to SearchApi API and return results async."""
        request_details = self._prepare_request(query, **kwargs)
        if not self.aiosession:
            async with aiohttp.ClientSession() as session:
                async with session.get(
                    url=request_details["url"],
                    headers=request_details["headers"],
                    params=request_details["params"],
                    raise_for_status=True,
                ) as response:
                    results = await response.json()
        else:
            # Reuse the caller-provided session; it is not closed here.
            async with self.aiosession.get(
                url=request_details["url"],
                headers=request_details["headers"],
                params=request_details["params"],
                raise_for_status=True,
            ) as response:
                results = await response.json()
        return results

    @staticmethod
    def _result_as_string(result: dict) -> str:
        """Pick the most informative part of a raw response and render it.

        Checks in priority order: answer box (answer, then snippet),
        knowledge graph, organic results, jobs, videos, images. Falls back
        to a fixed message when nothing matches.
        """
        toret = "No good search result found"
        if "answer_box" in result.keys() and "answer" in result["answer_box"].keys():
            toret = result["answer_box"]["answer"]
        elif "answer_box" in result.keys() and "snippet" in result["answer_box"].keys():
            toret = result["answer_box"]["snippet"]
        elif "knowledge_graph" in result.keys():
            # NOTE(review): assumes a knowledge graph always carries a
            # "description" key — a KeyError is possible otherwise; confirm
            # against real API responses.
            toret = result["knowledge_graph"]["description"]
        elif "organic_results" in result.keys():
            snippets = [
                r["snippet"] for r in result["organic_results"] if "snippet" in r.keys()
            ]
            toret = "\n".join(snippets)
        elif "jobs" in result.keys():
            jobs = [
                r["description"] for r in result["jobs"] if "description" in r.keys()
            ]
            toret = "\n".join(jobs)
        elif "videos" in result.keys():
            videos = [
                f"""Title: "{r["title"]}" Link: {r["link"]}"""
                for r in result["videos"]
                if "title" in r.keys()
            ]
            toret = "\n".join(videos)
        elif "images" in result.keys():
            images = [
                f"""Title: "{r["title"]}" Link: {r["original"]["link"]}"""
                for r in result["images"]
                if "original" in r.keys()
            ]
            toret = "\n".join(images)
        return toret
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/infobip.py | """Util that sends messages via Infobip."""
from typing import Any, Dict, List, Optional
import requests
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
from requests.adapters import HTTPAdapter
from urllib3.util import Retry
class InfobipAPIWrapper(BaseModel):
    """Wrapper for Infobip API for messaging.

    Supports sending SMS and email via Infobip's HTTP API. Credentials are
    read from the ``INFOBIP_API_KEY`` and ``INFOBIP_BASE_URL`` environment
    variables when not supplied explicitly.
    """

    infobip_api_key: Optional[str] = None
    infobip_base_url: Optional[str] = "https://api.infobip.com"

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key exists in environment."""
        values["infobip_api_key"] = get_from_dict_or_env(
            values, "infobip_api_key", "INFOBIP_API_KEY"
        )
        values["infobip_base_url"] = get_from_dict_or_env(
            values, "infobip_base_url", "INFOBIP_BASE_URL"
        )
        return values

    def _get_requests_session(self) -> requests.Session:
        """Get a requests session with auth headers and retry behavior."""
        retry_strategy: Retry = Retry(
            total=4,  # Maximum number of retries
            backoff_factor=2,  # Exponential backoff factor
            status_forcelist=[429, 500, 502, 503, 504],  # HTTP status codes to retry on
        )
        adapter: HTTPAdapter = HTTPAdapter(max_retries=retry_strategy)
        session = requests.Session()
        session.mount("https://", adapter)
        session.headers.update(
            {
                "Authorization": f"App {self.infobip_api_key}",
                "User-Agent": "infobip-langchain-community",
            }
        )
        return session

    @staticmethod
    def _interpret_send_response(response: requests.Response) -> str:
        """Translate an Infobip send response into a user-facing string.

        Shared by the SMS and email paths. Returns the message ID on
        success, the service error text on a non-200 response, or a generic
        failure message when the body cannot be interpreted.
        """
        try:
            response_json: Dict = response.json()
        except ValueError:
            # Gateway/error pages may not be JSON at all; don't let the
            # decode error escape as an unrelated exception.
            return "Failed to send message"
        try:
            if response.status_code != 200:
                return response_json["requestError"]["serviceException"]["text"]
        except KeyError:
            return "Failed to send message"
        try:
            return response_json["messages"][0]["messageId"]
        except KeyError:
            return (
                "Could not get message ID from response, message was sent successfully"
            )

    def _send_sms(
        self, sender: str, destination_phone_numbers: List[str], text: str
    ) -> str:
        """Send an SMS message."""
        payload: Dict = {
            "messages": [
                {
                    "destinations": [
                        {"to": destination} for destination in destination_phone_numbers
                    ],
                    "from": sender,
                    "text": text,
                }
            ]
        }
        session: requests.Session = self._get_requests_session()
        session.headers.update(
            {
                "Content-Type": "application/json",
            }
        )
        response: requests.Response = session.post(
            f"{self.infobip_base_url}/sms/2/text/advanced",
            json=payload,
        )
        return self._interpret_send_response(response)

    def _send_email(
        self, from_email: str, to_email: str, subject: str, body: str
    ) -> str:
        """Send an email message."""
        try:
            from requests_toolbelt import MultipartEncoder
        except ImportError as e:
            raise ImportError(
                "Unable to import requests_toolbelt, please install it with "
                "`pip install -U requests-toolbelt`."
            ) from e
        form_data: Dict = {
            "from": from_email,
            "to": to_email,
            "subject": subject,
            "text": body,
        }
        data = MultipartEncoder(fields=form_data)
        session: requests.Session = self._get_requests_session()
        session.headers.update(
            {
                "Content-Type": data.content_type,
            }
        )
        response: requests.Response = session.post(
            f"{self.infobip_base_url}/email/3/send",
            data=data,
        )
        return self._interpret_send_response(response)

    def run(
        self,
        body: str = "",
        to: str = "",
        sender: str = "",
        subject: str = "",
        channel: str = "sms",
    ) -> str:
        """Send a message on the given channel (``"sms"`` or ``"email"``).

        Raises:
            ValueError: if a required field for the channel is empty or the
                channel is unsupported.
        """
        if channel == "sms":
            if sender == "":
                raise ValueError("Sender must be specified for SMS messages")
            if to == "":
                raise ValueError("Destination must be specified for SMS messages")
            if body == "":
                raise ValueError("Body must be specified for SMS messages")
            return self._send_sms(
                sender=sender,
                destination_phone_numbers=[to],
                text=body,
            )
        elif channel == "email":
            if sender == "":
                raise ValueError("Sender must be specified for email messages")
            if to == "":
                raise ValueError("Destination must be specified for email messages")
            if subject == "":
                raise ValueError("Subject must be specified for email messages")
            if body == "":
                raise ValueError("Body must be specified for email messages")
            return self._send_email(
                from_email=sender,
                to_email=to,
                subject=subject,
                body=body,
            )
        else:
            raise ValueError(f"Channel {channel} is not supported")
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/steam.py | """Util that calls Steam-WebAPI."""
from typing import Any, List
from pydantic import BaseModel, ConfigDict, model_validator
from langchain_community.tools.steam.prompt import (
STEAM_GET_GAMES_DETAILS,
STEAM_GET_RECOMMENDED_GAMES,
)
class SteamWebAPIWrapper(BaseModel):
    """Wrapper for Steam API."""

    # python-steam-api client instance; populated by validate_environment.
    steam: Any = None  # for python-steam-api

    # operations: a list of dictionaries, each representing a specific operation that
    # can be performed with the API
    operations: List[dict] = [
        {
            "mode": "get_game_details",
            "name": "Get Game Details",
            "description": STEAM_GET_GAMES_DETAILS,
        },
        {
            "mode": "get_recommended_games",
            "name": "Get Recommended Games",
            "description": STEAM_GET_RECOMMENDED_GAMES,
        },
    ]

    model_config = ConfigDict(
        extra="forbid",
    )

    def get_operations(self) -> List[dict]:
        """Return a list of operations."""
        return self.operations

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: dict) -> Any:
        """Validate api key and python package has been configured."""
        # check if the required python packages are installed
        try:
            from steam import Steam
        except ImportError:
            raise ImportError("python-steam-api library is not installed. ")
        try:
            from decouple import config
        except ImportError:
            raise ImportError("decouple library is not installed. ")
        # initialize the steam attribute for python-steam-api usage
        KEY = config("STEAM_KEY")
        steam = Steam(KEY)
        values["steam"] = steam
        return values

    def parse_to_str(self, details: dict) -> str:
        """Render a details dict as one "The <key> is: <value>" line per item."""
        result = ""
        for key, value in details.items():
            result += "The " + str(key) + " is: " + str(value) + "\n"
        return result

    def get_id_link_price(self, games: dict) -> dict:
        """Return id/link/price of the first app in a search response.

        The response may contain more than one game; only the first hit is
        used.
        """
        game_info = {}
        for app in games["apps"]:
            game_info["id"] = app["id"]
            game_info["link"] = app["link"]
            game_info["price"] = app["price"]
            break
        return game_info

    def remove_html_tags(self, html_string: str) -> str:
        """Strip HTML markup and return plain text."""
        from bs4 import BeautifulSoup

        soup = BeautifulSoup(html_string, "html.parser")
        return soup.get_text()

    def details_of_games(self, name: str) -> str:
        """Look up a game by name and return a human-readable summary."""
        games = self.steam.apps.search_games(name)
        info_partOne_dict = self.get_id_link_price(games)
        info_partOne = self.parse_to_str(info_partOne_dict)
        id = str(info_partOne_dict.get("id"))
        info_dict = self.steam.apps.get_app_details(id)
        data = info_dict.get(id).get("data")
        detailed_description = data.get("detailed_description")
        # detailed_description contains <li> <br> some other html tags, so we need to
        # remove them
        detailed_description = self.remove_html_tags(detailed_description)
        supported_languages = info_dict.get(id).get("data").get("supported_languages")
        info_partTwo = (
            "The summary of the game is: "
            + detailed_description
            + "\n"
            + "The supported languages of the game are: "
            + supported_languages
            + "\n"
        )
        info = info_partOne + info_partTwo
        return info

    def get_steam_id(self, name: str) -> str:
        """Resolve a user name to a Steam ID."""
        user = self.steam.users.search_user(name)
        steam_id = user["player"]["steamid"]
        return steam_id

    def get_users_games(self, steam_id: str) -> dict:
        # NOTE: the API returns a dict with a "games" list (used as
        # users_games["games"] below), not List[str]; annotation corrected.
        return self.steam.users.get_owned_games(steam_id, False, False)

    def recommended_games(self, steam_id: str) -> str:
        """Recommend popular games in the user's most-played genre.

        Counts genres across the user's owned games, finds the most common
        genre, then returns (as a stringified list) the five most-played
        games in that genre the user does not already own.
        """
        try:
            import steamspypi
        except ImportError:
            raise ImportError("steamspypi library is not installed.")
        users_games = self.get_users_games(steam_id)
        result = {}  # type: ignore
        most_popular_genre = ""
        most_popular_genre_count = 0
        for game in users_games["games"]:  # type: ignore
            appid = game["appid"]
            data_request = {"request": "appdetails", "appid": appid}
            genreStore = steamspypi.download(data_request)
            genreList = genreStore.get("genre", "").split(", ")
            for genre in genreList:
                result[genre] = result.get(genre, 0) + 1
                if result[genre] > most_popular_genre_count:
                    most_popular_genre_count = result[genre]
                    most_popular_genre = genre
        data_request = dict()
        data_request["request"] = "genre"
        data_request["genre"] = most_popular_genre
        data = steamspypi.download(data_request)
        sorted_data = sorted(
            data.values(), key=lambda x: x.get("average_forever", 0), reverse=True
        )
        # Set for O(1) membership tests while filtering already-owned games.
        owned_games = {game["appid"] for game in users_games["games"]}  # type: ignore
        remaining_games = [
            game for game in sorted_data if game["appid"] not in owned_games
        ]
        top_5_popular_not_owned = [game["name"] for game in remaining_games[:5]]
        return str(top_5_popular_not_owned)

    def run(self, mode: str, game: str) -> str:
        """Dispatch to the requested operation.

        Accepts both the mode name declared in ``operations``
        ("get_game_details") and the historical spelling
        ("get_games_details") for backward compatibility.
        """
        if mode in ("get_games_details", "get_game_details"):
            return self.details_of_games(game)
        elif mode == "get_recommended_games":
            return self.recommended_games(game)
        else:
            raise ValueError(f"Invalid mode {mode} for Steam API.")
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/graphql.py | import json
from typing import Any, Callable, Dict, Optional
from pydantic import BaseModel, ConfigDict, model_validator
class GraphQLAPIWrapper(BaseModel):
    """Wrapper around GraphQL API.

    To use, you should have the ``gql`` python package installed.
    This wrapper will use the GraphQL API to conduct queries.
    """

    # Headers merged into every HTTP request (e.g. auth tokens).
    custom_headers: Optional[Dict[str, str]] = None
    fetch_schema_from_transport: Optional[bool] = None
    graphql_endpoint: str
    gql_client: Any = None  #: :meta private:
    gql_function: Callable[[str], Any]  #: :meta private:

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that the python package exists in the environment."""
        try:
            from gql import Client, gql
            from gql.transport.requests import RequestsHTTPTransport
        except ImportError as e:
            raise ImportError(
                "Could not import gql python package. "
                f"Try installing it with `pip install gql`. Received error: {e}"
            )
        # Build the synchronous HTTP transport and client up front so every
        # call to run() reuses the same connection machinery.
        transport = RequestsHTTPTransport(
            url=values["graphql_endpoint"],
            headers=values.get("custom_headers"),
        )
        values["gql_client"] = Client(
            transport=transport,
            fetch_schema_from_transport=values.get(
                "fetch_schema_from_transport", True
            ),
        )
        values["gql_function"] = gql
        return values

    def run(self, query: str) -> str:
        """Run a GraphQL query and get the results."""
        return json.dumps(self._execute_query(query), indent=2)

    def _execute_query(self, query: str) -> Dict[str, Any]:
        """Execute a GraphQL query and return the results."""
        return self.gql_client.execute(self.gql_function(query))
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/max_compute.py | from __future__ import annotations
from typing import TYPE_CHECKING, Iterator, List, Optional
from langchain_core.utils import get_from_env
if TYPE_CHECKING:
from odps import ODPS
class MaxComputeAPIWrapper:
    """Interface for querying Alibaba Cloud MaxCompute tables."""

    def __init__(self, client: ODPS):
        """Initialize MaxCompute document loader.

        Args:
            client: odps.ODPS MaxCompute client object.
        """
        self.client = client

    @classmethod
    def from_params(
        cls,
        endpoint: str,
        project: str,
        *,
        access_id: Optional[str] = None,
        secret_access_key: Optional[str] = None,
    ) -> MaxComputeAPIWrapper:
        """Convenience constructor that builds the odps.ODPS MaxCompute client
        from the given parameters.

        Args:
            endpoint: MaxCompute endpoint.
            project: A project is a basic organizational unit of MaxCompute,
                similar to a database.
            access_id: MaxCompute access ID. Pass directly or set the
                environment variable `MAX_COMPUTE_ACCESS_ID`.
            secret_access_key: MaxCompute secret access key. Pass directly or
                set the environment variable `MAX_COMPUTE_SECRET_ACCESS_KEY`.
        """
        try:
            from odps import ODPS
        except ImportError as ex:
            raise ImportError(
                "Could not import pyodps python package. "
                "Please install it with `pip install pyodps` or refer to "
                "https://pyodps.readthedocs.io/."
            ) from ex
        # Empty/None credentials fall back to the environment.
        access_id = access_id or get_from_env("access_id", "MAX_COMPUTE_ACCESS_ID")
        secret_access_key = secret_access_key or get_from_env(
            "secret_access_key", "MAX_COMPUTE_SECRET_ACCESS_KEY"
        )
        odps_client = ODPS(
            access_id=access_id,
            secret_access_key=secret_access_key,
            project=project,
            endpoint=endpoint,
        )
        # Fail fast on a bad/unknown project name.
        if not odps_client.exist_project(project):
            raise ValueError(f'The project "{project}" does not exist.')
        return cls(odps_client)

    def lazy_query(self, query: str) -> Iterator[dict]:
        """Execute *query* and lazily yield each result row as a dict."""
        with self.client.execute_sql(query).open_reader() as reader:
            if reader.count == 0:
                raise ValueError("Table contains no data.")
            for record in reader:
                # Each record iterates as (column, value) pairs.
                yield dict(record)

    def query(self, query: str) -> List[dict]:
        """Execute *query* and return all result rows eagerly."""
        return list(self.lazy_query(query))
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/dalle_image_generator.py | """Utility that calls OpenAI's Dall-E Image Generator."""
import logging
from typing import Any, Dict, Mapping, Optional, Tuple, Union
from langchain_core.utils import (
from_env,
get_pydantic_field_names,
secret_from_env,
)
from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
from typing_extensions import Self
from langchain_community.utils.openai import is_openai_v1
logger = logging.getLogger(__name__)
class DallEAPIWrapper(BaseModel):
    """Wrapper for OpenAI's DALL-E Image Generator.

    https://platform.openai.com/docs/guides/images/generations?context=node

    Usage instructions:
    1. `pip install openai`
    2. save your OPENAI_API_KEY in an environment variable
    """

    client: Any = None  #: :meta private:
    async_client: Any = Field(default=None, exclude=True)  #: :meta private:
    model_name: str = Field(default="dall-e-2", alias="model")
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    openai_api_key: Optional[SecretStr] = Field(
        alias="api_key",
        default_factory=secret_from_env(
            "OPENAI_API_KEY",
            default=None,
        ),
    )
    """Automatically inferred from env var `OPENAI_API_KEY` if not provided."""
    openai_api_base: Optional[str] = Field(
        alias="base_url", default_factory=from_env("OPENAI_API_BASE", default=None)
    )
    """Base URL path for API requests, leave blank if not using a proxy or service
    emulator."""
    openai_organization: Optional[str] = Field(
        alias="organization",
        default_factory=from_env(
            ["OPENAI_ORG_ID", "OPENAI_ORGANIZATION"], default=None
        ),
    )
    """Automatically inferred from env var `OPENAI_ORG_ID` if not provided."""
    # to support explicit proxy for OpenAI
    openai_proxy: str = Field(default_factory=from_env("OPENAI_PROXY", default=""))
    request_timeout: Union[float, Tuple[float, float], Any, None] = Field(
        default=None, alias="timeout"
    )
    n: int = 1
    """Number of images to generate"""
    size: str = "1024x1024"
    """Size of image to generate"""
    separator: str = "\n"
    """Separator to use when multiple URLs are returned."""
    quality: Optional[str] = "standard"
    """Quality of the image that will be generated"""
    max_retries: int = 2
    """Maximum number of retries to make when generating."""
    default_headers: Union[Mapping[str, str], None] = None
    default_query: Union[Mapping[str, object], None] = None
    # Configure a custom httpx client. See the
    # [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
    http_client: Union[Any, None] = None
    """Optional httpx.Client."""

    # protected_namespaces=() lets the "model_*" field names above coexist
    # with pydantic's reserved "model_" namespace.
    model_config = ConfigDict(extra="forbid", protected_namespaces=())

    @model_validator(mode="before")
    @classmethod
    def build_extra(cls, values: Dict[str, Any]) -> Any:
        """Build extra kwargs from additional params that were passed in.

        Unknown keyword arguments are moved into ``model_kwargs`` (with a
        warning); declared fields must not also appear in ``model_kwargs``.
        """
        all_required_field_names = get_pydantic_field_names(cls)
        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name in extra:
                raise ValueError(f"Found {field_name} supplied twice.")
            if field_name not in all_required_field_names:
                logger.warning(
                    f"""WARNING! {field_name} is not default parameter.
                    {field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
                extra[field_name] = values.pop(field_name)
        invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
        if invalid_model_kwargs:
            raise ValueError(
                f"Parameters {invalid_model_kwargs} should be specified explicitly. "
                f"Instead they were passed in as part of `model_kwargs` parameter."
            )
        values["model_kwargs"] = extra
        return values

    @model_validator(mode="after")
    def validate_environment(self) -> Self:
        """Validate that api key and python package exists in environment."""
        try:
            import openai
        except ImportError:
            raise ImportError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        if is_openai_v1():
            # openai>=1.0: build sync and async clients scoped to the
            # images endpoint, honoring any caller-provided clients.
            client_params = {
                "api_key": self.openai_api_key.get_secret_value()
                if self.openai_api_key
                else None,
                "organization": self.openai_organization,
                "base_url": self.openai_api_base,
                "timeout": self.request_timeout,
                "max_retries": self.max_retries,
                "default_headers": self.default_headers,
                "default_query": self.default_query,
                "http_client": self.http_client,
            }
            if not self.client:
                self.client = openai.OpenAI(**client_params).images  # type: ignore[arg-type, arg-type, arg-type, arg-type, arg-type, arg-type, arg-type, arg-type]
            if not self.async_client:
                self.async_client = openai.AsyncOpenAI(**client_params).images  # type: ignore[arg-type, arg-type, arg-type, arg-type, arg-type, arg-type, arg-type, arg-type]
        elif not self.client:
            # Legacy openai<1.0 module-level API.
            self.client = openai.Image  # type: ignore[attr-defined]
        else:
            pass
        return self

    def run(self, query: str) -> str:
        """Run query through OpenAI and parse result.

        Returns the generated image URL(s), joined with ``separator`` when
        more than one is produced, or a fixed message when the response
        holds no URLs.
        """
        if is_openai_v1():
            response = self.client.generate(
                prompt=query,
                n=self.n,
                size=self.size,
                model=self.model_name,
                quality=self.quality,
            )
            image_urls = self.separator.join([item.url for item in response.data])
        else:
            # Legacy client: dict-style response; no quality parameter.
            response = self.client.create(
                prompt=query, n=self.n, size=self.size, model=self.model_name
            )
            image_urls = self.separator.join([item["url"] for item in response["data"]])
        return image_urls if image_urls else "No image was generated"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/serpapi.py | """Chain that calls SerpAPI.
Heavily borrowed from https://github.com/ofirpress/self-ask
"""
import os
import sys
from typing import Any, Dict, Optional, Tuple
import aiohttp
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, Field, model_validator
class HiddenPrints:
    """Context manager to hide prints."""

    def __enter__(self) -> None:
        """Open file to pipe stdout to."""
        # Keep a handle on the real stream so __exit__ can restore it.
        self._saved_stdout, sys.stdout = sys.stdout, open(os.devnull, "w")

    def __exit__(self, *_: Any) -> None:
        """Close file that stdout was piped to."""
        sink = sys.stdout
        sys.stdout = self._saved_stdout
        sink.close()
class SerpAPIWrapper(BaseModel):
    """Wrapper around SerpAPI.

    To use, you should have the ``google-search-results`` python package installed,
    and the environment variable ``SERPAPI_API_KEY`` set with your API key, or pass
    `serpapi_api_key` as a named parameter to the constructor.

    Example:
        .. code-block:: python

    from langchain_community.utilities import SerpAPIWrapper
    serpapi = SerpAPIWrapper()
    """

    # GoogleSearch class from the serpapi package; set by validate_environment.
    search_engine: Any = None  #: :meta private:
    # Default engine/query parameters merged into every search.
    params: dict = Field(
        default={
            "engine": "google",
            "google_domain": "google.com",
            "gl": "us",
            "hl": "en",
        }
    )
    serpapi_api_key: Optional[str] = None
    # Optional caller-owned aiohttp session for aresults(); a throwaway
    # session is created per call when None.
    aiosession: Optional[aiohttp.ClientSession] = None

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        serpapi_api_key = get_from_dict_or_env(
            values, "serpapi_api_key", "SERPAPI_API_KEY"
        )
        values["serpapi_api_key"] = serpapi_api_key
        try:
            from serpapi import GoogleSearch

            values["search_engine"] = GoogleSearch
        except ImportError:
            raise ImportError(
                "Could not import serpapi python package. "
                "Please install it with `pip install google-search-results`."
            )
        return values

    async def arun(self, query: str, **kwargs: Any) -> str:
        """Run query through SerpAPI and parse result async."""
        # NOTE(review): **kwargs are accepted but not forwarded to
        # aresults() — confirm whether that is intended.
        return self._process_response(await self.aresults(query))

    def run(self, query: str, **kwargs: Any) -> str:
        """Run query through SerpAPI and parse result."""
        # NOTE(review): **kwargs are accepted but not forwarded to
        # results() — confirm whether that is intended.
        return self._process_response(self.results(query))

    def results(self, query: str) -> dict:
        """Run query through SerpAPI and return the raw result."""
        params = self.get_params(query)
        # HiddenPrints silences any console output from the serpapi client.
        with HiddenPrints():
            search = self.search_engine(params)
            res = search.get_dict()
        return res

    async def aresults(self, query: str) -> dict:
        """Use aiohttp to run query through SerpAPI and return the results async."""

        def construct_url_and_params() -> Tuple[str, Dict[str, str]]:
            # The async path bypasses the serpapi client and calls the HTTP
            # endpoint directly, so key/output format are set explicitly.
            params = self.get_params(query)
            params["source"] = "python"
            if self.serpapi_api_key:
                params["serp_api_key"] = self.serpapi_api_key
            params["output"] = "json"
            url = "https://serpapi.com/search"
            return url, params

        url, params = construct_url_and_params()
        if not self.aiosession:
            async with aiohttp.ClientSession() as session:
                async with session.get(url, params=params) as response:
                    res = await response.json()
        else:
            # Reuse the caller-provided session; it is not closed here.
            async with self.aiosession.get(url, params=params) as response:
                res = await response.json()
        return res

    def get_params(self, query: str) -> Dict[str, str]:
        """Get parameters for SerpAPI."""
        _params = {
            "api_key": self.serpapi_api_key,
            "q": query,
        }
        # Explicit key/query override any colliding defaults in self.params.
        params = {**self.params, **_params}
        return params

    @staticmethod
    def _process_response(res: dict) -> str:
        """Process response from SerpAPI.

        Walks the response in priority order (answer box, events, sports,
        top stories, news, jobs, shopping, Q&A, destinations, sights,
        images), falling back to collecting snippets from the knowledge
        graph, organic results, buying guide and local results.

        NOTE(review): several branches return lists or dicts despite the
        ``-> str`` annotation (e.g. ``events_results``, ``top_stories``);
        confirm downstream tolerance before tightening.
        """
        if "error" in res.keys():
            raise ValueError(f"Got error from SerpAPI: {res['error']}")
        # Normalize the list form of the answer box to the single-dict form.
        if "answer_box_list" in res.keys():
            res["answer_box"] = res["answer_box_list"]
        if "answer_box" in res.keys():
            answer_box = res["answer_box"]
            if isinstance(answer_box, list):
                answer_box = answer_box[0]
            if "result" in answer_box.keys():
                return answer_box["result"]
            elif "answer" in answer_box.keys():
                return answer_box["answer"]
            elif "snippet" in answer_box.keys():
                return answer_box["snippet"]
            elif "snippet_highlighted_words" in answer_box.keys():
                return answer_box["snippet_highlighted_words"]
            else:
                # No recognized field: keep only scalar, non-URL values.
                answer = {}
                for key, value in answer_box.items():
                    if not isinstance(value, (list, dict)) and not (
                        isinstance(value, str) and value.startswith("http")
                    ):
                        answer[key] = value
                return str(answer)
        elif "events_results" in res.keys():
            return res["events_results"][:10]
        elif "sports_results" in res.keys():
            return res["sports_results"]
        elif "top_stories" in res.keys():
            return res["top_stories"]
        elif "news_results" in res.keys():
            return res["news_results"]
        elif "jobs_results" in res.keys() and "jobs" in res["jobs_results"].keys():
            return res["jobs_results"]["jobs"]
        elif (
            "shopping_results" in res.keys()
            and "title" in res["shopping_results"][0].keys()
        ):
            return res["shopping_results"][:3]
        elif "questions_and_answers" in res.keys():
            return res["questions_and_answers"]
        elif (
            "popular_destinations" in res.keys()
            and "destinations" in res["popular_destinations"].keys()
        ):
            return res["popular_destinations"]["destinations"]
        elif "top_sights" in res.keys() and "sights" in res["top_sights"].keys():
            return res["top_sights"]["sights"]
        elif (
            "images_results" in res.keys()
            and "thumbnail" in res["images_results"][0].keys()
        ):
            return str([item["thumbnail"] for item in res["images_results"][:10]])
        # Fallback: accumulate text snippets from the remaining sections.
        snippets = []
        if "knowledge_graph" in res.keys():
            knowledge_graph = res["knowledge_graph"]
            title = knowledge_graph["title"] if "title" in knowledge_graph else ""
            if "description" in knowledge_graph.keys():
                snippets.append(knowledge_graph["description"])
            # Keep scalar attributes, skipping layout/link-ish keys and URLs.
            for key, value in knowledge_graph.items():
                if (
                    isinstance(key, str)
                    and isinstance(value, str)
                    and key not in ["title", "description"]
                    and not key.endswith("_stick")
                    and not key.endswith("_link")
                    and not value.startswith("http")
                ):
                    snippets.append(f"{title} {key}: {value}.")
        for organic_result in res.get("organic_results", []):
            if "snippet" in organic_result.keys():
                snippets.append(organic_result["snippet"])
            elif "snippet_highlighted_words" in organic_result.keys():
                snippets.append(organic_result["snippet_highlighted_words"])
            elif "rich_snippet" in organic_result.keys():
                snippets.append(organic_result["rich_snippet"])
            elif "rich_snippet_table" in organic_result.keys():
                snippets.append(organic_result["rich_snippet_table"])
            elif "link" in organic_result.keys():
                snippets.append(organic_result["link"])
        if "buying_guide" in res.keys():
            snippets.append(res["buying_guide"])
        # local_results can be either a list (extend) or a dict with "places".
        if "local_results" in res and isinstance(res["local_results"], list):
            snippets += res["local_results"]
        if (
            "local_results" in res.keys()
            and isinstance(res["local_results"], dict)
            and "places" in res["local_results"].keys()
        ):
            snippets.append(res["local_results"]["places"])
        if len(snippets) > 0:
            return str(snippets)
        else:
            return "No good search result found"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/you.py | """Util that calls you.com Search API.
In order to set this up, follow instructions at:
https://documentation.you.com/quickstart
"""
import warnings
from typing import Any, Dict, List, Literal, Optional
import aiohttp
import requests
from langchain_core.documents import Document
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, Field, model_validator
from typing_extensions import Self
YOU_API_URL = "https://api.ydc-index.io"
class YouHitMetadata(BaseModel):
    """Metadata on a single hit from you.com"""

    # Base fields shared by YouHit and embedded as YouDocument.metadata.
    title: str = Field(description="The title of the result")
    url: str = Field(description="The url of the result")
    thumbnail_url: str = Field(description="Thumbnail associated with the result")
    description: str = Field(description="Details about the result")
class YouHit(YouHitMetadata):
    """A single hit from you.com, which may contain multiple snippets"""

    # One or more text snippets alongside the metadata inherited from
    # YouHitMetadata.
    snippets: List[str] = Field(description="One or snippets of text")
class YouAPIOutput(BaseModel):
    """Output from you.com API."""

    # Top-level list of hits as returned by the search endpoint.
    hits: List[YouHit] = Field(
        description="A list of dictionaries containing the results"
    )
class YouDocument(BaseModel):
    """Output of parsing one snippet."""

    # One snippet of text plus the hit metadata it was taken from.
    page_content: str = Field(description="One snippet of text")
    metadata: YouHitMetadata
class YouSearchAPIWrapper(BaseModel):
"""Wrapper for you.com Search and News API.
To connect to the You.com api requires an API key which
you can get at https://api.you.com.
You can check out the docs at https://documentation.you.com/api-reference/.
You need to set the environment variable `YDC_API_KEY` for retriever to operate.
Attributes
----------
ydc_api_key: str, optional
you.com api key, if YDC_API_KEY is not set in the environment
endpoint_type: str, optional
you.com endpoints: search, news, rag;
`web` and `snippet` alias `search`
`rag` returns `{'message': 'Forbidden'}`
@todo `news` endpoint
num_web_results: int, optional
The max number of web results to return, must be under 20.
This is mapped to the `count` query parameter for the News API.
safesearch: str, optional
Safesearch settings, one of off, moderate, strict, defaults to moderate
country: str, optional
Country code, ex: 'US' for United States, see api docs for list
search_lang: str, optional
(News API) Language codes, ex: 'en' for English, see api docs for list
ui_lang: str, optional
(News API) User interface language for the response, ex: 'en' for English,
see api docs for list
spellcheck: bool, optional
(News API) Whether to spell check query or not, defaults to True
k: int, optional
max number of Documents to return using `results()`
n_hits: int, optional, deprecated
Alias for num_web_results
n_snippets_per_hit: int, optional
limit the number of snippets returned per hit
"""
ydc_api_key: Optional[str] = None
# @todo deprecate `snippet`, not part of API
endpoint_type: Literal["search", "news", "rag", "snippet"] = "search"
# Common fields between Search and News API
num_web_results: Optional[int] = None
safesearch: Optional[Literal["off", "moderate", "strict"]] = None
country: Optional[str] = None
# News API specific fields
search_lang: Optional[str] = None
ui_lang: Optional[str] = None
spellcheck: Optional[bool] = None
k: Optional[int] = None
n_snippets_per_hit: Optional[int] = None
# should deprecate n_hits
n_hits: Optional[int] = None
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key exists in environment."""
ydc_api_key = get_from_dict_or_env(values, "ydc_api_key", "YDC_API_KEY")
values["ydc_api_key"] = ydc_api_key
return values
@model_validator(mode="after")
def warn_if_set_fields_have_no_effect(self) -> Self:
if self.endpoint_type != "news":
news_api_fields = ("search_lang", "ui_lang", "spellcheck")
for field in news_api_fields:
if getattr(self, field):
warnings.warn(
(
f"News API-specific field '{field}' is set but "
f'`endpoint_type="{self.endpoint_type}"`. '
"This will have no effect."
),
UserWarning,
)
if self.endpoint_type not in ("search", "snippet"):
if self.n_snippets_per_hit:
warnings.warn(
(
"Field 'n_snippets_per_hit' only has effect on "
'`endpoint_type="search"`.'
),
UserWarning,
)
return self
@model_validator(mode="after")
def warn_if_deprecated_endpoints_are_used(self) -> Self:
    """Emit a DeprecationWarning when a deprecated endpoint type is used.

    BUG FIX: the original compared against ``"snippets"``, which is not a
    member of the ``endpoint_type`` Literal (the value is ``"snippet"``),
    so the warning could never fire.
    """
    if self.endpoint_type == "snippet":
        warnings.warn(
            (
                f'`endpoint_type="{self.endpoint_type}"` is deprecated. '
                'Use `endpoint_type="search"` instead.'
            ),
            DeprecationWarning,
        )
    return self
def _generate_params(self, query: str, **kwargs: Any) -> Dict:
"""
Parse parameters required for different You.com APIs.
Args:
query: The query to search for.
"""
params = {
"safesearch": self.safesearch,
"country": self.country,
**kwargs,
}
# Add endpoint-specific params
if self.endpoint_type in ("search", "snippet"):
params.update(
query=query,
num_web_results=self.num_web_results,
)
elif self.endpoint_type == "news":
params.update(
q=query,
count=self.num_web_results,
search_lang=self.search_lang,
ui_lang=self.ui_lang,
spellcheck=self.spellcheck,
)
params = {k: v for k, v in params.items() if v is not None}
return params
def _parse_results(self, raw_search_results: Dict) -> List[Document]:
    """Convert a raw API response dict into a list of Documents.

    For the news endpoint each result's description becomes the page
    content. Otherwise every snippet of every hit becomes its own
    Document, truncated by `n_snippets_per_hit` and capped at `k`
    Documents overall.

    Parameters:
        raw_search_results: A dict containing list of hits

    Returns:
        List[YouDocument]: A dictionary of parsed results
    """
    if self.endpoint_type == "news":
        items = raw_search_results["news"]["results"]
        if self.k is not None:
            items = items[: self.k]
        return [
            Document(page_content=item["description"], metadata=item)
            for item in items
        ]
    documents: List[Document] = []
    for hit in raw_search_results["hits"]:
        snippets = hit.get("snippets")
        limit = self.n_snippets_per_hit or len(snippets)
        for snippet in snippets[:limit]:
            documents.append(
                Document(
                    page_content=snippet,
                    metadata={
                        "url": hit.get("url"),
                        "thumbnail_url": hit.get("thumbnail_url"),
                        "title": hit.get("title"),
                        "description": hit.get("description"),
                    },
                )
            )
            # Stop as soon as the overall Document budget is reached.
            if self.k is not None and len(documents) >= self.k:
                return documents
    return documents
def raw_results(
    self,
    query: str,
    **kwargs: Any,
) -> Dict:
    """Run query through you.com Search and return hits.

    Args:
        query: The query to search for.
    Returns: YouAPIOutput
    """
    headers = {"X-API-Key": self.ydc_api_key or ""}
    params = self._generate_params(query, **kwargs)
    # @todo deprecate `snippet`, not part of API
    # "snippet" is mapped onto the real "search" endpoint; this mutates
    # self.endpoint_type, mirroring the async variant.
    if self.endpoint_type == "snippet":
        self.endpoint_type = "search"
    response = requests.get(
        f"{YOU_API_URL}/{self.endpoint_type}",
        params=params,
        headers=headers,
    )
    response.raise_for_status()
    return response.json()
def results(
    self,
    query: str,
    **kwargs: Any,
) -> List[Document]:
    """Run query through you.com Search and parses results into Documents."""
    # Strip unset kwargs before forwarding them to the API call.
    filtered = {key: value for key, value in kwargs.items() if value is not None}
    return self._parse_results(self.raw_results(query, **filtered))
async def raw_results_async(
    self,
    query: str,
    **kwargs: Any,
) -> Dict:
    """Get results from the you.com Search API asynchronously."""
    headers = {"X-API-Key": self.ydc_api_key or ""}
    params = self._generate_params(query, **kwargs)
    # @todo deprecate `snippet`, not part of API
    if self.endpoint_type == "snippet":
        self.endpoint_type = "search"
    url = f"{YOU_API_URL}/{self.endpoint_type}"
    async with aiohttp.ClientSession() as session:
        async with session.get(url=url, params=params, headers=headers) as res:
            if res.status != 200:
                raise Exception(f"Error {res.status}: {res.reason}")
            return await res.json()
async def results_async(
    self,
    query: str,
    **kwargs: Any,
) -> List[Document]:
    """Async counterpart of results(): query you.com and parse Documents."""
    filtered = {key: value for key, value in kwargs.items() if value is not None}
    raw = await self.raw_results_async(query, **filtered)
    return self._parse_results(raw)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/duckduckgo_search.py | """Util that calls DuckDuckGo Search.
No setup required. Free.
https://pypi.org/project/duckduckgo-search/
"""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, ConfigDict, model_validator
class DuckDuckGoSearchAPIWrapper(BaseModel):
    """Wrapper for DuckDuckGo Search API.

    Free and does not require any setup.
    """

    region: Optional[str] = "wt-wt"
    """
    See https://pypi.org/project/duckduckgo-search/#regions
    """
    safesearch: str = "moderate"
    """
    Options: strict, moderate, off
    """
    time: Optional[str] = "y"
    """
    Options: d, w, m, y
    """
    max_results: int = 5
    backend: str = "api"
    """
    Options: api, html, lite
    """
    source: str = "text"
    """
    Options: text, news
    """

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that python package exists in environment."""
        try:
            from duckduckgo_search import DDGS  # noqa: F401
        except ImportError:
            raise ImportError(
                "Could not import duckduckgo-search python package. "
                "Please install it with `pip install -U duckduckgo-search`."
            )
        return values

    def _ddgs_text(
        self, query: str, max_results: Optional[int] = None
    ) -> List[Dict[str, str]]:
        """Run query through DuckDuckGo text search and return results."""
        from duckduckgo_search import DDGS

        with DDGS() as ddgs:
            ddgs_gen = ddgs.text(
                query,
                region=self.region,  # type: ignore[arg-type]
                safesearch=self.safesearch,
                timelimit=self.time,
                max_results=max_results or self.max_results,
                backend=self.backend,
            )
            if ddgs_gen:
                return [r for r in ddgs_gen]
        return []

    def _ddgs_news(
        self, query: str, max_results: Optional[int] = None
    ) -> List[Dict[str, str]]:
        """Run query through DuckDuckGo news search and return results."""
        from duckduckgo_search import DDGS

        with DDGS() as ddgs:
            ddgs_gen = ddgs.news(
                query,
                region=self.region,  # type: ignore[arg-type]
                safesearch=self.safesearch,
                timelimit=self.time,
                max_results=max_results or self.max_results,
            )
            if ddgs_gen:
                return [r for r in ddgs_gen]
        return []

    def run(self, query: str) -> str:
        """Run query through DuckDuckGo and return concatenated result bodies.

        Returns a fallback message when the configured source is unknown
        or no results are found.
        """
        if self.source == "text":
            results = self._ddgs_text(query)
        elif self.source == "news":
            results = self._ddgs_news(query)
        else:
            results = []

        if not results:
            return "No good DuckDuckGo Search Result was found"
        return " ".join(r["body"] for r in results)

    def results(
        self, query: str, max_results: int, source: Optional[str] = None
    ) -> List[Dict[str, str]]:
        """Run query through DuckDuckGo and return metadata.

        Args:
            query: The query to search for.
            max_results: The number of results to return.
            source: The source to look from.

        Returns:
            A list of dictionaries with the following keys:
                snippet - The description of the result.
                title - The title of the result.
                link - The link to the result.
        """
        source = source or self.source
        if source == "text":
            results = [
                {"snippet": r["body"], "title": r["title"], "link": r["href"]}
                for r in self._ddgs_text(query, max_results=max_results)
            ]
        elif source == "news":
            results = [
                {
                    "snippet": r["body"],
                    "title": r["title"],
                    "link": r["url"],
                    "date": r["date"],
                    "source": r["source"],
                }
                for r in self._ddgs_news(query, max_results=max_results)
            ]
        else:
            results = []

        # BUG FIX: `results` is always a list here, so the original
        # `if results is None` check could never fire and an empty search
        # silently returned []. Check for emptiness instead so the
        # documented fallback entry is actually returned (mirrors `run`).
        if not results:
            results = [{"Result": "No good DuckDuckGo Search Result was found"}]
        return results
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/anthropic.py | from typing import Any, List
def _get_anthropic_client() -> Any:
    """Instantiate and return an ``anthropic.Anthropic`` client.

    Raises:
        ImportError: With install instructions, if the ``anthropic``
            package is not available.
    """
    try:
        import anthropic
    except ImportError:
        raise ImportError(
            "Could not import anthropic python package. "
            "This is needed in order to accurately tokenize the text "
            "for anthropic models. Please install it with `pip install anthropic`."
        )
    return anthropic.Anthropic()
def get_num_tokens_anthropic(text: str) -> int:
    """Get the number of tokens in a string of text.

    Args:
        text: The string to tokenize.

    Returns:
        The token count reported by the Anthropic client.
    """
    return _get_anthropic_client().count_tokens(text=text)
def get_token_ids_anthropic(text: str) -> List[int]:
    """Get the token ids for a string of text.

    Args:
        text: The string to tokenize.

    Returns:
        The list of token ids produced by the Anthropic tokenizer.
    """
    tokenizer = _get_anthropic_client().get_tokenizer()
    return tokenizer.encode(text).ids
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/output_parsers/rail_parser.py | from __future__ import annotations
from typing import Any, Callable, Dict, Optional
from langchain_core.output_parsers import BaseOutputParser
class GuardrailsOutputParser(BaseOutputParser):
    """Parse the output of an LLM call using Guardrails."""

    guard: Any
    """The Guardrails object."""
    api: Optional[Callable]
    """The LLM API passed to Guardrails during parsing. An example is `openai.completions.create`."""  # noqa: E501
    args: Any
    """Positional arguments to pass to the above LLM API callable."""
    kwargs: Any
    """Keyword arguments to pass to the above LLM API callable."""

    @property
    def _type(self) -> str:
        return "guardrails"

    @staticmethod
    def _import_guard() -> Any:
        """Import and return the guardrails ``Guard`` class.

        Shared by all alternate constructors so the install hint lives in
        one place (the original duplicated this try/except three times).

        Raises:
            ImportError: If guardrails-ai is not installed.
        """
        try:
            from guardrails import Guard
        except ImportError:
            raise ImportError(
                "guardrails-ai package not installed. "
                "Install it by running `pip install guardrails-ai`."
            )
        return Guard

    @classmethod
    def from_rail(
        cls,
        rail_file: str,
        num_reasks: int = 1,
        api: Optional[Callable] = None,
        *args: Any,
        **kwargs: Any,
    ) -> GuardrailsOutputParser:
        """Create a GuardrailsOutputParser from a rail file.

        Args:
            rail_file: a rail file.
            num_reasks: number of times to re-ask the question.
            api: the API to use for the Guardrails object.
            *args: The arguments to pass to the API
            **kwargs: The keyword arguments to pass to the API.

        Returns:
            GuardrailsOutputParser
        """
        Guard = cls._import_guard()
        return cls(
            guard=Guard.from_rail(rail_file, num_reasks=num_reasks),
            api=api,
            args=args,
            kwargs=kwargs,
        )

    @classmethod
    def from_rail_string(
        cls,
        rail_str: str,
        num_reasks: int = 1,
        api: Optional[Callable] = None,
        *args: Any,
        **kwargs: Any,
    ) -> GuardrailsOutputParser:
        """Create a GuardrailsOutputParser from a rail spec string."""
        Guard = cls._import_guard()
        return cls(
            guard=Guard.from_rail_string(rail_str, num_reasks=num_reasks),
            api=api,
            args=args,
            kwargs=kwargs,
        )

    @classmethod
    def from_pydantic(
        cls,
        output_class: Any,
        num_reasks: int = 1,
        api: Optional[Callable] = None,
        *args: Any,
        **kwargs: Any,
    ) -> GuardrailsOutputParser:
        """Create a GuardrailsOutputParser from a pydantic output class."""
        Guard = cls._import_guard()
        return cls(
            guard=Guard.from_pydantic(output_class, "", num_reasks=num_reasks),
            api=api,
            args=args,
            kwargs=kwargs,
        )

    def get_format_instructions(self) -> str:
        """Return the format instructions embedded in the rail prompt."""
        return self.guard.raw_prompt.format_instructions

    def parse(self, text: str) -> Dict:
        """Parse *text* with the guard, re-asking via the stored LLM API."""
        return self.guard.parse(text, llm_api=self.api, *self.args, **self.kwargs)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/output_parsers/ernie_functions.py | import copy
import json
from typing import Any, Dict, List, Optional, Type, Union
import jsonpatch
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import (
BaseCumulativeTransformOutputParser,
BaseGenerationOutputParser,
)
from langchain_core.output_parsers.json import parse_partial_json
from langchain_core.outputs.chat_generation import (
ChatGeneration,
Generation,
)
from pydantic import BaseModel, model_validator
class OutputFunctionsParser(BaseGenerationOutputParser[Any]):
    """Parse an output that is one of sets of values."""

    args_only: bool = True
    """Whether to only return the arguments to the function call."""

    def parse_result(self, result: List[Generation], *, partial: bool = False) -> Any:
        """Extract the function_call payload from the first chat generation."""
        generation = result[0]
        if not isinstance(generation, ChatGeneration):
            raise OutputParserException(
                "This output parser can only be used with a chat generation."
            )
        try:
            # Deep-copy so callers can mutate the payload safely.
            func_call = copy.deepcopy(
                generation.message.additional_kwargs["function_call"]
            )
        except KeyError as exc:
            raise OutputParserException(f"Could not parse function call: {exc}")
        return func_call["arguments"] if self.args_only else func_call
class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
    """Parse an output as the Json object."""

    strict: bool = False
    """Whether to allow non-JSON-compliant strings.

    See: https://docs.python.org/3/library/json.html#encoders-and-decoders

    Useful when the parsed output may include unicode characters or new lines.
    """

    args_only: bool = True
    """Whether to only return the arguments to the function call."""

    @property
    def _type(self) -> str:
        return "json_functions"

    def _diff(self, prev: Optional[Any], next: Any) -> Any:
        # Streaming support: represent the change from `prev` to `next`
        # as a JSON Patch operation list.
        return jsonpatch.make_patch(prev, next).patch

    def parse_result(self, result: List[Generation], *, partial: bool = False) -> Any:
        # Exactly one chat generation is expected; anything else is an error.
        if len(result) != 1:
            raise OutputParserException(
                f"Expected exactly one result, but got {len(result)}"
            )
        generation = result[0]
        if not isinstance(generation, ChatGeneration):
            raise OutputParserException(
                "This output parser can only be used with a chat generation."
            )
        message = generation.message
        # No function call at all: nothing to parse yet.
        if "function_call" not in message.additional_kwargs:
            return None
        try:
            function_call = message.additional_kwargs["function_call"]
        except KeyError as exc:
            if partial:
                return None
            else:
                raise OutputParserException(f"Could not parse function call: {exc}")
        try:
            if partial:
                # Streaming mode: tolerate truncated JSON; a missing
                # "arguments" key falls through to the KeyError handler
                # below and yields None instead of raising.
                if self.args_only:
                    return parse_partial_json(
                        function_call["arguments"], strict=self.strict
                    )
                else:
                    return {
                        **function_call,
                        "arguments": parse_partial_json(
                            function_call["arguments"], strict=self.strict
                        ),
                    }
            else:
                # Complete mode: malformed JSON is a hard parser error.
                if self.args_only:
                    try:
                        return json.loads(
                            function_call["arguments"], strict=self.strict
                        )
                    except (json.JSONDecodeError, TypeError) as exc:
                        raise OutputParserException(
                            f"Could not parse function call data: {exc}"
                        )
                else:
                    try:
                        return {
                            **function_call,
                            "arguments": json.loads(
                                function_call["arguments"], strict=self.strict
                            ),
                        }
                    except (json.JSONDecodeError, TypeError) as exc:
                        raise OutputParserException(
                            f"Could not parse function call data: {exc}"
                        )
        except KeyError:
            # A missing "arguments" key (streaming not yet complete)
            # yields None rather than an error.
            return None

    # This method would be called by the default implementation of `parse_result`
    # but we're overriding that method so it's not needed.
    def parse(self, text: str) -> Any:
        raise NotImplementedError()
class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):
    """Parse an output as the element of the Json object."""

    key_name: str
    """The name of the key to return."""

    def parse_result(self, result: List[Generation], *, partial: bool = False) -> Any:
        """Parse the function-call JSON, then project out a single key."""
        parsed = super().parse_result(result, partial=partial)
        if partial:
            # Streaming: a missing object or missing key both yield None.
            return None if parsed is None else parsed.get(self.key_name)
        # Complete output: a missing key is an error (KeyError).
        return parsed[self.key_name]
class PydanticOutputFunctionsParser(OutputFunctionsParser):
    """Parse an output as a pydantic object."""

    pydantic_schema: Union[Type[BaseModel], Dict[str, Type[BaseModel]]]
    """The pydantic schema to parse the output with."""

    @model_validator(mode="before")
    @classmethod
    def validate_schema(cls, values: Dict) -> Any:
        # Default `args_only` to True only when a single schema class is
        # given; a dict of schemas needs the function name too, so
        # arguments alone are insufficient in that case.
        schema = values["pydantic_schema"]
        if "args_only" not in values:
            values["args_only"] = isinstance(schema, type) and issubclass(
                schema, BaseModel
            )
        elif values["args_only"] and isinstance(schema, Dict):
            raise ValueError(
                "If multiple pydantic schemas are provided then args_only should be"
                " False."
            )
        return values

    def parse_result(self, result: List[Generation], *, partial: bool = False) -> Any:
        # Extract the function call (or just its arguments, depending on
        # args_only) and hydrate the matching pydantic model from the
        # raw JSON string.
        _result = super().parse_result(result)
        if self.args_only:
            pydantic_args = self.pydantic_schema.parse_raw(_result)  # type: ignore
        else:
            # Multiple schemas: select by function name, then parse its args.
            fn_name = _result["name"]
            _args = _result["arguments"]
            pydantic_args = self.pydantic_schema[fn_name].parse_raw(_args)  # type: ignore
        return pydantic_args
class PydanticAttrOutputFunctionsParser(PydanticOutputFunctionsParser):
    """Parse an output as an attribute of a pydantic object."""

    attr_name: str
    """The name of the attribute to return."""

    def parse_result(self, result: List[Generation], *, partial: bool = False) -> Any:
        """Parse into the pydantic object, then return one attribute of it."""
        parsed_model = super().parse_result(result)
        return getattr(parsed_model, self.attr_name)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/output_parsers/__init__.py | """**OutputParser** classes parse the output of an LLM call.
**Class hierarchy:**
.. code-block::
BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # GuardrailsOutputParser
**Main helpers:**
.. code-block::
Serializable, Generation, PromptValue
""" # noqa: E501
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/naver.py | import logging
from typing import Any, Dict, List, Optional, cast
import httpx
from langchain_core.embeddings import Embeddings
from langchain_core.utils import convert_to_secret_str, get_from_env
from pydantic import (
AliasChoices,
BaseModel,
ConfigDict,
Field,
SecretStr,
model_validator,
)
from typing_extensions import Self
_DEFAULT_BASE_URL = "https://clovastudio.apigw.ntruss.com"
logger = logging.getLogger(__name__)
def _raise_on_error(response: httpx.Response) -> None:
    """Raise an error if the response is an error."""
    if not httpx.codes.is_error(response.status_code):
        return
    # Read the body synchronously so the server's message is included.
    detail = response.read().decode("utf-8")
    raise httpx.HTTPStatusError(
        f"Error response {response.status_code} "
        f"while fetching {response.url}: {detail}",
        request=response.request,
        response=response,
    )
async def _araise_on_error(response: httpx.Response) -> None:
    """Raise an error if the response is an error (async variant)."""
    if not httpx.codes.is_error(response.status_code):
        return
    # Read the body without blocking the event loop.
    detail = (await response.aread()).decode("utf-8")
    raise httpx.HTTPStatusError(
        f"Error response {response.status_code} "
        f"while fetching {response.url}: {detail}",
        request=response.request,
        response=response,
    )
class ClovaXEmbeddings(BaseModel, Embeddings):
    """`NCP ClovaStudio` Embedding API.

    following environment variables set or passed in constructor in lower case:
    - ``NCP_CLOVASTUDIO_API_KEY``
    - ``NCP_APIGW_API_KEY``
    - ``NCP_CLOVASTUDIO_APP_ID``

    Example:
        .. code-block:: python

            from langchain_community import ClovaXEmbeddings

            model = ClovaXEmbeddings(model="clir-emb-dolphin")
            output = embedding.embed_documents(documents)
    """  # noqa: E501

    client: Optional[httpx.Client] = Field(default=None)  #: :meta private:
    async_client: Optional[httpx.AsyncClient] = Field(default=None)  #: :meta private:

    ncp_clovastudio_api_key: Optional[SecretStr] = Field(default=None, alias="api_key")
    """Automatically inferred from env are `NCP_CLOVASTUDIO_API_KEY` if not provided."""

    ncp_apigw_api_key: Optional[SecretStr] = Field(default=None, alias="apigw_api_key")
    """Automatically inferred from env are `NCP_APIGW_API_KEY` if not provided."""

    base_url: Optional[str] = Field(default=None, alias="base_url")
    """
    Automatically inferred from env are `NCP_CLOVASTUDIO_API_BASE_URL` if not provided.
    """

    app_id: Optional[str] = Field(default=None)
    service_app: bool = Field(
        default=False,
        description="false: use testapp, true: use service app on NCP Clova Studio",
    )
    model_name: str = Field(
        default="clir-emb-dolphin",
        validation_alias=AliasChoices("model_name", "model"),
        description="NCP ClovaStudio embedding model name",
    )
    timeout: int = Field(gt=0, default=60)

    model_config = ConfigDict(arbitrary_types_allowed=True, protected_namespaces=())

    @property
    def lc_secrets(self) -> Dict[str, str]:
        # Maps secret fields to env var names for LangChain serialization.
        return {
            "ncp_clovastudio_api_key": "NCP_CLOVASTUDIO_API_KEY",
            "ncp_apigw_api_key": "NCP_APIGW_API_KEY",
        }

    @property
    def _api_url(self) -> str:
        """GET embedding api url"""
        app_type = "serviceapp" if self.service_app else "testapp"
        # "bge-m3" is served under the "v2" path segment.
        model_name = self.model_name if self.model_name != "bge-m3" else "v2"
        return (
            f"{self.base_url}/{app_type}"
            f"/v1/api-tools/embedding/{model_name}/{self.app_id}"
        )

    @model_validator(mode="after")
    def validate_model_after(self) -> Self:
        """Fill unset fields from the environment and build the HTTP clients."""
        if not self.ncp_clovastudio_api_key:
            self.ncp_clovastudio_api_key = convert_to_secret_str(
                get_from_env("ncp_clovastudio_api_key", "NCP_CLOVASTUDIO_API_KEY")
            )
        if not self.ncp_apigw_api_key:
            self.ncp_apigw_api_key = convert_to_secret_str(
                get_from_env("ncp_apigw_api_key", "NCP_APIGW_API_KEY", "")
            )
        if not self.base_url:
            self.base_url = get_from_env(
                "base_url", "NCP_CLOVASTUDIO_API_BASE_URL", _DEFAULT_BASE_URL
            )
        if not self.app_id:
            self.app_id = get_from_env("app_id", "NCP_CLOVASTUDIO_APP_ID")
        if not self.client:
            self.client = httpx.Client(
                base_url=self.base_url,
                headers=self.default_headers(),
                timeout=self.timeout,
            )
        if not self.async_client:
            self.async_client = httpx.AsyncClient(
                base_url=self.base_url,
                headers=self.default_headers(),
                timeout=self.timeout,
            )
        return self

    def default_headers(self) -> Dict[str, Any]:
        """Build request headers from whichever API keys are configured."""
        headers = {
            "Content-Type": "application/json",
            "Accept": "application/json",
        }
        clovastudio_api_key = (
            self.ncp_clovastudio_api_key.get_secret_value()
            if self.ncp_clovastudio_api_key
            else None
        )
        if clovastudio_api_key:
            headers["X-NCP-CLOVASTUDIO-API-KEY"] = clovastudio_api_key
        apigw_api_key = (
            self.ncp_apigw_api_key.get_secret_value()
            if self.ncp_apigw_api_key
            else None
        )
        if apigw_api_key:
            headers["X-NCP-APIGW-API-KEY"] = apigw_api_key
        return headers

    def _embed_text(self, text: str) -> List[float]:
        """Embed a single text synchronously via the ClovaStudio API."""
        payload = {"text": text}
        client = cast(httpx.Client, self.client)
        response = client.post(url=self._api_url, json=payload)
        _raise_on_error(response)
        return response.json()["result"]["embedding"]

    async def _aembed_text(self, text: str) -> List[float]:
        """Embed a single text asynchronously via the ClovaStudio API."""
        payload = {"text": text}
        # BUG FIX: the original cast `self.client` (a sync httpx.Client) to
        # AsyncClient; awaiting its .post() result fails at runtime. Use the
        # dedicated async client created in validate_model_after.
        async_client = cast(httpx.AsyncClient, self.async_client)
        response = await async_client.post(url=self._api_url, json=payload)
        await _araise_on_error(response)
        return response.json()["result"]["embedding"]

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed each document text; one API call per text."""
        embeddings = []
        for text in texts:
            embeddings.append(self._embed_text(text))
        return embeddings

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query text."""
        return self._embed_text(text)

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        """Async variant of embed_documents; sequential API calls."""
        embeddings = []
        for text in texts:
            embedding = await self._aembed_text(text)
            embeddings.append(embedding)
        return embeddings

    async def aembed_query(self, text: str) -> List[float]:
        """Async variant of embed_query."""
        return await self._aembed_text(text)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/dashscope.py | from __future__ import annotations
import logging
from typing import (
Any,
Callable,
Dict,
List,
Optional,
)
from langchain_core.embeddings import Embeddings
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
from requests.exceptions import HTTPError
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
logger = logging.getLogger(__name__)
BATCH_SIZE = {"text-embedding-v1": 25, "text-embedding-v2": 25, "text-embedding-v3": 6}
def _create_retry_decorator(embeddings: DashScopeEmbeddings) -> Callable[[Any], Any]:
    """Build a tenacity retry decorator tuned for DashScope HTTP errors.

    Retries only on ``HTTPError``, with exponential backoff starting at
    1 second and capped at 4 seconds, for at most
    ``embeddings.max_retries`` attempts; the final exception is re-raised.
    """
    return retry(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=1, max=4),
        retry=retry_if_exception_type(HTTPError),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )
def embed_with_retry(embeddings: DashScopeEmbeddings, **kwargs: Any) -> Any:
    """Use tenacity to retry the embedding call."""
    retry_decorator = _create_retry_decorator(embeddings)

    @retry_decorator
    def _embed_with_retry(**kwargs: Any) -> Any:
        # Accumulate embeddings across batches; the DashScope API caps the
        # number of texts per request (model-dependent, default 25).
        result = []
        i = 0
        input_data = kwargs["input"]
        # A bare string counts as a single item; lists are batched.
        input_len = len(input_data) if isinstance(input_data, list) else 1
        batch_size = BATCH_SIZE.get(kwargs["model"], 25)
        while i < input_len:
            kwargs["input"] = (
                input_data[i : i + batch_size]
                if isinstance(input_data, list)
                else input_data
            )
            resp = embeddings.client.call(**kwargs)
            if resp.status_code == 200:
                result += resp.output["embeddings"]
            elif resp.status_code in [400, 401]:
                # Client errors are not retried (ValueError is not HTTPError).
                raise ValueError(
                    f"status_code: {resp.status_code} \n "
                    f"code: {resp.code} \n message: {resp.message}"
                )
            else:
                # Other statuses raise HTTPError, which tenacity retries.
                raise HTTPError(
                    f"HTTP error occurred: status_code: {resp.status_code} \n "
                    f"code: {resp.code} \n message: {resp.message}",
                    response=resp,
                )
            i += batch_size
        return result

    return _embed_with_retry(**kwargs)
class DashScopeEmbeddings(BaseModel, Embeddings):
    """DashScope embedding models.

    To use, you should have the ``dashscope`` python package installed, and the
    environment variable ``DASHSCOPE_API_KEY`` set with your API key or pass it
    as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import DashScopeEmbeddings
            embeddings = DashScopeEmbeddings(dashscope_api_key="my-api-key")

    Example:
        .. code-block:: python

            import os
            os.environ["DASHSCOPE_API_KEY"] = "your DashScope API KEY"

            from langchain_community.embeddings.dashscope import DashScopeEmbeddings
            embeddings = DashScopeEmbeddings(
                model="text-embedding-v1",
            )
            text = "This is a test query."
            query_result = embeddings.embed_query(text)

    """

    client: Any = None  #: :meta private:
    """The DashScope client."""
    model: str = "text-embedding-v1"
    dashscope_api_key: Optional[str] = None
    max_retries: int = 5
    """Maximum number of retries to make when generating."""

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        # BUG FIX: the original executed `import dashscope` before the
        # try/except below, so a missing package raised a bare ImportError
        # and the friendly install hint was unreachable (the second import
        # inside the try was also redundant). Import once, inside the guard.
        try:
            import dashscope
        except ImportError:
            raise ImportError(
                "Could not import dashscope python package. "
                "Please install it with `pip install dashscope`."
            )
        values["dashscope_api_key"] = get_from_dict_or_env(
            values, "dashscope_api_key", "DASHSCOPE_API_KEY"
        )
        # Configure the module-level key and hand the TextEmbedding entry
        # point to the model as its client.
        dashscope.api_key = values["dashscope_api_key"]
        values["client"] = dashscope.TextEmbedding
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to DashScope's embedding endpoint for embedding search docs.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        embeddings = embed_with_retry(
            self, input=texts, text_type="document", model=self.model
        )
        embedding_list = [item["embedding"] for item in embeddings]
        return embedding_list

    def embed_query(self, text: str) -> List[float]:
        """Call out to DashScope's embedding endpoint for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        embedding = embed_with_retry(
            self, input=text, text_type="query", model=self.model
        )[0]["embedding"]
        return embedding
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/baichuan.py | from typing import Any, List, Optional
import requests
from langchain_core.embeddings import Embeddings
from langchain_core.utils import (
secret_from_env,
)
from pydantic import (
BaseModel,
ConfigDict,
Field,
SecretStr,
model_validator,
)
from requests import RequestException
from typing_extensions import Self
BAICHUAN_API_URL: str = "https://api.baichuan-ai.com/v1/embeddings"
# BaichuanTextEmbeddings is an embedding model provided by Baichuan Inc. (https://www.baichuan-ai.com/home).
# As of today (Jan 25th, 2024) BaichuanTextEmbeddings ranks #1 in C-MTEB
# (Chinese Multi-Task Embedding Benchmark) leaderboard.
# Leaderboard (Under Overall -> Chinese section): https://huggingface.co/spaces/mteb/leaderboard
# Official Website: https://platform.baichuan-ai.com/docs/text-Embedding
# An API-key is required to use this embedding model. You can get one by registering
# at https://platform.baichuan-ai.com/docs/text-Embedding.
# BaichuanTextEmbeddings support 512 token window and produces vectors with
# 1024 dimensions.
# NOTE!! BaichuanTextEmbeddings only supports Chinese text embedding.
# Multi-language support is coming soon.
class BaichuanTextEmbeddings(BaseModel, Embeddings):
    """Baichuan Text Embedding models.

    Setup:
        To use, you should set the environment variable ``BAICHUAN_API_KEY`` to
        your API key or pass it as a named parameter to the constructor.

        .. code-block:: bash

            export BAICHUAN_API_KEY="your-api-key"

    Instantiate:
        .. code-block:: python

            from langchain_community.embeddings import BaichuanTextEmbeddings

            embeddings = BaichuanTextEmbeddings()

    Embed:
        .. code-block:: python

            # embed the documents
            vectors = embeddings.embed_documents([text1, text2, ...])

            # embed the query
            vectors = embeddings.embed_query(text)
    """  # noqa: E501

    session: Any = None  #: :meta private:
    model_name: str = Field(default="Baichuan-Text-Embedding", alias="model")
    """The model used to embed the documents."""
    baichuan_api_key: SecretStr = Field(
        alias="api_key",
        default_factory=secret_from_env(["BAICHUAN_API_KEY", "BAICHUAN_AUTH_TOKEN"]),
    )
    """Automatically inferred from env var `BAICHUAN_API_KEY` if not provided."""
    chunk_size: int = 16
    """Chunk size when multiple texts are input"""

    model_config = ConfigDict(populate_by_name=True, protected_namespaces=())

    @model_validator(mode="after")
    def validate_environment(self) -> Self:
        """Validate that auth token exists in environment."""
        # Build one authenticated session that is reused by every request.
        session = requests.Session()
        session.headers.update(
            {
                "Authorization": f"Bearer {self.baichuan_api_key.get_secret_value()}",
                "Accept-Encoding": "identity",
                "Content-type": "application/json",
            }
        )
        self.session = session
        return self

    def _embed(self, texts: List[str]) -> Optional[List[List[float]]]:
        """Internal method to call Baichuan Embedding API and return embeddings.

        Args:
            texts: A list of texts to embed.

        Returns:
            A list of list of floats representing the embeddings, or None if an
            error occurs.
        """
        # The API limits batch size, so send the texts in fixed-size chunks.
        chunk_texts = [
            texts[i : i + self.chunk_size]
            for i in range(0, len(texts), self.chunk_size)
        ]
        embed_results = []
        for chunk in chunk_texts:
            response = self.session.post(
                BAICHUAN_API_URL, json={"input": chunk, "model": self.model_name}
            )
            # Raise exception if response status code from 400 to 600
            response.raise_for_status()
            # Check if the response status code indicates success
            if response.status_code == 200:
                resp = response.json()
                embeddings = resp.get("data", [])
                # Sort resulting embeddings by index
                sorted_embeddings = sorted(embeddings, key=lambda e: e.get("index", 0))
                # Return just the embeddings
                embed_results.extend(
                    [result.get("embedding", []) for result in sorted_embeddings]
                )
            else:
                # Log error or handle unsuccessful response appropriately
                # Handle 100 <= status_code < 400, not include 200
                raise RequestException(
                    f"Error: Received status code {response.status_code} from "
                    "`BaichuanEmbedding` API"
                )
        return embed_results

    def embed_documents(self, texts: List[str]) -> Optional[List[List[float]]]:  # type: ignore[override]
        """Public method to get embeddings for a list of documents.

        Args:
            texts: The list of texts to embed.

        Returns:
            A list of embeddings, one for each text, or None if an error occurs.
        """
        return self._embed(texts)

    def embed_query(self, text: str) -> Optional[List[float]]:  # type: ignore[override]
        """Public method to get embedding for a single query text.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text, or None if an error occurs.
        """
        result = self._embed([text])
        # ROBUSTNESS FIX: `result[0]` on an empty list would raise IndexError;
        # treat an empty result the same as None.
        return result[0] if result else None
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/jina.py | import base64
from os.path import exists
from typing import Any, Dict, List, Optional
from urllib.parse import urlparse
import requests
from langchain_core.embeddings import Embeddings
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, SecretStr, model_validator
JINA_API_URL: str = "https://api.jina.ai/v1/embeddings"
def is_local(url: str) -> bool:
    """Check if a URL is a local file.

    Args:
        url (str): The URL to check.

    Returns:
        bool: True if the URL is a local file, False otherwise.
    """
    parsed = urlparse(url)
    # Any explicit non-file scheme (http, https, ...) means remote.
    if parsed.scheme not in ("file", ""):
        return False
    # Treat it as local only when the path actually exists on disk.
    return exists(parsed.path)
def get_bytes_str(file_path: str) -> str:
    """Get the base64-encoded bytes string of a file.

    Args:
        file_path (str): The path to the file.

    Returns:
        str: The file's contents, base64-encoded and decoded as UTF-8 text.
    """
    with open(file_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode("utf-8")
class JinaEmbeddings(BaseModel, Embeddings):
    """Jina embedding models.

    Calls the hosted Jina AI embeddings API (``JINA_API_URL``) for both text
    and image inputs. Requires ``JINA_API_KEY`` (or the legacy
    ``JINA_AUTH_TOKEN``) in the environment or passed to the constructor.
    """

    session: Any  #: :meta private:  # requests.Session with auth headers pre-set
    model_name: str = "jina-embeddings-v2-base-en"
    jina_api_key: Optional[SecretStr] = None

    model_config = ConfigDict(protected_namespaces=())

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that auth token exists in environment."""
        try:
            jina_api_key = convert_to_secret_str(
                get_from_dict_or_env(values, "jina_api_key", "JINA_API_KEY")
            )
        except ValueError as original_exc:
            try:
                # Fall back to the legacy auth-token name before giving up.
                jina_api_key = convert_to_secret_str(
                    get_from_dict_or_env(values, "jina_auth_token", "JINA_AUTH_TOKEN")
                )
            except ValueError:
                raise original_exc
        # One shared HTTP session so auth headers ride on every request.
        session = requests.Session()
        session.headers.update(
            {
                "Authorization": f"Bearer {jina_api_key.get_secret_value()}",
                "Accept-Encoding": "identity",
                "Content-type": "application/json",
            }
        )
        values["session"] = session
        return values

    def _embed(self, input: Any) -> List[List[float]]:
        # Call Jina AI Embedding API
        resp = self.session.post(  # type: ignore
            JINA_API_URL, json={"input": input, "model": self.model_name}
        ).json()
        if "data" not in resp:
            # API error responses carry the message under "detail".
            raise RuntimeError(resp["detail"])
        embeddings = resp["data"]
        # Sort resulting embeddings by index
        sorted_embeddings = sorted(embeddings, key=lambda e: e["index"])  # type: ignore
        # Return just the embeddings
        return [result["embedding"] for result in sorted_embeddings]

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to Jina's embedding endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        return self._embed(texts)

    def embed_query(self, text: str) -> List[float]:
        """Call out to Jina's embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self._embed([text])[0]

    def embed_images(self, uris: List[str]) -> List[List[float]]:
        """Call out to Jina's image embedding endpoint.

        Args:
            uris: The list of uris to embed.

        Returns:
            List of embeddings, one for each image.
        """
        input = []
        for uri in uris:
            if is_local(uri):
                # Local files are sent inline as base64 bytes.
                input.append({"bytes": get_bytes_str(uri)})
            else:
                input.append({"url": uri})
        return self._embed(input)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/gradient_ai.py | from typing import Any, Dict, List, Optional
from langchain_core.embeddings import Embeddings
from langchain_core.utils import get_from_dict_or_env
from packaging.version import parse
from pydantic import BaseModel, ConfigDict, model_validator
from typing_extensions import Self
__all__ = ["GradientEmbeddings"]
class GradientEmbeddings(BaseModel, Embeddings):
    """Gradient.ai Embedding models.

    GradientLLM is a class to interact with Embedding Models on gradient.ai

    To use, set the environment variable ``GRADIENT_ACCESS_TOKEN`` with your
    API token and ``GRADIENT_WORKSPACE_ID`` for your gradient workspace,
    or alternatively provide them as keywords to the constructor of this class.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import GradientEmbeddings
            GradientEmbeddings(
                model="bge-large",
                gradient_workspace_id="12345614fc0_workspace",
                gradient_access_token="gradientai-access_token",
            )
    """

    model: str
    "Underlying gradient.ai model id."

    gradient_workspace_id: Optional[str] = None
    "Underlying gradient.ai workspace_id."

    gradient_access_token: Optional[str] = None
    """gradient.ai API Token, which can be generated by going to
    https://auth.gradient.ai/select-workspace
    and selecting "Access tokens" under the profile drop-down.
    """

    gradient_api_url: str = "https://api.gradient.ai/api"
    """Endpoint URL to use."""

    query_prompt_for_retrieval: Optional[str] = None
    """Query pre-prompt"""

    client: Any = None  #: :meta private:
    """Gradient client."""

    # LLM call kwargs
    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        values["gradient_access_token"] = get_from_dict_or_env(
            values, "gradient_access_token", "GRADIENT_ACCESS_TOKEN"
        )
        values["gradient_workspace_id"] = get_from_dict_or_env(
            values, "gradient_workspace_id", "GRADIENT_WORKSPACE_ID"
        )
        values["gradient_api_url"] = get_from_dict_or_env(
            values,
            "gradient_api_url",
            "GRADIENT_API_URL",
            default="https://api.gradient.ai/api",
        )
        return values

    @model_validator(mode="after")
    def post_init(self) -> Self:
        # Import lazily so the SDK is only required when the model is used.
        try:
            import gradientai
        except ImportError:
            raise ImportError(
                'GradientEmbeddings requires `pip install -U "gradientai>=1.4.0"`.'
            )

        if parse(gradientai.__version__) < parse("1.4.0"):
            raise ImportError(
                'GradientEmbeddings requires `pip install -U "gradientai>=1.4.0"`.'
            )

        gradient = gradientai.Gradient(
            access_token=self.gradient_access_token,
            workspace_id=self.gradient_workspace_id,
            host=self.gradient_api_url,
        )
        self.client = gradient.get_embeddings_model(slug=self.model)
        return self

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to Gradient's embedding endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        inputs = [{"input": text} for text in texts]

        result = self.client.embed(inputs=inputs).embeddings

        return [e.embedding for e in result]

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        """Async call out to Gradient's embedding endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        inputs = [{"input": text} for text in texts]

        result = (await self.client.aembed(inputs=inputs)).embeddings

        return [e.embedding for e in result]

    def embed_query(self, text: str) -> List[float]:
        """Call out to Gradient's embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        # Optionally prefix the query with a retrieval instruction prompt.
        query = (
            f"{self.query_prompt_for_retrieval} {text}"
            if self.query_prompt_for_retrieval
            else text
        )
        return self.embed_documents([query])[0]

    async def aembed_query(self, text: str) -> List[float]:
        """Async call out to Gradient's embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        # Optionally prefix the query with a retrieval instruction prompt.
        query = (
            f"{self.query_prompt_for_retrieval} {text}"
            if self.query_prompt_for_retrieval
            else text
        )
        embeddings = await self.aembed_documents([query])
        return embeddings[0]
class TinyAsyncGradientEmbeddingClient:  #: :meta private:
    """Deprecated, TinyAsyncGradientEmbeddingClient was removed.

    This class is just for backwards compatibility with older versions
    of langchain_community.
    It might be entirely removed in the future.
    """

    def __init__(self, *args, **kwargs) -> None:  # type: ignore[no-untyped-def]
        # Instantiation is always an error: the client no longer exists.
        message = "Deprecated,TinyAsyncGradientEmbeddingClient was removed."
        raise ValueError(message)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/self_hosted.py | from typing import Any, Callable, List
from langchain_core.embeddings import Embeddings
from pydantic import ConfigDict
from langchain_community.llms.self_hosted import SelfHostedPipeline
def _embed_documents(pipeline: Any, *args: Any, **kwargs: Any) -> List[List[float]]:
"""Inference function to send to the remote hardware.
Accepts a sentence_transformer model_id and
returns a list of embeddings for each document in the batch.
"""
return pipeline(*args, **kwargs)
class SelfHostedEmbeddings(SelfHostedPipeline, Embeddings):
    """Custom embedding models on self-hosted remote hardware.

    Supported hardware includes auto-launched instances on AWS, GCP, Azure,
    and Lambda, as well as servers specified
    by IP address and SSH credentials (such as on-prem, or another
    cloud like Paperspace, Coreweave, etc.).

    To use, you should have the ``runhouse`` python package installed.

    Example using a model load function:
        .. code-block:: python

            from langchain_community.embeddings import SelfHostedEmbeddings
            from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
            import runhouse as rh

            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
            def get_pipeline():
                model_id = "facebook/bart-large"
                tokenizer = AutoTokenizer.from_pretrained(model_id)
                model = AutoModelForCausalLM.from_pretrained(model_id)
                return pipeline("feature-extraction", model=model, tokenizer=tokenizer)
            embeddings = SelfHostedEmbeddings(
                model_load_fn=get_pipeline,
                hardware=gpu
                model_reqs=["./", "torch", "transformers"],
            )
    Example passing in a pipeline path:
        .. code-block:: python

            from langchain_community.embeddings import SelfHostedHFEmbeddings
            import runhouse as rh
            from transformers import pipeline

            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
            pipeline = pipeline(model="bert-base-uncased", task="feature-extraction")
            rh.blob(pickle.dumps(pipeline),
                path="models/pipeline.pkl").save().to(gpu, path="models")
            embeddings = SelfHostedHFEmbeddings.from_pipeline(
                pipeline="models/pipeline.pkl",
                hardware=gpu,
                model_reqs=["./", "torch", "transformers"],
            )
    """

    inference_fn: Callable = _embed_documents
    """Inference function to extract the embeddings on the remote hardware."""
    inference_kwargs: Any = None
    """Any kwargs to pass to the model's inference function."""

    model_config = ConfigDict(
        extra="forbid",
    )

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # Newlines can degrade embedding quality; collapse them to spaces.
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        # `client`/`pipeline_ref` come from SelfHostedPipeline (remote call).
        embeddings = self.client(self.pipeline_ref, texts)
        if not isinstance(embeddings, list):
            # e.g. a numpy/torch array returned by the remote pipeline
            return embeddings.tolist()
        return embeddings

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        embeddings = self.client(self.pipeline_ref, text)
        if not isinstance(embeddings, list):
            return embeddings.tolist()
        return embeddings
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/ascend.py | import os
from typing import Any, Dict, List, Optional
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict, model_validator
class AscendEmbeddings(Embeddings, BaseModel):
    """
    Ascend NPU accelerate Embedding model

    Please ensure that you have installed CANN and torch_npu.

    Example:
        from langchain_community.embeddings import AscendEmbeddings
        model = AscendEmbeddings(model_path=<path_to_model>,
            device_id=0,
            query_instruction="Represent this sentence for searching relevant passages: "
        )
    """

    # Local filesystem path of the HuggingFace-format model to load.
    model_path: str
    # Ascend NPU device id.
    device_id: int = 0
    # Instruction prepended to each query before embedding.
    query_instruction: str = ""
    # Instruction prepended to each document before embedding.
    document_instruction: str = ""
    # Run the model in half precision on the NPU.
    use_fp16: bool = True
    # "cls" (CLS-token) or "mean" (masked mean over tokens).
    pooling_method: Optional[str] = "cls"
    model: Any
    tokenizer: Any

    model_config = ConfigDict(protected_namespaces=())

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        try:
            from transformers import AutoModel, AutoTokenizer
        except ImportError as e:
            raise ImportError(
                "Unable to import transformers, please install with "
                "`pip install -U transformers`."
            ) from e
        try:
            self.model = AutoModel.from_pretrained(self.model_path).npu().eval()
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)
        except Exception as e:
            # Fixed f-string interpolation: previously the literal text
            # "[self.model_path]" was emitted instead of the actual path.
            raise Exception(
                f"Failed to load model [{self.model_path}], "
                f"due to following error:{e}"
            )
        if self.use_fp16:
            self.model.half()
        # Warm up NPU kernels so the first real call is not slow.
        self.encode([f"warmup {i} times" for i in range(10)])

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate model path and NPU availability before field parsing.

        Raises:
            ValueError: if ``model_path`` was not provided.
            FileNotFoundError: if the path does not exist.
            ModuleNotFoundError: if ``torch_npu`` is not installed.
        """
        if "model_path" not in values:
            raise ValueError("model_path is required")
        if not os.access(values["model_path"], os.F_OK):
            raise FileNotFoundError(
                f"Unable to find valid model path in [{values['model_path']}]"
            )
        try:
            import torch_npu
        except ImportError:
            raise ModuleNotFoundError("torch_npu not found, please install torch_npu")
        except Exception as e:
            raise e
        try:
            # Use .get with the field default: mode="before" receives the raw
            # input dict, so "device_id" may legitimately be absent.
            torch_npu.npu.set_device(values.get("device_id", 0))
        except Exception as e:
            raise Exception(f"set device failed due to {e}")
        return values

    def encode(self, sentences: Any) -> Any:
        """Tokenize *sentences* and return L2-normalized embeddings (numpy)."""
        inputs = self.tokenizer(
            sentences,
            padding=True,
            truncation=True,
            return_tensors="pt",
            max_length=512,
        )
        try:
            import torch
        except ImportError as e:
            raise ImportError(
                "Unable to import torch, please install with `pip install -U torch`."
            ) from e
        last_hidden_state = self.model(
            inputs.input_ids.npu(), inputs.attention_mask.npu(), return_dict=True
        ).last_hidden_state
        pooled = self.pooling(last_hidden_state, inputs["attention_mask"].npu())
        embeddings = torch.nn.functional.normalize(pooled, dim=-1)
        return embeddings.cpu().detach().numpy()

    def pooling(self, last_hidden_state: Any, attention_mask: Any = None) -> Any:
        """Pool token states into one vector per sentence.

        Args:
            last_hidden_state: tensor of shape (batch, seq_len, hidden).
            attention_mask: (batch, seq_len) mask; required for "mean".

        Returns:
            Tensor of shape (batch, hidden).
        """
        try:
            import torch
        except ImportError as e:
            raise ImportError(
                "Unable to import torch, please install with `pip install -U torch`."
            ) from e
        if self.pooling_method == "cls":
            return last_hidden_state[:, 0]
        elif self.pooling_method == "mean":
            # Masked mean over the sequence (token) dimension.
            # Fixed dim=-1 -> dim=1: summing over the hidden dimension
            # produced a (batch, seq_len) tensor instead of (batch, hidden).
            s = torch.sum(
                last_hidden_state * attention_mask.unsqueeze(-1).float(), dim=1
            )
            d = attention_mask.sum(dim=1, keepdim=True).float()
            return s / d
        else:
            raise NotImplementedError(
                f"Pooling method [{self.pooling_method}] not implemented"
            )

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a batch of documents, applying ``document_instruction``."""
        return self.encode([self.document_instruction + text for text in texts])

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query, applying ``query_instruction``."""
        return self.encode([self.query_instruction + text])[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/databricks.py | from __future__ import annotations
from typing import Iterator, List
from urllib.parse import urlparse
from langchain_core._api import deprecated
from langchain_community.embeddings.mlflow import MlflowEmbeddings
def _chunk(texts: List[str], size: int) -> Iterator[List[str]]:
for i in range(0, len(texts), size):
yield texts[i : i + size]
@deprecated(
    since="0.3.3",
    removal="1.0",
    alternative_import="langchain_databricks.DatabricksEmbeddings",
)
class DatabricksEmbeddings(MlflowEmbeddings):
    """Databricks embeddings.

    To use, you should have the ``mlflow`` python package installed.
    For more information, see https://mlflow.org/docs/latest/llms/deployments.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import DatabricksEmbeddings

            embeddings = DatabricksEmbeddings(
                target_uri="databricks",
                endpoint="embeddings",
            )
    """

    target_uri: str = "databricks"
    """The target URI to use. Defaults to ``databricks``."""

    @property
    def _mlflow_extras(self) -> str:
        # Databricks deployments need no extra mlflow pip extras.
        return ""

    def _validate_uri(self) -> None:
        # "databricks" (the default) is always valid.
        if self.target_uri == "databricks":
            return
        # Otherwise require an explicit databricks:// URI scheme.
        if urlparse(self.target_uri).scheme != "databricks":
            raise ValueError(
                "Invalid target URI. The target URI must be a valid databricks URI."
            )
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/openvino.py | from pathlib import Path
from typing import Any, Dict, List
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict, Field
DEFAULT_QUERY_INSTRUCTION = (
"Represent the question for retrieving supporting documents: "
)
DEFAULT_QUERY_BGE_INSTRUCTION_EN = (
"Represent this question for searching relevant passages: "
)
DEFAULT_QUERY_BGE_INSTRUCTION_ZH = "为这个句子生成表示以用于检索相关文章:"
class OpenVINOEmbeddings(BaseModel, Embeddings):
    """OpenVINO embedding models.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import OpenVINOEmbeddings

            model_name = "sentence-transformers/all-mpnet-base-v2"
            model_kwargs = {'device': 'CPU'}
            encode_kwargs = {'normalize_embeddings': True}
            ov = OpenVINOEmbeddings(
                model_name_or_path=model_name,
                model_kwargs=model_kwargs,
                encode_kwargs=encode_kwargs
            )
    """

    ov_model: Any = None
    """OpenVINO model object."""
    tokenizer: Any = None
    """Tokenizer for embedding model."""
    model_name_or_path: str
    """HuggingFace model id."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass to the model."""
    encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass when calling the `encode` method of the model."""
    show_progress: bool = False
    """Whether to show a progress bar."""

    def __init__(self, **kwargs: Any):
        """Initialize the sentence_transformer."""
        super().__init__(**kwargs)
        try:
            from optimum.intel.openvino import OVModelForFeatureExtraction
        except ImportError as e:
            raise ImportError(
                "Could not import optimum-intel python package. "
                "Please install it with: "
                "pip install -U 'optimum[openvino,nncf]'"
            ) from e

        try:
            from huggingface_hub import HfApi
        except ImportError as e:
            raise ImportError(
                "Could not import huggingface_hub python package. "
                "Please install it with: "
                "`pip install -U huggingface_hub`."
            ) from e

        def require_model_export(
            model_id: str, revision: Any = None, subfolder: Any = None
        ) -> bool:
            # Decide whether the model still needs exporting to OpenVINO IR:
            # True when no openvino_model.xml/.bin pair can be found locally
            # or on the HuggingFace Hub.
            model_dir = Path(model_id)
            if subfolder is not None:
                model_dir = model_dir / subfolder
            if model_dir.is_dir():
                # Local directory: check for the IR files on disk.
                return (
                    not (model_dir / "openvino_model.xml").exists()
                    or not (model_dir / "openvino_model.bin").exists()
                )
            hf_api = HfApi()
            try:
                model_info = hf_api.model_info(model_id, revision=revision or "main")
                normalized_subfolder = (
                    None if subfolder is None else Path(subfolder).as_posix()
                )
                model_files = [
                    file.rfilename
                    for file in model_info.siblings
                    if normalized_subfolder is None
                    or file.rfilename.startswith(normalized_subfolder)
                ]
                ov_model_path = (
                    "openvino_model.xml"
                    if subfolder is None
                    else f"{normalized_subfolder}/openvino_model.xml"
                )
                return (
                    ov_model_path not in model_files
                    or ov_model_path.replace(".xml", ".bin") not in model_files
                )
            except Exception:
                # If the Hub cannot be queried, fall back to exporting.
                return True

        if require_model_export(self.model_name_or_path):
            # use remote model
            self.ov_model = OVModelForFeatureExtraction.from_pretrained(
                self.model_name_or_path, export=True, **self.model_kwargs
            )
        else:
            # use local model
            self.ov_model = OVModelForFeatureExtraction.from_pretrained(
                self.model_name_or_path, **self.model_kwargs
            )

        try:
            from transformers import AutoTokenizer
        except ImportError as e:
            raise ImportError(
                "Unable to import transformers, please install with "
                "`pip install -U transformers`."
            ) from e
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path)

    def _text_length(self, text: Any) -> int:
        """
        Help function to get the length for the input text. Text can be either
        a list of ints (which means a single text as input), or a tuple of list of ints
        (representing several text inputs to the model).
        """
        if isinstance(text, dict):  # {key: value} case
            return len(next(iter(text.values())))
        elif not hasattr(text, "__len__"):  # Object has no len() method
            return 1
        # Empty string or list of ints
        elif len(text) == 0 or isinstance(text[0], int):
            return len(text)
        else:
            # Sum of length of individual strings
            return sum([len(t) for t in text])

    def encode(
        self,
        sentences: Any,
        batch_size: int = 4,
        show_progress_bar: bool = False,
        convert_to_numpy: bool = True,
        convert_to_tensor: bool = False,
        mean_pooling: bool = False,
        normalize_embeddings: bool = True,
    ) -> Any:
        """
        Computes sentence embeddings.

        :param sentences: the sentences to embed.
        :param batch_size: the batch size used for the computation.
        :param show_progress_bar: Whether to output a progress bar.
        :param convert_to_numpy: Whether the output should be a list of numpy vectors.
        :param convert_to_tensor: Whether the output should be one large tensor.
        :param mean_pooling: Whether to pool returned vectors.
        :param normalize_embeddings: Whether to normalize returned vectors.
        :return: By default, a 2d numpy array with shape [num_inputs, output_dimension].
        """
        try:
            import numpy as np
        except ImportError as e:
            raise ImportError(
                "Unable to import numpy, please install with " "`pip install -U numpy`."
            ) from e
        try:
            from tqdm import trange
        except ImportError as e:
            raise ImportError(
                "Unable to import tqdm, please install with " "`pip install -U tqdm`."
            ) from e
        try:
            import torch
        except ImportError as e:
            raise ImportError(
                "Unable to import torch, please install with " "`pip install -U torch`."
            ) from e

        def run_mean_pooling(model_output: Any, attention_mask: Any) -> Any:
            # Attention-mask-weighted mean over the token dimension.
            token_embeddings = model_output[
                0
            ]  # First element of model_output contains all token embeddings
            input_mask_expanded = (
                attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
            )
            return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
                input_mask_expanded.sum(1), min=1e-9
            )

        if convert_to_tensor:
            convert_to_numpy = False
        input_was_string = False
        if isinstance(sentences, str) or not hasattr(
            sentences, "__len__"
        ):  # Cast an individual sentence to a list with length 1
            sentences = [sentences]
            input_was_string = True

        all_embeddings: Any = []
        # Process longest inputs first so each batch has similar lengths.
        length_sorted_idx = np.argsort([-self._text_length(sen) for sen in sentences])
        sentences_sorted = [sentences[idx] for idx in length_sorted_idx]
        for start_index in trange(
            0, len(sentences), batch_size, desc="Batches", disable=not show_progress_bar
        ):
            sentences_batch = sentences_sorted[start_index : start_index + batch_size]
            length = self.ov_model.request.inputs[0].get_partial_shape()[1]
            if length.is_dynamic:
                features = self.tokenizer(
                    sentences_batch, padding=True, truncation=True, return_tensors="pt"
                )
            else:
                # Static-shape model: pad every batch to the compiled length.
                features = self.tokenizer(
                    sentences_batch,
                    padding="max_length",
                    max_length=length.get_length(),
                    truncation=True,
                    return_tensors="pt",
                )
            out_features = self.ov_model(**features)
            if mean_pooling:
                embeddings = run_mean_pooling(out_features, features["attention_mask"])
            else:
                # Default: take the first ([CLS]) token representation.
                embeddings = out_features[0][:, 0]
            if normalize_embeddings:
                embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
            # fixes for #522 and #487 to avoid oom problems on gpu with large datasets
            if convert_to_numpy:
                embeddings = embeddings.cpu()
            all_embeddings.extend(embeddings)
        # Restore the caller's original ordering after length-sorting.
        all_embeddings = [all_embeddings[idx] for idx in np.argsort(length_sorted_idx)]
        if convert_to_tensor:
            if len(all_embeddings):
                all_embeddings = torch.stack(all_embeddings)
            else:
                all_embeddings = torch.Tensor()
        elif convert_to_numpy:
            all_embeddings = np.asarray([emb.numpy() for emb in all_embeddings])
        if input_was_string:
            all_embeddings = all_embeddings[0]
        return all_embeddings

    model_config = ConfigDict(extra="forbid", protected_namespaces=())

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # Newlines can degrade embedding quality; collapse them to spaces.
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        embeddings = self.encode(
            texts, show_progress_bar=self.show_progress, **self.encode_kwargs
        )
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self.embed_documents([text])[0]

    def save_model(
        self,
        model_path: str,
    ) -> bool:
        """Serialize the (half-precision) model and tokenizer to *model_path*."""
        self.ov_model.half()
        self.ov_model.save_pretrained(model_path)
        self.tokenizer.save_pretrained(model_path)
        return True
class OpenVINOBgeEmbeddings(OpenVINOEmbeddings):
    """OpenVINO BGE embedding models.

    Bge Example:
        .. code-block:: python

            from langchain_community.embeddings import OpenVINOBgeEmbeddings

            model_name = "BAAI/bge-large-en-v1.5"
            model_kwargs = {'device': 'CPU'}
            encode_kwargs = {'normalize_embeddings': True}
            ov = OpenVINOBgeEmbeddings(
                model_name_or_path=model_name,
                model_kwargs=model_kwargs,
                encode_kwargs=encode_kwargs
            )
    """

    query_instruction: str = DEFAULT_QUERY_BGE_INSTRUCTION_EN
    """Instruction to use for embedding query."""
    embed_instruction: str = ""
    """Instruction to use for embedding document."""

    def __init__(self, **kwargs: Any):
        """Initialize the sentence_transformer."""
        super().__init__(**kwargs)

        # Chinese BGE checkpoints use a Chinese retrieval instruction.
        if "-zh" in self.model_name_or_path:
            self.query_instruction = DEFAULT_QUERY_BGE_INSTRUCTION_ZH

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        texts = [self.embed_instruction + t.replace("\n", " ") for t in texts]
        embeddings = self.encode(texts, **self.encode_kwargs)
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        embedding = self.encode(self.query_instruction + text, **self.encode_kwargs)
        return embedding.tolist()
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/llamafile.py | import logging
from typing import List, Optional
import requests
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel
logger = logging.getLogger(__name__)
class LlamafileEmbeddings(BaseModel, Embeddings):
    """Llamafile lets you distribute and run large language models with a
    single file.

    To get started, see: https://github.com/Mozilla-Ocho/llamafile

    To use this class, you will need to first:

    1. Download a llamafile.
    2. Make the downloaded file executable: `chmod +x path/to/model.llamafile`
    3. Start the llamafile in server mode with embeddings enabled:

        `./path/to/model.llamafile --server --nobrowser --embedding`

    Example:
        .. code-block:: python

            from langchain_community.embeddings import LlamafileEmbeddings

            embedder = LlamafileEmbeddings()
            doc_embeddings = embedder.embed_documents(
                [
                    "Alpha is the first letter of the Greek alphabet",
                    "Beta is the second letter of the Greek alphabet",
                ]
            )
            query_embedding = embedder.embed_query(
                "What is the second letter of the Greek alphabet"
            )
    """

    base_url: str = "http://localhost:8080"
    """Base url where the llamafile server is listening."""

    request_timeout: Optional[int] = None
    """Timeout for server requests"""

    def _embed(self, text: str) -> List[float]:
        # POST the text to the server's /embedding endpoint.
        endpoint = f"{self.base_url}/embedding"
        try:
            response = requests.post(
                url=endpoint,
                headers={"Content-Type": "application/json"},
                json={"content": text},
                timeout=self.request_timeout,
            )
        except requests.exceptions.ConnectionError:
            raise requests.exceptions.ConnectionError(
                f"Could not connect to Llamafile server. Please make sure "
                f"that a server is running at {self.base_url}."
            )

        # Raise exception if we got a bad (non-200) response status code
        response.raise_for_status()

        contents = response.json()
        if "embedding" not in contents:
            raise KeyError(
                "Unexpected output from /embedding endpoint, output dict "
                "missing 'embedding' key."
            )

        embedding = contents["embedding"]

        # Prior to llamafile v0.6.2, a server started without the `--embedding`
        # option always returned a 0-vector from this endpoint. See:
        # https://github.com/Mozilla-Ocho/llamafile/issues/243
        # Guard against that by rejecting vectors that sum to exactly 0.
        if sum(embedding) == 0.0:
            raise ValueError(
                "Embedding sums to 0, did you start the llamafile server with "
                "the `--embedding` option enabled?"
            )

        return embedding

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed documents using a llamafile server running at `self.base_url`.
        llamafile server should be started in a separate process before invoking
        this method.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        return [self._embed(text) for text in texts]

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using a llamafile server running at `self.base_url`.
        llamafile server should be started in a separate process before invoking
        this method.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self._embed(text)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/clarifai.py | import logging
from typing import Any, Dict, List, Optional
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict, Field, model_validator
logger = logging.getLogger(__name__)
class ClarifaiEmbeddings(BaseModel, Embeddings):
    """Clarifai embedding models.

    To use, you should have the ``clarifai`` python package installed, and the
    environment variable ``CLARIFAI_PAT`` set with your personal access token or pass it
    as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import ClarifaiEmbeddings
            clarifai = ClarifaiEmbeddings(user_id=USER_ID,
                                          app_id=APP_ID,
                                          model_id=MODEL_ID)
                             (or)
            Example_URL = "https://clarifai.com/clarifai/main/models/BAAI-bge-base-en-v15"
            clarifai = ClarifaiEmbeddings(model_url=EXAMPLE_URL)
    """

    model_url: Optional[str] = None
    """Model url to use."""
    model_id: Optional[str] = None
    """Model id to use."""
    model_version_id: Optional[str] = None
    """Model version id to use."""
    app_id: Optional[str] = None
    """Clarifai application id to use."""
    user_id: Optional[str] = None
    """Clarifai user id to use."""
    pat: Optional[str] = Field(default=None, exclude=True)
    """Clarifai personal access token to use."""
    token: Optional[str] = Field(default=None, exclude=True)
    """Clarifai session token to use."""
    model: Any = Field(default=None, exclude=True)  #: :meta private:
    api_base: str = "https://api.clarifai.com"

    model_config = ConfigDict(extra="forbid", protected_namespaces=())

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that we have all required info to access Clarifai
        platform and python package exists in environment."""
        try:
            from clarifai.client.model import Model
        except ImportError:
            raise ImportError(
                "Could not import clarifai python package. "
                "Please install it with `pip install clarifai`."
            )
        user_id = values.get("user_id")
        app_id = values.get("app_id")
        model_id = values.get("model_id")
        model_version_id = values.get("model_version_id")
        model_url = values.get("model_url")
        api_base = values.get("api_base")
        pat = values.get("pat")
        token = values.get("token")

        # The Clarifai Model client accepts either a full model_url or the
        # (user_id, app_id, model_id) triple.
        values["model"] = Model(
            url=model_url,
            app_id=app_id,
            user_id=user_id,
            model_version=dict(id=model_version_id),
            pat=pat,
            token=token,
            model_id=model_id,
            base_url=api_base,
        )
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to Clarifai's embedding models.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        from clarifai.client.input import Inputs

        input_obj = Inputs.from_auth_helper(self.model.auth_helper)
        # Batch size used for each predict call against the API.
        batch_size = 32
        embeddings = []

        try:
            for i in range(0, len(texts), batch_size):
                batch = texts[i : i + batch_size]
                input_batch = [
                    input_obj.get_text_input(input_id=str(id), raw_text=inp)
                    for id, inp in enumerate(batch)
                ]
                predict_response = self.model.predict(input_batch)
                embeddings.extend(
                    [
                        list(output.data.embeddings[0].vector)
                        for output in predict_response.outputs
                    ]
                )
        except Exception as e:
            # NOTE(review): failures are logged and a possibly-partial list is
            # returned rather than raised — confirm this best-effort behavior
            # is intentional.
            logger.error(f"Predict failed, exception: {e}")

        return embeddings

    def embed_query(self, text: str) -> List[float]:
        """Call out to Clarifai's embedding models.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        try:
            predict_response = self.model.predict_by_bytes(
                bytes(text, "utf-8"), input_type="text"
            )
            embeddings = [
                list(op.data.embeddings[0].vector) for op in predict_response.outputs
            ]
        except Exception as e:
            # NOTE(review): if predict fails, `embeddings` is left unbound and
            # the return below raises NameError — likely should re-raise.
            logger.error(f"Predict failed, exception: {e}")

        return embeddings[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/zhipuai.py | from typing import Any, Dict, List, Optional
from langchain_core.embeddings import Embeddings
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, Field, model_validator
class ZhipuAIEmbeddings(BaseModel, Embeddings):
    """ZhipuAI embedding model integration.

    Setup:

        To use, you should have the ``zhipuai`` python package installed, and the
        environment variable ``ZHIPUAI_API_KEY`` set with your API KEY.

        More instructions about ZhipuAi Embeddings, you can get it
        from https://open.bigmodel.cn/dev/api#vector

        .. code-block:: bash

            pip install -U zhipuai
            export ZHIPUAI_API_KEY="your-api-key"

    Key init args — completion params:
        model: Optional[str]
            Name of ZhipuAI model to use.
        api_key: str
            Automatically inferred from env var `ZHIPUAI_API_KEY` if not provided.

    See full list of supported init args and their descriptions in the params section.

    Instantiate:

        .. code-block:: python

            from langchain_community.embeddings import ZhipuAIEmbeddings

            embed = ZhipuAIEmbeddings(
                model="embedding-2",
                # api_key="...",
            )

    Embed single text:
        .. code-block:: python

            input_text = "The meaning of life is 42"
            embed.embed_query(input_text)

        .. code-block:: python

            [-0.003832892, 0.049372625, -0.035413884, -0.019301128, 0.0068899863, 0.01248398, -0.022153955, 0.006623926, 0.00778216, 0.009558191, ...]

    Embed multiple text:
        .. code-block:: python

            input_texts = ["This is a test query1.", "This is a test query2."]
            embed.embed_documents(input_texts)

        .. code-block:: python

            [
                [0.0083934665, 0.037985895, -0.06684559, -0.039616987, 0.015481004, -0.023952313, ...],
                [-0.02713102, -0.005470169, 0.032321047, 0.042484466, 0.023290444, 0.02170547, ...]
            ]
    """  # noqa: E501

    client: Any = Field(default=None, exclude=True)  #: :meta private:
    model: str = Field(default="embedding-2")
    """Model name"""
    api_key: str
    """Automatically inferred from env var `ZHIPUAI_API_KEY` if not provided."""
    dimensions: Optional[int] = None
    """The number of dimensions the resulting output embeddings should have.

    Only supported in `embedding-3` and later models.
    """

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that auth token exists in environment.

        Resolves ``api_key`` from the ``ZHIPUAI_API_KEY`` environment variable
        when not supplied explicitly, then eagerly builds the SDK client.
        """
        values["api_key"] = get_from_dict_or_env(values, "api_key", "ZHIPUAI_API_KEY")
        try:
            from zhipuai import ZhipuAI

            values["client"] = ZhipuAI(api_key=values["api_key"])
        except ImportError:
            raise ImportError(
                "Could not import zhipuai python package."
                "Please install it with `pip install zhipuai`."
            )
        return values

    def embed_query(self, text: str) -> List[float]:
        """
        Embed a single query text with the ZhipuAI embedding API.

        Args:
            text: A text to embed.

        Returns:
            Input document's embedded list.
        """
        # Delegates to embed_documents so both paths share the same request
        # logic (including the optional `dimensions` handling).
        resp = self.embed_documents([text])
        return resp[0]

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """
        Embed a list of text documents with the ZhipuAI embedding API.

        Args:
            texts: A list of text documents to embed.

        Returns:
            A list of embeddings for each document in the input list.
            Each embedding is represented as a list of float values.
        """
        # `dimensions` is only accepted by embedding-3 and later models, so it
        # is only sent when explicitly configured.
        if self.dimensions is not None:
            resp = self.client.embeddings.create(
                model=self.model,
                input=texts,
                dimensions=self.dimensions,
            )
        else:
            resp = self.client.embeddings.create(model=self.model, input=texts)
        embeddings = [r.embedding for r in resp.data]
        return embeddings
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/modelscope_hub.py | from typing import Any, List, Optional
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict
class ModelScopeEmbeddings(BaseModel, Embeddings):
    """ModelScopeHub embedding models.

    To use, you should have the ``modelscope`` python package installed.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import ModelScopeEmbeddings

            model_id = "damo/nlp_corom_sentence-embedding_english-base"
            embed = ModelScopeEmbeddings(model_id=model_id, model_revision="v1.0.0")
    """

    embed: Any = None  # the modelscope pipeline, built in __init__
    model_id: str = "damo/nlp_corom_sentence-embedding_english-base"
    """Model name to use."""
    model_revision: Optional[str] = None

    model_config = ConfigDict(extra="forbid", protected_namespaces=())

    def __init__(self, **kwargs: Any):
        """Initialize the modelscope sentence-embedding pipeline."""
        super().__init__(**kwargs)
        try:
            from modelscope.pipelines import pipeline
            from modelscope.utils.constant import Tasks
        except ImportError as e:
            raise ImportError(
                "Could not import some python packages."
                "Please install it with `pip install modelscope`."
            ) from e
        self.embed = pipeline(
            Tasks.sentence_embedding,
            model=self.model_id,
            model_revision=self.model_revision,
        )

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a modelscope embedding model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # Newlines can negatively affect embedding quality, so flatten them.
        cleaned = [text.replace("\n", " ") for text in texts]
        result = self.embed(input={"source_sentence": cleaned})
        return result["text_embedding"].tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a modelscope embedding model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        flattened = text.replace("\n", " ")
        result = self.embed(input={"source_sentence": [flattened]})
        return result["text_embedding"][0].tolist()
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/aleph_alpha.py | from typing import Any, Dict, List, Optional
from langchain_core.embeddings import Embeddings
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, model_validator
class AlephAlphaAsymmetricSemanticEmbedding(BaseModel, Embeddings):
    """Aleph Alpha's asymmetric semantic embedding.

    AA provides you with an endpoint to embed a document and a query.
    The models were optimized to make the embeddings of documents and
    the query for a document as similar as possible.
    To learn more, check out: https://docs.aleph-alpha.com/docs/tasks/semantic_embed/

    Example:
        .. code-block:: python

            from langchain_community.embeddings import (
                AlephAlphaAsymmetricSemanticEmbedding,
            )

            embeddings = AlephAlphaAsymmetricSemanticEmbedding(
                normalize=True, compress_to_size=128
            )

            document = "This is a content of the document"
            query = "What is the content of the document?"

            doc_result = embeddings.embed_documents([document])
            query_result = embeddings.embed_query(query)
    """

    client: Any  #: :meta private:

    # Embedding params
    model: str = "luminous-base"
    """Model name to use."""
    compress_to_size: Optional[int] = None
    """Should the returned embeddings come back as an original 5120-dim vector,
    or should it be compressed to 128-dim."""
    normalize: bool = False
    """Should returned embeddings be normalized"""
    contextual_control_threshold: Optional[int] = None
    """Attention control parameters only apply to those tokens that have
    explicitly been set in the request."""
    control_log_additive: bool = True
    """Apply controls on prompt items by adding the log(control_factor)
    to attention scores."""

    # Client params
    aleph_alpha_api_key: Optional[str] = None
    """API key for Aleph Alpha API."""
    host: str = "https://api.aleph-alpha.com"
    """The hostname of the API host.
    The default one is "https://api.aleph-alpha.com")"""
    hosting: Optional[str] = None
    """Determines in which datacenters the request may be processed.
    You can either set the parameter to "aleph-alpha" or omit it (defaulting to None).

    Not setting this value, or setting it to None, gives us maximal flexibility
    in processing your request in our
    own datacenters and on servers hosted with other providers.
    Choose this option for maximal availability.

    Setting it to "aleph-alpha" allows us to only process the request
    in our own datacenters.
    Choose this option for maximal data privacy."""
    request_timeout_seconds: int = 305
    """Client timeout that will be set for HTTP requests in the
    `requests` library's API calls.
    Server will close all requests after 300 seconds with an internal server error."""
    total_retries: int = 8
    """The number of retries made in case requests fail with certain retryable
    status codes. If the last
    retry fails a corresponding exception is raised. Note, that between retries
    an exponential backoff
    is applied, starting with 0.5 s after the first retry and doubling for each
    retry made. So with the
    default setting of 8 retries a total wait time of 63.5 s is added between
    the retries."""
    nice: bool = False
    """Setting this to True, will signal to the API that you intend to be
    nice to other users
    by de-prioritizing your request below concurrent ones."""

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        aleph_alpha_api_key = get_from_dict_or_env(
            values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY"
        )
        try:
            from aleph_alpha_client import Client

            # Bug fix: in a mode="before" validator, `values` only contains the
            # fields the caller actually passed (field defaults are not applied
            # yet), so indexing e.g. values["host"] raised KeyError whenever a
            # field was omitted. Fall back to the declared defaults instead.
            values["client"] = Client(
                token=aleph_alpha_api_key,
                host=values.get("host", "https://api.aleph-alpha.com"),
                hosting=values.get("hosting"),
                request_timeout_seconds=values.get("request_timeout_seconds", 305),
                total_retries=values.get("total_retries", 8),
                nice=values.get("nice", False),
            )
        except ImportError:
            raise ImportError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        return values

    def _semantic_embed(self, text: str, representation_name: str) -> List[float]:
        """Build and send one semantic-embedding request; return the vector.

        Args:
            text: The text to embed.
            representation_name: Attribute name on ``SemanticRepresentation``,
                e.g. "Document" or "Query".

        Raises:
            ImportError: If ``aleph_alpha_client`` is not installed.
        """
        # Import lazily so the dependency is only required at call time.
        try:
            from aleph_alpha_client import (
                Prompt,
                SemanticEmbeddingRequest,
                SemanticRepresentation,
            )
        except ImportError:
            raise ImportError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        params = {
            "prompt": Prompt.from_text(text),
            "representation": getattr(SemanticRepresentation, representation_name),
            "compress_to_size": self.compress_to_size,
            "normalize": self.normalize,
            "contextual_control_threshold": self.contextual_control_threshold,
            "control_log_additive": self.control_log_additive,
        }
        request = SemanticEmbeddingRequest(**params)
        response = self.client.semantic_embed(request=request, model=self.model)
        return response.embedding

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to Aleph Alpha's asymmetric Document endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # One API call per text; the endpoint embeds a single prompt at a time.
        return [self._semantic_embed(text, "Document") for text in texts]

    def embed_query(self, text: str) -> List[float]:
        """Call out to Aleph Alpha's asymmetric, query embedding endpoint

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self._semantic_embed(text, "Query")
class AlephAlphaSymmetricSemanticEmbedding(AlephAlphaAsymmetricSemanticEmbedding):
    """Symmetric version of the Aleph Alpha's semantic embeddings.

    The main difference is that here, both the documents and
    queries are embedded with a SemanticRepresentation.Symmetric

    Example:
        .. code-block:: python

            from langchain_community.embeddings import (
                AlephAlphaSymmetricSemanticEmbedding,
            )

            embeddings = AlephAlphaSymmetricSemanticEmbedding(
                normalize=True, compress_to_size=128
            )
            text = "This is a test text"

            doc_result = embeddings.embed_documents([text])
            query_result = embeddings.embed_query(text)
    """

    def _embed(self, text: str) -> List[float]:
        """Embed one text with the Symmetric representation."""
        # Import lazily so the dependency is only required at call time.
        try:
            from aleph_alpha_client import (
                Prompt,
                SemanticEmbeddingRequest,
                SemanticRepresentation,
            )
        except ImportError:
            raise ImportError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        request = SemanticEmbeddingRequest(
            prompt=Prompt.from_text(text),
            representation=SemanticRepresentation.Symmetric,
            compress_to_size=self.compress_to_size,
            normalize=self.normalize,
            contextual_control_threshold=self.contextual_control_threshold,
            control_log_additive=self.control_log_additive,
        )
        response = self.client.semantic_embed(request=request, model=self.model)
        return response.embedding

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to Aleph Alpha's Document endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        return [self._embed(text) for text in texts]

    def embed_query(self, text: str) -> List[float]:
        """Call out to Aleph Alpha's symmetric query embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self._embed(text)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/bedrock.py | import asyncio
import json
import os
from typing import Any, Dict, List, Optional
import numpy as np
from langchain_core._api.deprecation import deprecated
from langchain_core.embeddings import Embeddings
from langchain_core.runnables.config import run_in_executor
from pydantic import BaseModel, ConfigDict, model_validator
from typing_extensions import Self
@deprecated(
    since="0.2.11",
    removal="1.0",
    alternative_import="langchain_aws.BedrockEmbeddings",
)
class BedrockEmbeddings(BaseModel, Embeddings):
    """Bedrock embedding models.

    To authenticate, the AWS client uses the following methods to
    automatically load credentials:
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html

    If a specific credential profile should be used, you must pass
    the name of the profile from the ~/.aws/credentials file that is to be used.

    Make sure the credentials / roles used have the required policies to
    access the Bedrock service.

    Example:
        .. code-block:: python

            from langchain_community.bedrock_embeddings import BedrockEmbeddings

            region_name = "us-east-1"
            credentials_profile_name = "default"
            model_id = "amazon.titan-embed-text-v1"

            be = BedrockEmbeddings(
                credentials_profile_name=credentials_profile_name,
                region_name=region_name,
                model_id=model_id
            )
    """

    # NOTE: the Example above was previously a dead, bare string statement
    # after the docstring, so it never showed up in help(); it is now part of
    # the class docstring.

    client: Any = None  #: :meta private:
    """Bedrock client."""
    region_name: Optional[str] = None
    """The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable
    or region specified in ~/.aws/config in case it is not provided here.
    """

    credentials_profile_name: Optional[str] = None
    """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
    has either access keys or role information specified.
    If not specified, the default credential profile or, if on an EC2 instance,
    credentials from IMDS will be used.
    See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
    """

    model_id: str = "amazon.titan-embed-text-v1"
    """Id of the model to call, e.g., amazon.titan-embed-text-v1, this is
    equivalent to the modelId property in the list-foundation-models api"""

    model_kwargs: Optional[Dict] = None
    """Keyword arguments to pass to the model."""

    endpoint_url: Optional[str] = None
    """Needed if you don't want to default to us-east-1 endpoint"""

    normalize: bool = False
    """Whether the embeddings should be normalized to unit vectors"""

    model_config = ConfigDict(extra="forbid", protected_namespaces=())

    @model_validator(mode="after")
    def validate_environment(self) -> Self:
        """Validate that AWS credentials to and python package exists in environment."""
        # Honor a pre-built client injected by the caller.
        if self.client is not None:
            return self

        try:
            import boto3

            if self.credentials_profile_name is not None:
                session = boto3.Session(profile_name=self.credentials_profile_name)
            else:
                # use default credentials
                session = boto3.Session()

            client_params = {}
            if self.region_name:
                client_params["region_name"] = self.region_name

            if self.endpoint_url:
                client_params["endpoint_url"] = self.endpoint_url

            self.client = session.client("bedrock-runtime", **client_params)

        except ImportError:
            raise ImportError(
                "Could not import boto3 python package. "
                "Please install it with `pip install boto3`."
            )
        except Exception as e:
            raise ValueError(
                "Could not load credentials to authenticate with AWS client. "
                "Please check that credentials in the specified "
                f"profile name are valid. Bedrock error: {e}"
            ) from e

        return self

    def _embedding_func(self, text: str) -> List[float]:
        """Call out to Bedrock embedding endpoint."""
        # replace newlines, which can negatively affect performance.
        # NOTE(review): os.linesep is "\r\n" on Windows, so bare "\n" newlines
        # would not be replaced there — presumably intentional upstream;
        # confirm before changing.
        text = text.replace(os.linesep, " ")

        # format input body for provider
        provider = self.model_id.split(".")[0]
        _model_kwargs = self.model_kwargs or {}
        input_body = {**_model_kwargs}
        if provider == "cohere":
            if "input_type" not in input_body:
                input_body["input_type"] = "search_document"
            input_body["texts"] = [text]
        else:
            # includes common provider == "amazon"
            input_body["inputText"] = text
        body = json.dumps(input_body)

        try:
            # invoke bedrock API
            response = self.client.invoke_model(
                body=body,
                modelId=self.model_id,
                accept="application/json",
                contentType="application/json",
            )

            # format output based on provider
            response_body = json.loads(response.get("body").read())
            if provider == "cohere":
                return response_body.get("embeddings")[0]
            else:
                # includes common provider == "amazon"
                return response_body.get("embedding")
        except Exception as e:
            # Chain the original exception so the root cause stays visible.
            raise ValueError(f"Error raised by inference endpoint: {e}") from e

    def _normalize_vector(self, embeddings: List[float]) -> List[float]:
        """Normalize the embedding to a unit vector."""
        emb = np.array(embeddings)
        norm_emb = emb / np.linalg.norm(emb)
        return norm_emb.tolist()

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a Bedrock model.

        Args:
            texts: The list of texts to embed

        Returns:
            List of embeddings, one for each text.
        """
        results = []
        for text in texts:
            response = self._embedding_func(text)

            if self.normalize:
                response = self._normalize_vector(response)

            results.append(response)

        return results

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a Bedrock model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        embedding = self._embedding_func(text)

        if self.normalize:
            return self._normalize_vector(embedding)

        return embedding

    async def aembed_query(self, text: str) -> List[float]:
        """Asynchronous compute query embeddings using a Bedrock model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        # boto3 is synchronous, so delegate to a thread executor.
        return await run_in_executor(None, self.embed_query, text)

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        """Asynchronous compute doc embeddings using a Bedrock model.

        Args:
            texts: The list of texts to embed

        Returns:
            List of embeddings, one for each text.
        """
        result = await asyncio.gather(*[self.aembed_query(text) for text in texts])

        return list(result)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/mosaicml.py | from typing import Any, Dict, List, Mapping, Optional, Tuple
import requests
from langchain_core.embeddings import Embeddings
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
class MosaicMLInstructorEmbeddings(BaseModel, Embeddings):
    """MosaicML embedding service.

    To use, you should have the
    environment variable ``MOSAICML_API_TOKEN`` set with your API token, or pass
    it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import MosaicMLInstructorEmbeddings

            endpoint_url = (
                "https://models.hosted-on.mosaicml.hosting/instructor-large/v1/predict"
            )
            mosaic_llm = MosaicMLInstructorEmbeddings(
                endpoint_url=endpoint_url,
                mosaicml_api_token="my-api-key"
            )
    """

    endpoint_url: str = (
        "https://models.hosted-on.mosaicml.hosting/instructor-xl/v1/predict"
    )
    """Endpoint URL to use."""
    embed_instruction: str = "Represent the document for retrieval: "
    """Instruction used to embed documents."""
    query_instruction: str = (
        "Represent the question for retrieving supporting documents: "
    )
    """Instruction used to embed the query."""
    retry_sleep: float = 1.0
    """How long to try sleeping for if a rate limit is encountered"""

    # API token; resolved from the MOSAICML_API_TOKEN env var by the validator.
    mosaicml_api_token: Optional[str] = None

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        mosaicml_api_token = get_from_dict_or_env(
            values, "mosaicml_api_token", "MOSAICML_API_TOKEN"
        )
        values["mosaicml_api_token"] = mosaicml_api_token
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {"endpoint_url": self.endpoint_url}

    def _embed(
        self, input: List[Tuple[str, str]], is_retry: bool = False
    ) -> List[List[float]]:
        """POST (instruction, text) pairs to the endpoint; retries once on 429.

        Args:
            input: List of (instruction, text) pairs to embed.
            is_retry: Internal flag guarding against unbounded retry recursion.

        Returns:
            One embedding (list of floats) per input pair.

        Raises:
            ValueError: On transport errors, persistent rate limiting, or an
                unrecognized response format.
        """
        payload = {"inputs": input}

        # HTTP headers for authorization
        # NOTE(review): the token is sent verbatim, without a "Bearer " prefix —
        # presumably what the MosaicML endpoint expects; confirm.
        headers = {
            "Authorization": f"{self.mosaicml_api_token}",
            "Content-Type": "application/json",
        }

        # send request
        try:
            response = requests.post(self.endpoint_url, headers=headers, json=payload)
        except requests.exceptions.RequestException as e:
            raise ValueError(f"Error raised by inference endpoint: {e}")

        try:
            # On rate limiting, sleep once and retry a single time before
            # giving up (is_retry prevents a second retry).
            if response.status_code == 429:
                if not is_retry:
                    import time

                    time.sleep(self.retry_sleep)

                    return self._embed(input, is_retry=True)

                raise ValueError(
                    f"Error raised by inference API: rate limit exceeded.\nResponse: "
                    f"{response.text}"
                )

            parsed_response = response.json()

            # The inference API has changed a couple of times, so we add some handling
            # to be robust to multiple response formats.
            if isinstance(parsed_response, dict):
                output_keys = ["data", "output", "outputs"]
                for key in output_keys:
                    if key in parsed_response:
                        output_item = parsed_response[key]
                        break
                else:
                    # for/else: no recognized output key was present at all.
                    raise ValueError(
                        f"No key data or output in response: {parsed_response}"
                    )

                # A single bare vector is wrapped into a batch of one.
                if isinstance(output_item, list) and isinstance(output_item[0], list):
                    embeddings = output_item
                else:
                    embeddings = [output_item]
            else:
                raise ValueError(f"Unexpected response type: {parsed_response}")

        except requests.exceptions.JSONDecodeError as e:
            raise ValueError(
                f"Error raised by inference API: {e}.\nResponse: {response.text}"
            )

        return embeddings

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed documents using a MosaicML deployed instructor embedding model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        instruction_pairs = [(self.embed_instruction, text) for text in texts]
        embeddings = self._embed(instruction_pairs)
        return embeddings

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using a MosaicML deployed instructor embedding model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        instruction_pair = (self.query_instruction, text)
        embedding = self._embed([instruction_pair])[0]
        return embedding
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/sparkllm.py | import base64
import hashlib
import hmac
import json
import logging
from datetime import datetime
from time import mktime
from typing import Any, Dict, List, Literal, Optional
from urllib.parse import urlencode
from wsgiref.handlers import format_date_time
import numpy as np
import requests
from langchain_core.embeddings import Embeddings
from langchain_core.utils import (
secret_from_env,
)
from numpy import ndarray
from pydantic import BaseModel, ConfigDict, Field, SecretStr
# SparkLLMTextEmbeddings is an embedding model provided by iFLYTEK Co., Ltd.. (https://iflytek.com/en/).
# Official Website: https://www.xfyun.cn/doc/spark/Embedding_api.html
# Developers need to create an application in the console first, use the appid, APIKey,
# and APISecret provided in the application for authentication,
# and generate an authentication URL for handshake.
# You can get one by registering at https://console.xfyun.cn/services/bm3.
# SparkLLMTextEmbeddings supports a 2K token window and produces vectors with
# 2560 dimensions.
logger = logging.getLogger(__name__)
class Url:
    """Parsed pieces of a request URL: host, path, and scheme prefix.

    Produced by ``SparkLLMTextEmbeddings._parse_url``; ``schema`` holds the
    scheme including the ``://`` separator (e.g. ``"https://"``).
    """

    def __init__(self, host: str, path: str, schema: str) -> None:
        self.host = host
        self.path = path
        self.schema = schema
        # (removed a redundant trailing `pass` statement)
class SparkLLMTextEmbeddings(BaseModel, Embeddings):
    """SparkLLM embedding model integration.

    Setup:
        To use, you should have the environment variable "SPARK_APP_ID","SPARK_API_KEY"
        and "SPARK_API_SECRET" set your APP_ID, API_KEY and API_SECRET or pass it
        as a name parameter to the constructor.

        .. code-block:: bash

            export SPARK_APP_ID="your-api-id"
            export SPARK_API_KEY="your-api-key"
            export SPARK_API_SECRET="your-api-secret"

    Key init args — completion params:
        api_key: Optional[str]
            Automatically inferred from env var `SPARK_API_KEY` if not provided.
        app_id: Optional[str]
            Automatically inferred from env var `SPARK_APP_ID` if not provided.
        api_secret: Optional[str]
            Automatically inferred from env var `SPARK_API_SECRET` if not provided.
        base_url: Optional[str]
            Base URL path for API requests.

    See full list of supported init args and their descriptions in the params section.

    Instantiate:
        .. code-block:: python

            from langchain_community.embeddings import SparkLLMTextEmbeddings

            embed = SparkLLMTextEmbeddings(
                api_key="...",
                app_id="...",
                api_secret="...",
                # other
            )

    Embed single text:
        .. code-block:: python

            input_text = "The meaning of life is 42"
            embed.embed_query(input_text)

        .. code-block:: python

            [-0.4912109375, 0.60595703125, 0.658203125, 0.3037109375, 0.6591796875, 0.60302734375, ...]

    Embed multiple text:
        .. code-block:: python

            input_texts = ["This is a test query1.", "This is a test query2."]
            embed.embed_documents(input_texts)

        .. code-block:: python

            [
                [-0.1962890625, 0.94677734375, 0.7998046875, -0.1971435546875, 0.445556640625, 0.54638671875, ...],
                [ -0.44970703125, 0.06585693359375, 0.7421875, -0.474609375, 0.62353515625, 1.0478515625, ...],
            ]
    """  # noqa: E501

    spark_app_id: SecretStr = Field(
        alias="app_id", default_factory=secret_from_env("SPARK_APP_ID")
    )
    """Automatically inferred from env var `SPARK_APP_ID` if not provided."""
    spark_api_key: Optional[SecretStr] = Field(
        alias="api_key", default_factory=secret_from_env("SPARK_API_KEY", default=None)
    )
    """Automatically inferred from env var `SPARK_API_KEY` if not provided."""
    spark_api_secret: Optional[SecretStr] = Field(
        alias="api_secret",
        default_factory=secret_from_env("SPARK_API_SECRET", default=None),
    )
    """Automatically inferred from env var `SPARK_API_SECRET` if not provided."""
    base_url: str = Field(default="https://emb-cn-huabei-1.xf-yun.com/")
    """Base URL path for API requests"""
    domain: Literal["para", "query"] = Field(default="para")
    """This parameter is used for which Embedding this time belongs to.
    If "para"(default), it belongs to document Embedding.
    If "query", it belongs to query Embedding."""

    model_config = ConfigDict(
        populate_by_name=True,
    )

    def _embed(self, texts: List[str], host: str) -> Optional[List[List[float]]]:
        """Internal method to call Spark Embedding API and return embeddings.

        Args:
            texts: A list of texts to embed.
            host: Base URL path for API requests

        Returns:
            A list of list of floats representing the embeddings,
            or list with value None if an error occurs.
        """
        # Unwrap the SecretStr credentials; empty strings act as placeholders
        # for any credential that was not configured.
        app_id = ""
        api_key = ""
        api_secret = ""
        if self.spark_app_id:
            app_id = self.spark_app_id.get_secret_value()
        if self.spark_api_key:
            api_key = self.spark_api_key.get_secret_value()
        if self.spark_api_secret:
            api_secret = self.spark_api_secret.get_secret_value()
        # Build the signed request URL (HMAC-SHA256 authentication handshake).
        url = self._assemble_ws_auth_url(
            request_url=host,
            method="POST",
            api_key=api_key,
            api_secret=api_secret,
        )
        embed_result: list = []
        # One request per text; a failed request appends None so the result
        # list stays aligned with the input order.
        for text in texts:
            query_context = {"messages": [{"content": text, "role": "user"}]}
            content = self._get_body(app_id, query_context)
            response = requests.post(
                url, json=content, headers={"content-type": "application/json"}
            ).text
            res_arr = self._parser_message(response)
            if res_arr is not None:
                embed_result.append(res_arr.tolist())
            else:
                embed_result.append(None)
        return embed_result

    def embed_documents(self, texts: List[str]) -> Optional[List[List[float]]]:  # type: ignore[override]
        """Public method to get embeddings for a list of documents.

        Args:
            texts: The list of texts to embed.

        Returns:
            A list of embeddings, one for each text, or None if an error occurs.
        """
        return self._embed(texts, self.base_url)

    def embed_query(self, text: str) -> Optional[List[float]]:  # type: ignore[override]
        """Public method to get embedding for a single query text.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text, or None if an error occurs.
        """
        result = self._embed([text], self.base_url)
        return result[0] if result is not None else None

    @staticmethod
    def _assemble_ws_auth_url(
        request_url: str, method: str = "GET", api_key: str = "", api_secret: str = ""
    ) -> str:
        """Append the signed auth query string (host, date, authorization).

        The signature is an HMAC-SHA256, keyed by the API secret, over
        "host: ...\\ndate: ...\\n<METHOD> <path> HTTP/1.1".
        """
        u = SparkLLMTextEmbeddings._parse_url(request_url)
        host = u.host
        path = u.path
        # RFC 1123 date string, required as part of the signed payload.
        now = datetime.now()
        date = format_date_time(mktime(now.timetuple()))
        signature_origin = "host: {}\ndate: {}\n{} {} HTTP/1.1".format(
            host, date, method, path
        )
        signature_sha = hmac.new(
            api_secret.encode("utf-8"),
            signature_origin.encode("utf-8"),
            digestmod=hashlib.sha256,
        ).digest()
        signature_sha_str = base64.b64encode(signature_sha).decode(encoding="utf-8")
        authorization_origin = (
            'api_key="%s", algorithm="%s", headers="%s", signature="%s"'
            % (api_key, "hmac-sha256", "host date request-line", signature_sha_str)
        )
        # The authorization header itself is base64-encoded before being
        # placed in the query string.
        authorization = base64.b64encode(authorization_origin.encode("utf-8")).decode(
            encoding="utf-8"
        )
        values = {"host": host, "date": date, "authorization": authorization}

        return request_url + "?" + urlencode(values)

    @staticmethod
    def _parse_url(request_url: str) -> Url:
        """Split a URL into (host, path, schema).

        Note: str.index raises ValueError (not AssembleHeaderException) when
        "://" or "/" is missing from the URL.
        """
        stidx = request_url.index("://")
        host = request_url[stidx + 3 :]
        schema = request_url[: stidx + 3]
        edidx = host.index("/")
        if edidx <= 0:
            raise AssembleHeaderException("invalid request url:" + request_url)
        path = host[edidx:]
        host = host[:edidx]
        u = Url(host, path, schema)
        return u

    def _get_body(self, appid: str, text: dict) -> Dict[str, Any]:
        """Build the JSON request body; the message payload is base64-encoded."""
        body = {
            "header": {"app_id": appid, "uid": "39769795890", "status": 3},
            "parameter": {
                "emb": {"domain": self.domain, "feature": {"encoding": "utf8"}}
            },
            "payload": {
                "messages": {
                    "text": base64.b64encode(json.dumps(text).encode("utf-8")).decode()
                }
            },
        }
        return body

    @staticmethod
    def _parser_message(
        message: str,
    ) -> Optional[ndarray]:
        """Decode the API response into a float32 vector, or None on error."""
        data = json.loads(message)
        code = data["header"]["code"]
        if code != 0:
            # Non-zero header code signals an API-side error; log and bail.
            logger.warning(f"Request error: {code}, {data}")
            return None
        else:
            # The vector is base64-encoded little-endian float32 bytes.
            text_base = data["payload"]["feature"]["text"]
            text_data = base64.b64decode(text_base)
            dt = np.dtype(np.float32)
            dt = dt.newbyteorder("<")
            text = np.frombuffer(text_data, dtype=dt)
            # Truncate to the model's 2560-dimension output if longer.
            if len(text) > 2560:
                array = text[:2560]
            else:
                array = text
            return array
class AssembleHeaderException(Exception):
    """Exception raised for errors in the header assembly."""

    def __init__(self, msg: str) -> None:
        # Bug fix: pass the message to Exception so str(e)/repr(e) and e.args
        # carry it (previously str(e) was an empty string). The `message`
        # attribute is kept for backward compatibility.
        super().__init__(msg)
        self.message = msg
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/baidu_qianfan_endpoint.py | from __future__ import annotations
import logging
from typing import Any, Dict, List, Optional
from langchain_core.embeddings import Embeddings
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import BaseModel, ConfigDict, Field, SecretStr
logger = logging.getLogger(__name__)
class QianfanEmbeddingsEndpoint(BaseModel, Embeddings):
    """Baidu Qianfan Embeddings embedding models.

    Setup:
        To use, you should have the ``qianfan`` python package installed, and set
        environment variables ``QIANFAN_AK``, ``QIANFAN_SK``.

        .. code-block:: bash

            pip install qianfan
            export QIANFAN_AK="your-api-key"
            export QIANFAN_SK="your-secret_key"

    Instantiate:
        .. code-block:: python

            from langchain_community.embeddings import QianfanEmbeddingsEndpoint

            embeddings = QianfanEmbeddingsEndpoint()

    Embed:
        .. code-block:: python

            # embed the documents
            vectors = embeddings.embed_documents([text1, text2, ...])

            # embed the query
            vectors = embeddings.embed_query(text)

            # embed the documents with async
            vectors = await embeddings.aembed_documents([text1, text2, ...])

            # embed the query with async
            vectors = await embeddings.aembed_query(text)
    """  # noqa: E501

    qianfan_ak: Optional[SecretStr] = Field(default=None, alias="api_key")
    """Qianfan application apikey"""

    qianfan_sk: Optional[SecretStr] = Field(default=None, alias="secret_key")
    """Qianfan application secretkey"""

    chunk_size: int = 16
    """Chunk size when multiple texts are input"""

    model: Optional[str] = Field(default=None)
    """Model name
    you could get from https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu

    for now, we support Embedding-V1 and
    - Embedding-V1 (默认模型)
    - bge-large-en
    - bge-large-zh

    preset models are mapping to an endpoint.
    `model` will be ignored if `endpoint` is set
    """

    endpoint: str = ""
    """Endpoint of the Qianfan Embedding, required if custom model used."""

    client: Any = None
    """Qianfan client"""

    init_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """init kwargs for qianfan client init, such as `query_per_second` which is
    associated with qianfan resource object to limit QPS"""

    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """extra params for model invoke using with `do`."""

    model_config = ConfigDict(protected_namespaces=())

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """
        Validate whether qianfan_ak and qianfan_sk in the environment variables or
        configuration file are available or not.

        init qianfan embedding client with `ak`, `sk`, `model`, `endpoint`

        Args:
            values: a dictionary containing configuration information, must include the
                fields of qianfan_ak and qianfan_sk

        Returns:
            a dictionary containing configuration information. If qianfan_ak and
            qianfan_sk are not provided in the environment variables or configuration
            file, the original values will be returned; otherwise, values containing
            qianfan_ak and qianfan_sk will be returned.

        Raises:
            ImportError: qianfan package not found, please install it with
                `pip install qianfan`
        """
        values["qianfan_ak"] = convert_to_secret_str(
            get_from_dict_or_env(
                values,
                "qianfan_ak",
                "QIANFAN_AK",
                default="",
            )
        )
        values["qianfan_sk"] = convert_to_secret_str(
            get_from_dict_or_env(
                values,
                "qianfan_sk",
                "QIANFAN_SK",
                default="",
            )
        )

        try:
            import qianfan

            params = {
                **values.get("init_kwargs", {}),
                "model": values["model"],
            }
            # Only forward credentials/endpoint that were actually provided, so
            # the qianfan SDK can fall back to its own configuration handling.
            if values["qianfan_ak"].get_secret_value() != "":
                params["ak"] = values["qianfan_ak"].get_secret_value()
            if values["qianfan_sk"].get_secret_value() != "":
                params["sk"] = values["qianfan_sk"].get_secret_value()
            if values["endpoint"] is not None and values["endpoint"] != "":
                params["endpoint"] = values["endpoint"]
            values["client"] = qianfan.Embedding(**params)
        except ImportError:
            raise ImportError(
                "qianfan package not found, please install it with "
                "`pip install qianfan`"
            )
        return values

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query string.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        resp = self.embed_documents([text])
        return resp[0]

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """
        Embed a list of text documents via the Qianfan embedding endpoint.

        Args:
            texts (List[str]): A list of text documents to embed.

        Returns:
            List[List[float]]: A list of embeddings for each document in the input
                list. Each embedding is represented as a list of float values.
        """
        # Split input into service-sized batches of `chunk_size` texts each.
        text_in_chunks = [
            texts[i : i + self.chunk_size]
            for i in range(0, len(texts), self.chunk_size)
        ]
        lst: List[List[float]] = []
        for chunk in text_in_chunks:
            resp = self.client.do(texts=chunk, **self.model_kwargs)
            lst.extend([res["embedding"] for res in resp["data"]])
        return lst

    async def aembed_query(self, text: str) -> List[float]:
        """Asynchronously embed a single query string."""
        embeddings = await self.aembed_documents([text])
        return embeddings[0]

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        """Asynchronously embed a list of text documents.

        Args:
            texts: A list of text documents to embed.

        Returns:
            A list of embeddings, one per input document.
        """
        text_in_chunks = [
            texts[i : i + self.chunk_size]
            for i in range(0, len(texts), self.chunk_size)
        ]
        lst: List[List[float]] = []
        for chunk in text_in_chunks:
            resp = await self.client.ado(texts=chunk, **self.model_kwargs)
            # Collect one embedding per response item, mirroring the sync path
            # (the old per-item `extend([...])` built a throwaway singleton
            # list on every iteration).
            lst.extend(res["embedding"] for res in resp["data"])
        return lst
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/infinity_local.py | """written under MIT Licence, Michael Feil 2023."""
import asyncio
from logging import getLogger
from typing import Any, List, Optional
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict, model_validator
from typing_extensions import Self
__all__ = ["InfinityEmbeddingsLocal"]
logger = getLogger(__name__)
class InfinityEmbeddingsLocal(BaseModel, Embeddings):
    """Optimized Infinity embedding models.

    https://github.com/michaelfeil/infinity
    This class deploys a local Infinity instance to embed text.
    The class requires async usage.

    Infinity is a class to interact with Embedding Models on https://github.com/michaelfeil/infinity

    Example:
        .. code-block:: python

            from langchain_community.embeddings import InfinityEmbeddingsLocal

            async with InfinityEmbeddingsLocal(
                model="BAAI/bge-small-en-v1.5",
                revision=None,
                device="cpu",
            ) as embedder:
                embeddings = await embedder.aembed_documents(["text1", "text2"])
    """

    model: str
    "Underlying model id from huggingface, e.g. BAAI/bge-small-en-v1.5"

    revision: Optional[str] = None
    "Model version, the commit hash from huggingface"

    batch_size: int = 32
    "Internal batch size for inference, e.g. 32"

    device: str = "auto"
    "Device to use for inference, e.g. 'cpu' or 'cuda', or 'mps'"

    backend: str = "torch"
    "Backend for inference, e.g. 'torch' (recommended for ROCm/Nvidia)"
    " or 'optimum' for onnx/tensorrt"

    model_warmup: bool = True
    "Warmup the model with the max batch size."

    engine: Any = None  #: :meta private:
    """Infinity's AsyncEmbeddingEngine."""

    # LLM call kwargs
    model_config = ConfigDict(
        extra="forbid",
        protected_namespaces=(),
    )

    @model_validator(mode="after")
    def validate_environment(self) -> Self:
        """Validate that api key and python package exists in environment."""
        try:
            from infinity_emb import AsyncEmbeddingEngine  # type: ignore
        except ImportError:
            raise ImportError(
                "Please install the "
                "`pip install 'infinity_emb[optimum,torch]>=0.0.24'` "
                "package to use the InfinityEmbeddingsLocal."
            )
        # Construct the engine eagerly so configuration errors surface at
        # model-validation time; the engine is only *started* via __aenter__.
        self.engine = AsyncEmbeddingEngine(
            model_name_or_path=self.model,
            device=self.device,
            revision=self.revision,
            model_warmup=self.model_warmup,
            batch_size=self.batch_size,
            engine=self.backend,
        )
        return self

    async def __aenter__(self) -> None:
        """Start the background worker.

        Recommended usage is with the async with statement.

        async with InfinityEmbeddingsLocal(
            model="BAAI/bge-small-en-v1.5",
            revision=None,
            device="cpu",
        ) as embedder:
            embeddings = await embedder.aembed_documents(["text1", "text2"])
        """
        await self.engine.__aenter__()

    async def __aexit__(self, *args: Any) -> None:
        """Stop the background worker,
        required to free references to the pytorch model."""
        await self.engine.__aexit__(*args)

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        """Async call out to Infinity's embedding endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        if not self.engine.running:
            # Fallback path: start and stop the engine just for this call.
            # This pays the startup/teardown cost per call, hence the warning.
            logger.warning(
                "Starting Infinity engine on the fly. This is not recommended."
                "Please start the engine before using it."
            )
            async with self:
                # spawning threadpool for multithreaded encode, tokenization
                embeddings, _ = await self.engine.embed(texts)
                # stopping threadpool on exit
                logger.warning("Stopped infinity engine after usage.")
        else:
            embeddings, _ = await self.engine.embed(texts)
        return embeddings

    async def aembed_query(self, text: str) -> List[float]:
        """Async call out to Infinity's embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        embeddings = await self.aembed_documents([text])
        return embeddings[0]

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """
        This method is async only.
        """
        logger.warning(
            "This method is async only. "
            "Please use the async version `await aembed_documents`."
        )
        return asyncio.run(self.aembed_documents(texts))

    def embed_query(self, text: str) -> List[float]:
        """Sync wrapper over the async-only :meth:`aembed_query`."""
        logger.warning(
            "This method is async only."
            " Please use the async version `await aembed_query`."
        )
        return asyncio.run(self.aembed_query(text))
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/text2vec.py | """Wrapper around text2vec embedding models."""
from typing import Any, List, Optional
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict
class Text2vecEmbeddings(Embeddings, BaseModel):
    """text2vec embedding models.

    Requires the ``text2vec`` package; install it with ``pip install -U text2vec``.
    Project homepage: https://github.com/shibing624/text2vec

    Example:
        .. code-block:: python

            from langchain_community.embeddings.text2vec import Text2vecEmbeddings

            embedding = Text2vecEmbeddings()
            embedding.embed_documents([
                "This is a CoSENT(Cosine Sentence) model.",
                "It maps sentences to a 768 dimensional dense vector space.",
            ])
            embedding.embed_query(
                "It can be used for text matching or semantic search."
            )
    """

    model_name_or_path: Optional[str] = None
    encoder_type: Any = "MEAN"
    max_seq_length: int = 256
    device: Optional[str] = None
    model: Any = None

    model_config = ConfigDict(protected_namespaces=())

    def __init__(
        self,
        *,
        model: Any = None,
        model_name_or_path: Optional[str] = None,
        **kwargs: Any,
    ):
        """Create the wrapper, building a ``SentenceModel`` unless one is given."""
        try:
            from text2vec import SentenceModel
        except ImportError as e:
            raise ImportError(
                "Unable to import text2vec, please install with "
                "`pip install -U text2vec`."
            ) from e
        if not model:
            # Build the underlying model lazily, only when none was supplied.
            build_kwargs = dict(kwargs)
            if model_name_or_path is not None:
                build_kwargs["model_name_or_path"] = model_name_or_path
            model = SentenceModel(**build_kwargs)
        super().__init__(model=model, model_name_or_path=model_name_or_path, **kwargs)

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed documents using the text2vec embeddings model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        return self.model.encode(texts)

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using the text2vec embeddings model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self.model.encode(text)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/gpt4all.py | from typing import Any, Dict, List, Optional
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict, model_validator
class GPT4AllEmbeddings(BaseModel, Embeddings):
    """GPT4All embedding models.

    To use, you should have the gpt4all python package installed.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import GPT4AllEmbeddings

            model_name = "all-MiniLM-L6-v2.gguf2.f16.gguf"
            gpt4all_kwargs = {'allow_download': 'True'}
            embeddings = GPT4AllEmbeddings(
                model_name=model_name,
                gpt4all_kwargs=gpt4all_kwargs
            )
    """

    model_name: Optional[str] = None
    n_threads: Optional[int] = None
    device: Optional[str] = "cpu"
    gpt4all_kwargs: Optional[dict] = {}
    client: Any  #: :meta private:

    model_config = ConfigDict(protected_namespaces=())

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that GPT4All library is installed."""
        try:
            from gpt4all import Embed4All

            # Forward any user-supplied extra kwargs to the Embed4All client.
            extra_kwargs = values.get("gpt4all_kwargs") or {}
            values["client"] = Embed4All(
                model_name=values.get("model_name"),
                n_threads=values.get("n_threads"),
                device=values.get("device"),
                **extra_kwargs,
            )
        except ImportError:
            raise ImportError(
                "Could not import gpt4all library. "
                "Please install the gpt4all library to "
                "use this embedding model: pip install gpt4all"
            )
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of documents using GPT4All.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # Coerce each component to float so callers get plain Python lists.
        return [
            [float(value) for value in self.client.embed(text)] for text in texts
        ]

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using GPT4All.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        vectors = self.embed_documents([text])
        return vectors[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/laser.py | from typing import Any, Dict, List, Optional
import numpy as np
from langchain_core.embeddings import Embeddings
from langchain_core.utils import pre_init
from pydantic import BaseModel, ConfigDict
LASER_MULTILINGUAL_MODEL: str = "laser2"
class LaserEmbeddings(BaseModel, Embeddings):
    """LASER Language-Agnostic SEntence Representations.

    LASER is a Python library developed by the Meta AI Research team
    and used for creating multilingual sentence embeddings for over 147 languages
    as of 2/25/2024

    See more documentation at:
    * https://github.com/facebookresearch/LASER/
    * https://github.com/facebookresearch/LASER/tree/main/laser_encoders
    * https://arxiv.org/abs/2205.12654

    To use this class, you must install the `laser_encoders` Python package.

    `pip install laser_encoders`

    Example:
        from laser_encoders import LaserEncoderPipeline
        encoder = LaserEncoderPipeline(lang="eng_Latn")
        embeddings = encoder.encode_sentences(["Hello", "World"])
    """

    lang: Optional[str] = None
    """The language or language code you'd like to use
    If empty, this implementation will default
    to using a multilingual earlier LASER encoder model (called laser2)
    Find the list of supported languages at
    https://github.com/facebookresearch/flores/blob/main/flores200/README.md#languages-in-flores-200
    """

    _encoder_pipeline: Any = None  # : :meta private:

    model_config = ConfigDict(
        extra="forbid",
    )

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that laser_encoders has been installed."""
        try:
            from laser_encoders import LaserEncoderPipeline

            # Use a language-specific encoder when a language was configured,
            # otherwise fall back to the multilingual laser2 model.
            lang = values.get("lang")
            pipeline = (
                LaserEncoderPipeline(lang=lang)
                if lang
                else LaserEncoderPipeline(laser=LASER_MULTILINGUAL_MODEL)
            )
            values["_encoder_pipeline"] = pipeline
        except ImportError as e:
            raise ImportError(
                "Could not import 'laser_encoders' Python package. "
                "Please install it with `pip install laser_encoders`."
            ) from e
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Generate embeddings for documents using LASER.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        matrix: np.ndarray = self._encoder_pipeline.encode_sentences(texts)
        return matrix.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Generate single query text embeddings using LASER.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        matrix: np.ndarray = self._encoder_pipeline.encode_sentences([text])
        return matrix.tolist()[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/huggingface.py | import warnings
from typing import Any, Dict, List, Optional
import requests
from langchain_core._api import deprecated, warn_deprecated
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict, Field, SecretStr
DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large"
DEFAULT_BGE_MODEL = "BAAI/bge-large-en"
DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: "
DEFAULT_QUERY_INSTRUCTION = (
"Represent the question for retrieving supporting documents: "
)
DEFAULT_QUERY_BGE_INSTRUCTION_EN = (
"Represent this question for searching relevant passages: "
)
DEFAULT_QUERY_BGE_INSTRUCTION_ZH = "为这个句子生成表示以用于检索相关文章:"
@deprecated(
    since="0.2.2",
    removal="1.0",
    alternative_import="langchain_huggingface.HuggingFaceEmbeddings",
)
class HuggingFaceEmbeddings(BaseModel, Embeddings):
    """HuggingFace sentence_transformers embedding models.

    To use, you should have the ``sentence_transformers`` python package installed.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import HuggingFaceEmbeddings

            model_name = "sentence-transformers/all-mpnet-base-v2"
            model_kwargs = {'device': 'cpu'}
            encode_kwargs = {'normalize_embeddings': False}
            hf = HuggingFaceEmbeddings(
                model_name=model_name,
                model_kwargs=model_kwargs,
                encode_kwargs=encode_kwargs
            )
    """

    client: Any = None  #: :meta private:
    model_name: str = DEFAULT_MODEL_NAME
    """Model name to use."""
    cache_folder: Optional[str] = None
    """Path to store models.
    Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass to the Sentence Transformer model, such as `device`,
    `prompts`, `default_prompt_name`, `revision`, `trust_remote_code`, or `token`.
    See also the Sentence Transformer documentation: https://sbert.net/docs/package_reference/SentenceTransformer.html#sentence_transformers.SentenceTransformer"""
    encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass when calling the `encode` method of the Sentence
    Transformer model, such as `prompt_name`, `prompt`, `batch_size`, `precision`,
    `normalize_embeddings`, and more.
    See also the Sentence Transformer documentation: https://sbert.net/docs/package_reference/SentenceTransformer.html#sentence_transformers.SentenceTransformer.encode"""
    multi_process: bool = False
    """Run encode() on multiple GPUs."""
    show_progress: bool = False
    """Whether to show a progress bar."""

    def __init__(self, **kwargs: Any):
        """Initialize the sentence_transformer."""
        super().__init__(**kwargs)
        if "model_name" not in kwargs:
            # Relying on the implicit default model is deprecated; nudge
            # callers to pass model_name explicitly.
            since = "0.2.16"
            removal = "0.4.0"
            warn_deprecated(
                since=since,
                removal=removal,
                message=f"Default values for {self.__class__.__name__}.model_name"
                + f" were deprecated in LangChain {since} and will be removed in"
                + f" {removal}. Explicitly pass a model_name to the"
                + f" {self.__class__.__name__} constructor instead.",
            )
        try:
            import sentence_transformers
        except ImportError as exc:
            raise ImportError(
                "Could not import sentence_transformers python package. "
                "Please install it with `pip install sentence-transformers`."
            ) from exc

        self.client = sentence_transformers.SentenceTransformer(
            self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
        )

    model_config = ConfigDict(extra="forbid", protected_namespaces=())

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        import sentence_transformers

        # Normalize newlines to spaces before encoding.
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        if self.multi_process:
            # Fan the batch out across a multi-process worker pool, then tear
            # the pool down so worker processes are not leaked between calls.
            pool = self.client.start_multi_process_pool()
            embeddings = self.client.encode_multi_process(texts, pool)
            sentence_transformers.SentenceTransformer.stop_multi_process_pool(pool)
        else:
            embeddings = self.client.encode(
                texts, show_progress_bar=self.show_progress, **self.encode_kwargs
            )

        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self.embed_documents([text])[0]
class HuggingFaceInstructEmbeddings(BaseModel, Embeddings):
    """Wrapper around sentence_transformers embedding models.

    To use, you should have the ``sentence_transformers``
    and ``InstructorEmbedding`` python packages installed.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import HuggingFaceInstructEmbeddings

            model_name = "hkunlp/instructor-large"
            model_kwargs = {'device': 'cpu'}
            encode_kwargs = {'normalize_embeddings': True}
            hf = HuggingFaceInstructEmbeddings(
                model_name=model_name,
                model_kwargs=model_kwargs,
                encode_kwargs=encode_kwargs
            )
    """

    client: Any = None  #: :meta private:
    model_name: str = DEFAULT_INSTRUCT_MODEL
    """Model name to use."""
    cache_folder: Optional[str] = None
    """Path to store models.
    Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass to the model."""
    encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass when calling the `encode` method of the model."""
    embed_instruction: str = DEFAULT_EMBED_INSTRUCTION
    """Instruction to use for embedding documents."""
    query_instruction: str = DEFAULT_QUERY_INSTRUCTION
    """Instruction to use for embedding query."""
    show_progress: bool = False
    """Whether to show a progress bar."""

    def __init__(self, **kwargs: Any):
        """Initialize the sentence_transformer."""
        super().__init__(**kwargs)
        if "model_name" not in kwargs:
            # Relying on the implicit default model name is deprecated.
            since = "0.2.16"
            removal = "0.4.0"
            warn_deprecated(
                since=since,
                removal=removal,
                message=f"Default values for {self.__class__.__name__}.model_name"
                + f" were deprecated in LangChain {since} and will be removed in"
                + f" {removal}. Explicitly pass a model_name to the"
                + f" {self.__class__.__name__} constructor instead.",
            )
        try:
            from InstructorEmbedding import INSTRUCTOR

            self.client = INSTRUCTOR(
                self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
            )
        except ImportError as e:
            raise ImportError("Dependencies for InstructorEmbedding not found.") from e

        if "show_progress_bar" in self.encode_kwargs:
            # encode_kwargs["show_progress_bar"] is deprecated in favor of the
            # show_progress field; it is popped out here and takes precedence.
            warn_deprecated(
                since="0.2.5",
                removal="1.0",
                name="encode_kwargs['show_progress_bar']",
                alternative=f"the show_progress method on {self.__class__.__name__}",
            )
            if self.show_progress:
                warnings.warn(
                    "Both encode_kwargs['show_progress_bar'] and show_progress are set;"
                    "encode_kwargs['show_progress_bar'] takes precedence"
                )
            self.show_progress = self.encode_kwargs.pop("show_progress_bar")

    model_config = ConfigDict(extra="forbid", protected_namespaces=())

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace instruct model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # Pair each text with the document instruction before encoding.
        instruction_pairs = [[self.embed_instruction, text] for text in texts]
        embeddings = self.client.encode(
            instruction_pairs,
            show_progress_bar=self.show_progress,
            **self.encode_kwargs,
        )
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace instruct model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        # Queries use the query instruction, which may differ from the
        # document instruction.
        instruction_pair = [self.query_instruction, text]
        embedding = self.client.encode(
            [instruction_pair],
            show_progress_bar=self.show_progress,
            **self.encode_kwargs,
        )[0]
        return embedding.tolist()
class HuggingFaceBgeEmbeddings(BaseModel, Embeddings):
    """HuggingFace sentence_transformers embedding models.

    To use, you should have the ``sentence_transformers`` python package installed.
    To use Nomic, make sure the version of ``sentence_transformers`` >= 2.3.0.

    Bge Example:
        .. code-block:: python

            from langchain_community.embeddings import HuggingFaceBgeEmbeddings

            model_name = "BAAI/bge-large-en-v1.5"
            model_kwargs = {'device': 'cpu'}
            encode_kwargs = {'normalize_embeddings': True}
            hf = HuggingFaceBgeEmbeddings(
                model_name=model_name,
                model_kwargs=model_kwargs,
                encode_kwargs=encode_kwargs
            )

    Nomic Example:
        .. code-block:: python

            from langchain_community.embeddings import HuggingFaceBgeEmbeddings

            model_name = "nomic-ai/nomic-embed-text-v1"
            model_kwargs = {
                'device': 'cpu',
                'trust_remote_code':True
            }
            encode_kwargs = {'normalize_embeddings': True}
            hf = HuggingFaceBgeEmbeddings(
                model_name=model_name,
                model_kwargs=model_kwargs,
                encode_kwargs=encode_kwargs,
                query_instruction = "search_query:",
                embed_instruction = "search_document:"
            )
    """

    client: Any = None  #: :meta private:
    model_name: str = DEFAULT_BGE_MODEL
    """Model name to use."""
    cache_folder: Optional[str] = None
    """Path to store models.
    Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass to the model."""
    encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass when calling the `encode` method of the model."""
    query_instruction: str = DEFAULT_QUERY_BGE_INSTRUCTION_EN
    """Instruction to use for embedding query."""
    embed_instruction: str = ""
    """Instruction to use for embedding document."""
    show_progress: bool = False
    """Whether to show a progress bar."""

    def __init__(self, **kwargs: Any):
        """Initialize the sentence_transformer."""
        super().__init__(**kwargs)
        if "model_name" not in kwargs:
            # Relying on the implicit default model name is deprecated.
            since = "0.2.5"
            removal = "0.4.0"
            warn_deprecated(
                since=since,
                removal=removal,
                message=f"Default values for {self.__class__.__name__}.model_name"
                + f" were deprecated in LangChain {since} and will be removed in"
                + f" {removal}. Explicitly pass a model_name to the"
                + f" {self.__class__.__name__} constructor instead.",
            )
        try:
            import sentence_transformers
        except ImportError as exc:
            raise ImportError(
                "Could not import sentence_transformers python package. "
                "Please install it with `pip install sentence_transformers`."
            ) from exc

        self.client = sentence_transformers.SentenceTransformer(
            self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
        )
        if "-zh" in self.model_name:
            # Chinese bge checkpoints get the Chinese query instruction.
            self.query_instruction = DEFAULT_QUERY_BGE_INSTRUCTION_ZH
        if "show_progress_bar" in self.encode_kwargs:
            # encode_kwargs["show_progress_bar"] is deprecated in favor of the
            # show_progress field; it is popped out here and takes precedence.
            warn_deprecated(
                since="0.2.5",
                removal="1.0",
                name="encode_kwargs['show_progress_bar']",
                alternative=f"the show_progress method on {self.__class__.__name__}",
            )
            if self.show_progress:
                warnings.warn(
                    "Both encode_kwargs['show_progress_bar'] and show_progress are set;"
                    "encode_kwargs['show_progress_bar'] takes precedence"
                )
            self.show_progress = self.encode_kwargs.pop("show_progress_bar")

    model_config = ConfigDict(extra="forbid", protected_namespaces=())

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # Prefix each text with the document instruction and flatten newlines.
        texts = [self.embed_instruction + t.replace("\n", " ") for t in texts]
        embeddings = self.client.encode(
            texts, show_progress_bar=self.show_progress, **self.encode_kwargs
        )
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        embedding = self.client.encode(
            self.query_instruction + text,
            show_progress_bar=self.show_progress,
            **self.encode_kwargs,
        )
        return embedding.tolist()
class HuggingFaceInferenceAPIEmbeddings(BaseModel, Embeddings):
    """Embed texts using the HuggingFace API.

    Requires a HuggingFace Inference API key and a model name.
    """

    api_key: SecretStr
    """Your API key for the HuggingFace Inference API."""
    model_name: str = "sentence-transformers/all-MiniLM-L6-v2"
    """The name of the model to use for text embeddings."""
    api_url: Optional[str] = None
    """Custom inference endpoint url. None for using default public url."""
    additional_headers: Dict[str, str] = {}
    """Pass additional headers to the requests library if needed."""

    model_config = ConfigDict(extra="forbid", protected_namespaces=())

    @property
    def _api_url(self) -> str:
        # Prefer a user-configured endpoint; otherwise use the public one.
        return self.api_url or self._default_api_url

    @property
    def _default_api_url(self) -> str:
        base = "https://api-inference.huggingface.co/pipeline/feature-extraction"
        return f"{base}/{self.model_name}"

    @property
    def _headers(self) -> dict:
        headers = {"Authorization": f"Bearer {self.api_key.get_secret_value()}"}
        headers.update(self.additional_headers)
        return headers

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Get the embeddings for a list of texts.

        Args:
            texts (Documents): A list of texts to get embeddings for.

        Returns:
            Embedded texts as List[List[float]], where each inner List[float]
                corresponds to a single input text.

        Example:
            .. code-block:: python

                from langchain_community.embeddings import (
                    HuggingFaceInferenceAPIEmbeddings,
                )

                hf_embeddings = HuggingFaceInferenceAPIEmbeddings(
                    api_key="your_api_key",
                    model_name="sentence-transformers/all-MiniLM-l6-v2"
                )
                texts = ["Hello, world!", "How are you?"]
                hf_embeddings.embed_documents(texts)
        """  # noqa: E501
        payload = {
            "inputs": texts,
            "options": {"wait_for_model": True, "use_cache": True},
        }
        response = requests.post(
            self._api_url,
            headers=self._headers,
            json=payload,
        )
        return response.json()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self.embed_documents([text])[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/deepinfra.py | from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain_core.embeddings import Embeddings
from langchain_core.utils import get_from_dict_or_env, pre_init
from pydantic import BaseModel, ConfigDict
DEFAULT_MODEL_ID = "sentence-transformers/clip-ViT-B-32"
MAX_BATCH_SIZE = 1024
class DeepInfraEmbeddings(BaseModel, Embeddings):
    """Deep Infra's embedding inference service.

    To use, you should have the
    environment variable ``DEEPINFRA_API_TOKEN`` set with your API token, or pass
    it as a named parameter to the constructor.
    There are multiple embeddings models available,
    see https://deepinfra.com/models?type=embeddings.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import DeepInfraEmbeddings

            deepinfra_emb = DeepInfraEmbeddings(
                model_id="sentence-transformers/clip-ViT-B-32",
                deepinfra_api_token="my-api-key"
            )
            r1 = deepinfra_emb.embed_documents(
                [
                    "Alpha is the first letter of Greek alphabet",
                    "Beta is the second letter of Greek alphabet",
                ]
            )
            r2 = deepinfra_emb.embed_query(
                "What is the second letter of Greek alphabet"
            )
    """

    model_id: str = DEFAULT_MODEL_ID
    """Embeddings model to use."""
    normalize: bool = False
    """whether to normalize the computed embeddings"""
    embed_instruction: str = "passage: "
    """Instruction used to embed documents."""
    query_instruction: str = "query: "
    """Instruction used to embed the query."""
    model_kwargs: Optional[dict] = None
    """Other model keyword args"""
    deepinfra_api_token: Optional[str] = None
    """API token for Deep Infra. If not provided, the token is
    fetched from the environment variable 'DEEPINFRA_API_TOKEN'."""
    batch_size: int = MAX_BATCH_SIZE
    """Batch size for embedding requests."""

    model_config = ConfigDict(extra="forbid", protected_namespaces=())

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        values["deepinfra_api_token"] = get_from_dict_or_env(
            values, "deepinfra_api_token", "DEEPINFRA_API_TOKEN"
        )
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {"model_id": self.model_id}

    def _embed(self, batch: List[str]) -> List[List[float]]:
        """POST one batch of strings to the inference endpoint."""
        extra_kwargs = self.model_kwargs or {}
        headers = {
            "Authorization": f"bearer {self.deepinfra_api_token}",
            "Content-Type": "application/json",
        }
        try:
            res = requests.post(
                f"https://api.deepinfra.com/v1/inference/{self.model_id}",
                headers=headers,
                json={"inputs": batch, "normalize": self.normalize, **extra_kwargs},
            )
        except requests.exceptions.RequestException as e:
            raise ValueError(f"Error raised by inference endpoint: {e}")
        if res.status_code != 200:
            raise ValueError(
                "Error raised by inference API HTTP code: %s, %s"
                % (res.status_code, res.text)
            )
        try:
            embeddings = res.json()["embeddings"]
        except requests.exceptions.JSONDecodeError as e:
            raise ValueError(
                f"Error raised by inference API: {e}.\nResponse: {res.text}"
            )
        return embeddings

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed documents using a Deep Infra deployed embedding model.

        For larger batches, the input list of texts is chunked into smaller
        batches to avoid exceeding the maximum request size.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        prefixed = [f"{self.embed_instruction}{text}" for text in texts]
        results: List[List[float]] = []
        for start in range(0, len(prefixed), self.batch_size):
            results.extend(self._embed(prefixed[start : start + self.batch_size]))
        return results

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using a Deep Infra deployed embedding model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self._embed([f"{self.query_instruction}{text}"])[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/sagemaker_endpoint.py | from typing import Any, Dict, List, Optional
from langchain_core.embeddings import Embeddings
from langchain_core.utils import pre_init
from pydantic import BaseModel, ConfigDict
from langchain_community.llms.sagemaker_endpoint import ContentHandlerBase
class EmbeddingsContentHandler(ContentHandlerBase[List[str], List[List[float]]]):
    """Content handler for embedding endpoints: transforms a batch of input
    strings into the endpoint request payload and the endpoint response back
    into one embedding vector per input string."""
class SagemakerEndpointEmbeddings(BaseModel, Embeddings):
    """Custom Sagemaker Inference Endpoints.

    To use, you must supply the endpoint name from your deployed
    Sagemaker model & the region where it is deployed.

    To authenticate, the AWS client uses the following methods to
    automatically load credentials:
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html

    If a specific credential profile should be used, you must pass
    the name of the profile from the ~/.aws/credentials file that is to be used.

    Make sure the credentials / roles used have the required policies to
    access the Sagemaker endpoint.
    See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
    """

    """
    Example:
        .. code-block:: python

            from langchain_community.embeddings import SagemakerEndpointEmbeddings
            endpoint_name = (
                "my-endpoint-name"
            )
            region_name = (
                "us-west-2"
            )
            credentials_profile_name = (
                "default"
            )
            se = SagemakerEndpointEmbeddings(
                endpoint_name=endpoint_name,
                region_name=region_name,
                credentials_profile_name=credentials_profile_name
            )

            #Use with boto3 client
            client = boto3.client(
                "sagemaker-runtime",
                region_name=region_name
            )
            se = SagemakerEndpointEmbeddings(
                endpoint_name=endpoint_name,
                client=client
            )
    """

    client: Any = None
    # Pre-constructed boto3 "sagemaker-runtime" client; when provided,
    # region/profile settings below are ignored.

    endpoint_name: str = ""
    """The name of the endpoint from the deployed Sagemaker model.
    Must be unique within an AWS Region."""

    region_name: str = ""
    """The aws region where the Sagemaker model is deployed, eg. `us-west-2`."""

    credentials_profile_name: Optional[str] = None
    """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
    has either access keys or role information specified.
    If not specified, the default credential profile or, if on an EC2 instance,
    credentials from IMDS will be used.
    See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
    """

    content_handler: EmbeddingsContentHandler
    """The content handler class that provides an input and
    output transform functions to handle formats between LLM
    and the endpoint.
    """

    """
    Example:
        .. code-block:: python

            from langchain_community.embeddings.sagemaker_endpoint import EmbeddingsContentHandler

            class ContentHandler(EmbeddingsContentHandler):
                content_type = "application/json"
                accepts = "application/json"

                def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes:
                    input_str = json.dumps({prompts: prompts, **model_kwargs})
                    return input_str.encode('utf-8')

                def transform_output(self, output: bytes) -> List[List[float]]:
                    response_json = json.loads(output.read().decode("utf-8"))
                    return response_json["vectors"]
    """  # noqa: E501

    model_kwargs: Optional[Dict] = None
    """Keyword arguments to pass to the model."""

    endpoint_kwargs: Optional[Dict] = None
    """Optional attributes passed to the invoke_endpoint
    function. See `boto3`_. docs for more info.
    .. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html>
    """

    model_config = ConfigDict(
        arbitrary_types_allowed=True, extra="forbid", protected_namespaces=()
    )

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Dont do anything if client provided externally"""
        if values.get("client") is not None:
            return values

        # Validate that AWS credentials and the boto3 package exist in the
        # environment, then build the sagemaker-runtime client.
        try:
            import boto3

            try:
                if values["credentials_profile_name"] is not None:
                    session = boto3.Session(
                        profile_name=values["credentials_profile_name"]
                    )
                else:
                    # use default credentials
                    session = boto3.Session()
                values["client"] = session.client(
                    "sagemaker-runtime", region_name=values["region_name"]
                )
            except Exception as e:
                raise ValueError(
                    "Could not load credentials to authenticate with AWS client. "
                    "Please check that credentials in the specified "
                    f"profile name are valid. {e}"
                ) from e
        except ImportError as e:
            raise ImportError(
                "Could not import boto3 python package. "
                "Please install it with `pip install boto3`."
            ) from e
        return values

    def _embedding_func(self, texts: List[str]) -> List[List[float]]:
        """Call out to SageMaker Inference embedding endpoint."""
        # replace newlines, which can negatively affect performance.
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        _model_kwargs = self.model_kwargs or {}
        _endpoint_kwargs = self.endpoint_kwargs or {}

        body = self.content_handler.transform_input(texts, _model_kwargs)
        content_type = self.content_handler.content_type
        accepts = self.content_handler.accepts

        # send request
        try:
            response = self.client.invoke_endpoint(
                EndpointName=self.endpoint_name,
                Body=body,
                ContentType=content_type,
                Accept=accepts,
                **_endpoint_kwargs,
            )
        except Exception as e:
            raise ValueError(f"Error raised by inference endpoint: {e}") from e

        return self.content_handler.transform_output(response["Body"])

    def embed_documents(
        self, texts: List[str], chunk_size: int = 64
    ) -> List[List[float]]:
        """Compute doc embeddings using a SageMaker Inference Endpoint.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size defines how many input texts will
                be grouped together as request. If None, will use the
                chunk size specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        # Guard the empty case: without it _chunk_size would be 0 and
        # range(0, 0, 0) raises "range() arg 3 must not be zero".
        if not texts:
            return []
        results = []
        # Clamp to [1, len(texts)] so a too-large or non-positive chunk_size
        # cannot produce a zero step.
        _chunk_size = max(1, min(chunk_size, len(texts)))
        for i in range(0, len(texts), _chunk_size):
            response = self._embedding_func(texts[i : i + _chunk_size])
            results.extend(response)
        return results

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a SageMaker inference endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self._embedding_func([text])[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/llm_rails.py | """This file is for LLMRails Embedding"""
from typing import Dict, List, Optional
import requests
from langchain_core.embeddings import Embeddings
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import BaseModel, ConfigDict, SecretStr
class LLMRailsEmbeddings(BaseModel, Embeddings):
    """LLMRails embedding models.

    To use, you should have the environment
    variable ``LLM_RAILS_API_KEY`` set with your API key or pass it
    as a named parameter to the constructor.

    Model can be one of ["embedding-english-v1","embedding-multi-v1"]

    Example:
        .. code-block:: python

            from langchain_community.embeddings import LLMRailsEmbeddings

            embeddings = LLMRailsEmbeddings(
                model="embedding-english-v1", api_key="my-api-key"
            )
    """

    model: str = "embedding-english-v1"
    """Model name to use."""

    api_key: Optional[SecretStr] = None
    """LLMRails API key."""

    model_config = ConfigDict(
        extra="forbid",
    )

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key exists in environment."""
        api_key = convert_to_secret_str(
            get_from_dict_or_env(values, "api_key", "LLM_RAILS_API_KEY")
        )
        values["api_key"] = api_key
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to LLMRails's embedding endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.

        Raises:
            requests.HTTPError: If the API responds with a non-2xx status.
        """
        response = requests.post(
            "https://api.llmrails.com/v1/embeddings",
            headers={"X-API-KEY": self.api_key.get_secret_value()},  # type: ignore[union-attr]
            json={"input": texts, "model": self.model},
            timeout=60,
        )
        # Surface HTTP errors explicitly rather than failing later with an
        # opaque KeyError when the error response lacks a "data" field.
        response.raise_for_status()
        return [item["embedding"] for item in response.json()["data"]]

    def embed_query(self, text: str) -> List[float]:
        """Call out to LLMRails's embedding endpoint for a single query.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self.embed_documents([text])[0]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.